java.net.InetSocketAddress Scala Examples

The following examples show how to use java.net.InetSocketAddress. They are drawn from a range of open-source Scala projects; the source file, project, and license for each example are noted in its header.
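As a quick orientation before the project examples, here is a minimal, self-contained sketch (not taken from any project below; host names and ports are illustrative) showing the common ways to construct an InetSocketAddress:

import java.net.{InetAddress, InetSocketAddress}

object InetSocketAddressBasics extends App {
  // Resolve a host name eagerly and pair it with a port.
  val remote = new InetSocketAddress("example.com", 443)

  // Port 0 asks the OS to pick a free ephemeral port when binding.
  val ephemeral = new InetSocketAddress(InetAddress.getLoopbackAddress, 0)

  // An unresolved address defers the DNS lookup until connect time.
  val deferred = InetSocketAddress.createUnresolved("example.com", 80)

  println(s"${remote.getHostString}:${remote.getPort}")
}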
Example 1
Source File: GrpcServerOwner.scala    From daml   with Apache License 2.0
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.platform.apiserver

import java.io.IOException
import java.net.{BindException, InetAddress, InetSocketAddress}
import java.util.concurrent.TimeUnit.SECONDS

import com.daml.metrics.Metrics
import com.daml.platform.apiserver.GrpcServerOwner._
import com.daml.ports.Port
import com.daml.resources.{Resource, ResourceOwner}
import com.google.protobuf.Message
import io.grpc.netty.NettyServerBuilder
import io.grpc._
import io.netty.channel.socket.nio.NioServerSocketChannel
import io.netty.handler.ssl.SslContext

import scala.concurrent.{ExecutionContext, Future}
import scala.util.control.NoStackTrace

final class GrpcServerOwner(
    address: Option[String],
    desiredPort: Port,
    maxInboundMessageSize: Int,
    sslContext: Option[SslContext] = None,
    interceptors: List[ServerInterceptor] = List.empty,
    metrics: Metrics,
    eventLoopGroups: ServerEventLoopGroups,
    services: Iterable[BindableService],
) extends ResourceOwner[Server] {
  override def acquire()(implicit executionContext: ExecutionContext): Resource[Server] = {
    val host = address.map(InetAddress.getByName).getOrElse(InetAddress.getLoopbackAddress)
    Resource(Future {
      val builder = NettyServerBuilder.forAddress(new InetSocketAddress(host, desiredPort.value))
      builder.sslContext(sslContext.orNull)
      builder.channelType(classOf[NioServerSocketChannel])
      builder.permitKeepAliveTime(10, SECONDS)
      builder.permitKeepAliveWithoutCalls(true)
      builder.directExecutor()
      builder.maxInboundMessageSize(maxInboundMessageSize)
      interceptors.foreach(builder.intercept)
      builder.intercept(new MetricsInterceptor(metrics))
      eventLoopGroups.populate(builder)
      services.foreach { service =>
        builder.addService(service)
        toLegacyService(service).foreach(builder.addService)
      }
      val server = builder.build()
      try {
        server.start()
      } catch {
        case e: IOException if e.getCause != null && e.getCause.isInstanceOf[BindException] =>
          throw new UnableToBind(desiredPort, e.getCause)
      }
      server
    })(server => Future(server.shutdown().awaitTermination()))
  }

  // This exposes the existing services under com.daml also under com.digitalasset.
  // This is necessary to allow applications built with an earlier version of the SDK
  // to still work.
  // The "proxy" services will not show up on the reflection service, because of the way it
  // processes service definitions via protobuf file descriptors.
  private def toLegacyService(service: BindableService): Option[ServerServiceDefinition] = {
    val `com.daml` = "com.daml"
    val `com.digitalasset` = "com.digitalasset"

    val damlDef = service.bindService()
    val damlDesc = damlDef.getServiceDescriptor
    // Only add the "proxy" service if the service name actually contains "com.daml".
    // Other registered services, such as the reflection service, don't need this special treatment.
    if (damlDesc.getName.contains(`com.daml`)) {
      val digitalassetName = damlDesc.getName.replace(`com.daml`, `com.digitalasset`)
      val digitalassetDef = ServerServiceDefinition.builder(digitalassetName)
      damlDef.getMethods.forEach { methodDef =>
        val damlMethodDesc = methodDef.getMethodDescriptor
        val digitalassetMethodName =
          damlMethodDesc.getFullMethodName.replace(`com.daml`, `com.digitalasset`)
        val digitalassetMethodDesc =
          damlMethodDesc.toBuilder.setFullMethodName(digitalassetMethodName).build()
        val _ = digitalassetDef.addMethod(
          digitalassetMethodDesc.asInstanceOf[MethodDescriptor[Message, Message]],
          methodDef.getServerCallHandler.asInstanceOf[ServerCallHandler[Message, Message]]
        )
      }
      Option(digitalassetDef.build())
    } else None
  }
}

object GrpcServerOwner {

  final class UnableToBind(port: Port, cause: Throwable)
      extends RuntimeException(
        s"The API server was unable to bind to port $port. Terminate the process occupying the port, or choose a different one.",
        cause)
      with NoStackTrace

} 
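The essential pattern in Example 1 is binding a Netty-based gRPC server to an InetSocketAddress built from a resolved host and a desired port. Stripped of the resource management and interceptors, a minimal sketch of that call chain (the port and message size are placeholder values) looks like:

import java.net.{InetAddress, InetSocketAddress}
import io.grpc.netty.NettyServerBuilder

val server = NettyServerBuilder
  .forAddress(new InetSocketAddress(InetAddress.getLoopbackAddress, 6865))
  .maxInboundMessageSize(4 * 1024 * 1024)
  .build()
server.start()
// ... later, mirror the cleanup above: server.shutdown().awaitTermination()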
Example 2
Source File: CirceSpec.scala    From featherbed   with Apache License 2.0
package featherbed.circe

import cats.implicits._
import com.twitter.util.Future
import io.circe._
import io.circe.generic.auto._
import io.circe.parser.parse
import io.circe.syntax._
import org.scalatest.FlatSpec
import shapeless.{Coproduct, Witness}
import shapeless.union.Union

case class Foo(someText: String, someInt: Int)

class CirceSpec extends FlatSpec {

  "post request of a case class" should "derive JSON encoder" in {

    import com.twitter.util.{Future, Await}
    import com.twitter.finagle.{Service, Http}
    import com.twitter.finagle.http.{Request, Response}
    import java.net.InetSocketAddress

    val server = Http.serve(new InetSocketAddress(8766), new Service[Request, Response] {
      def apply(request: Request): Future[Response] = Future {
        val rep = Response()
        rep.contentString = request.contentString
        rep.setContentTypeJson()
        rep
      }
    })

    import java.net.URL
    val client = new featherbed.Client(new URL("http://localhost:8766/api/"))

    import io.circe.generic.auto._

    val req = client.post("foo/bar")
      .withContent(Foo("Hello world!", 42), "application/json")
        .accept[Coproduct.`"application/json"`.T]

    val result = Await.result {
       req.send[Foo]()
    }

    Foo("test", 42).asJson.toString

    parse("""{"someText": "test", "someInt": 42}""").toValidated.map(_.as[Foo])

    Await.result(server.close())

  }

  "API example" should "compile" in {
    import shapeless.Coproduct
    import java.net.URL
    import com.twitter.util.Await
    case class Post(userId: Int, id: Int, title: String, body: String)

    case class Comment(postId: Int, id: Int, name: String, email: String, body: String)
    class JSONPlaceholderAPI(baseUrl: URL) {

      private val client = new featherbed.Client(baseUrl)
      type JSON = Coproduct.`"application/json"`.T

      object posts {

        private val listRequest = client.get("posts").accept[JSON]
        private val getRequest = (id: Int) => client.get(s"posts/$id").accept[JSON]

        def list(): Future[Seq[Post]] = listRequest.send[Seq[Post]]()
        def get(id: Int): Future[Post] = getRequest(id).send[Post]()
      }

      object comments {
        private val listRequest = client.get("comments").accept[JSON]
        private val getRequest = (id: Int) => client.get(s"comments/$id").accept[JSON]

        def list(): Future[Seq[Comment]] = listRequest.send[Seq[Comment]]()
        def get(id: Int): Future[Comment] = getRequest(id).send[Comment]()
      }
    }

    val apiClient = new JSONPlaceholderAPI(new URL("http://jsonplaceholder.typicode.com/"))

    Await.result(apiClient.posts.list())
  }

} 
Example 3
Source File: MetricsReporter.scala    From daml   with Apache License 2.0
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.platform.configuration

import java.net.{InetAddress, InetSocketAddress}
import java.nio.file.{Path, Paths}

import com.codahale.metrics
import com.codahale.metrics.{MetricRegistry, ScheduledReporter}
import com.daml.platform.sandbox.config.InvalidConfigException
import com.google.common.net.HostAndPort
import scopt.Read

import scala.util.Try

sealed trait MetricsReporter {
  def register(registry: MetricRegistry): ScheduledReporter
}

object MetricsReporter {

  case object Console extends MetricsReporter {
    override def register(registry: MetricRegistry): ScheduledReporter =
      metrics.ConsoleReporter
        .forRegistry(registry)
        .build()
  }

  final case class Csv(directory: Path) extends MetricsReporter {
    override def register(registry: MetricRegistry): ScheduledReporter =
      metrics.CsvReporter
        .forRegistry(registry)
        .build(directory.toFile)
  }

  final case class Graphite(address: InetSocketAddress) extends MetricsReporter {
    override def register(registry: MetricRegistry): ScheduledReporter =
      metrics.graphite.GraphiteReporter
        .forRegistry(registry)
        .build(new metrics.graphite.Graphite(address))
  }

  object Graphite {
    val defaultHost: InetAddress = InetAddress.getLoopbackAddress
    val defaultPort: Int = 2003

    def apply(): Graphite =
      Graphite(new InetSocketAddress(defaultHost, defaultPort))

    def apply(port: Int): Graphite =
      Graphite(new InetSocketAddress(defaultHost, port))
  }

  implicit val metricsReporterRead: Read[MetricsReporter] = Read.reads {
    _.split(":", 2).toSeq match {
      case Seq("console") => Console
      case Seq("csv", directory) => Csv(Paths.get(directory))
      case Seq("graphite") =>
        Graphite()
      case Seq("graphite", address) =>
        Try(address.toInt)
          .map(port => Graphite(port))
          .recover {
            case _: NumberFormatException =>
              //noinspection UnstableApiUsage
              val hostAndPort = HostAndPort
                .fromString(address)
                .withDefaultPort(Graphite.defaultPort)
              Graphite(new InetSocketAddress(hostAndPort.getHost, hostAndPort.getPort))
          }
          .get
      case _ =>
        throw new InvalidConfigException(
          """Must be one of "console", "csv:PATH", or "graphite[:HOST][:PORT]".""")
    }
  }

} 
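The scopt Read instance in Example 3 turns command-line strings into MetricsReporter values. Assuming the class above is on the classpath, parsing would look roughly like this (paths and host names are illustrative):

import com.daml.platform.configuration.MetricsReporter

val console  = MetricsReporter.metricsReporterRead.reads("console")
val csv      = MetricsReporter.metricsReporterRead.reads("csv:/var/log/metrics")
val graphite = MetricsReporter.metricsReporterRead.reads("graphite:graphite.example.com:2003")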
Example 4
Source File: GrpcClientResource.scala    From daml   with Apache License 2.0
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.platform.sandbox.services

import java.net.{InetAddress, InetSocketAddress}
import java.util.concurrent.TimeUnit

import com.daml.platform.apiserver.EventLoopGroupOwner
import com.daml.ports.Port
import com.daml.resources.{Resource, ResourceOwner}
import io.grpc.Channel
import io.grpc.netty.NettyChannelBuilder
import io.netty.channel.EventLoopGroup

import scala.concurrent.{ExecutionContext, Future}

object GrpcClientResource {
  def owner(port: Port): ResourceOwner[Channel] =
    for {
      eventLoopGroup <- new EventLoopGroupOwner("api-client", sys.runtime.availableProcessors())
      channel <- channelOwner(port, EventLoopGroupOwner.clientChannelType, eventLoopGroup)
    } yield channel

  private def channelOwner(
      port: Port,
      channelType: Class[_ <: io.netty.channel.Channel],
      eventLoopGroup: EventLoopGroup,
  ): ResourceOwner[Channel] =
    new ResourceOwner[Channel] {
      override def acquire()(implicit executionContext: ExecutionContext): Resource[Channel] = {
        Resource(Future {
          NettyChannelBuilder
            .forAddress(new InetSocketAddress(InetAddress.getLoopbackAddress, port.value))
            .channelType(channelType)
            .eventLoopGroup(eventLoopGroup)
            .usePlaintext()
            .directExecutor()
            .build()
        })(channel =>
          Future {
            channel.shutdownNow()
            if (!channel.awaitTermination(5, TimeUnit.SECONDS)) {
              sys.error(
                "Unable to shutdown channel to a remote API under tests. Unable to recover. Terminating.")
            }
        })
      }
    }
} 
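Example 4 is the client-side counterpart of Example 1: it points a NettyChannelBuilder at a loopback InetSocketAddress. The core call chain, minus the resource wrapper (the port is a placeholder), is:

import java.net.{InetAddress, InetSocketAddress}
import io.grpc.netty.NettyChannelBuilder

val channel = NettyChannelBuilder
  .forAddress(new InetSocketAddress(InetAddress.getLoopbackAddress, 6865))
  .usePlaintext()
  .build()
// ... later: channel.shutdownNow()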
Example 5
Source File: ReferenceServiceAndClientSpecHttp.scala    From daml   with Apache License 2.0
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.grpc.adapter.operation

import com.daml.grpc.adapter.client.ReferenceClientCompatibilityCheck
import org.scalatest.concurrent.ScalaFutures
import org.scalatest.{BeforeAndAfterAll, Matchers, WordSpec}
import java.net.InetSocketAddress

class ReferenceServiceAndClientHttpSpec
    extends WordSpec
    with Matchers
    with BeforeAndAfterAll
    with ScalaFutures
    with ReferenceClientCompatibilityCheck
    with ReferenceServiceFixture {

  "Reference service" when {

    "testing with reference client" should {
      behave like referenceClientCompatible(clientStub)
    }
  }
  override def socketAddress = Some(new InetSocketAddress("127.0.0.1", 0))
} 
Example 6
Source File: ServerStreamingBenchmark.scala    From daml   with Apache License 2.0
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.grpc.adapter

import akka.Done
import akka.stream.scaladsl.Sink
import com.daml.grpc.adapter.client.akka.ClientAdapter
import com.daml.grpc.adapter.operation.AkkaServiceFixture
import com.daml.ledger.api.perf.util.AkkaStreamPerformanceTest
import com.daml.ledger.api.testing.utils.Resource
import com.daml.platform.hello.{HelloRequest, HelloServiceGrpc}
import io.grpc.ManagedChannel
import org.scalameter.api.Gen
import org.scalameter.picklers.noPickler._

import scala.concurrent.Future
import java.net.InetSocketAddress
import java.util.concurrent.TimeUnit

object ServerStreamingBenchmark extends AkkaStreamPerformanceTest {

  override type ResourceType = () => ManagedChannel

  @transient override protected lazy val resource: Resource[() => ManagedChannel] =
    AkkaServiceFixture.getResource(Some(new InetSocketAddress(0))).map(_._2.channel)

  private val sizes = for {
    totalElements <- Gen.range("numResponses")(50000, 100000, 50000)
    clients <- Gen.enumeration("numClients")(1, 10)
    callsPerClient <- Gen.enumeration("numCalls")(1, 10)
  } yield (totalElements, clients, callsPerClient)

  performance of "Akka-Stream server" config (daConfig: _*) in {
    measure method "server streaming" in {
      using(sizes).withLifecycleManagement() in {
        case (totalElements, clients, callsPerClient) =>
          val eventualDones = for {
            (channel, schedulerPool) <- 1
              .to(clients)
              .map(i => resource.value() -> new AkkaExecutionSequencerPool(s"client-$i")(system))
            _ <- 1.to(callsPerClient)
          } yield {
            serverStreamingCall(totalElements / clients / callsPerClient, channel)(schedulerPool)
              .map(_ => channel -> schedulerPool)
          }
          val eventualTuples = Future.sequence(eventualDones)
          await(eventualTuples).foreach {
            case (channel, pool) =>
              channel.shutdown()
              channel.awaitTermination(5, TimeUnit.SECONDS)
              pool.close()
          }

      }
    }
  }

  private def serverStreamingCall(streamedElements: Int, managedChannel: ManagedChannel)(
      implicit
      executionSequencerFactory: ExecutionSequencerFactory): Future[Done] = {
    ClientAdapter
      .serverStreaming(
        HelloRequest(streamedElements),
        HelloServiceGrpc.stub(managedChannel).serverStreaming)
      .runWith(Sink.ignore)(materializer)
  }
} 
Example 7
Source File: BaseManager.scala    From iotchain   with MIT License
package jbok.core.peer

import java.net.InetSocketAddress

import cats.effect._
import cats.effect.concurrent.Ref
import cats.implicits._
import fs2.io.tcp.Socket
import jbok.codec.rlp.implicits._
import jbok.common.math.N
import jbok.core.config.FullConfig
import jbok.core.ledger.History
import jbok.core.messages.Status
import jbok.core.queue.Queue
import jbok.network.tcp.implicits._
import jbok.network.{Message, Request}

import scala.util.control.NoStackTrace

final case class Incompatible(local: Status, remote: Status) extends NoStackTrace {
  override def toString: String = s"peer incompatible chainId:${local.chainId}/${remote.chainId} genesis:${local.genesisHash.toHex}/${remote.genesisHash.toHex}"
}

abstract class BaseManager[F[_]](config: FullConfig, history: History[F])(implicit F: Concurrent[F]) {
  def inbound: Queue[F, Peer[F], Message[F]]

  val connected: Ref[F, Map[PeerUri, (Peer[F], Socket[F])]] = Ref.unsafe(Map.empty)

  def isConnected(uri: PeerUri): F[Boolean] = connected.get.map(_.get(uri).isDefined)

  def close(uri: PeerUri): F[Unit] =
    connected.get.map(_.get(uri)).flatMap {
      case Some((_, socket)) => socket.endOfOutput >> socket.close
      case _                 => F.unit
    }

  val localStatus: F[Status] =
    for {
      genesis <- history.genesisHeader
      number  <- history.getBestBlockNumber
      td      <- history.getTotalDifficultyByNumber(number).map(_.getOrElse(N(0)))
    } yield Status(history.chainId, genesis.hash, number, td, config.service.uri)

  def handshake(socket: Socket[F]): F[Peer[F]] =
    for {
      localStatus <- localStatus
      request = Request.binary[F, Status](Status.name, localStatus.encoded)
      _            <- socket.writeMessage(request)
      remoteStatus <- socket.readMessage.flatMap(_.as[Status])
      remote       <- socket.remoteAddress.map(_.asInstanceOf[InetSocketAddress])
      peer <- if (!localStatus.isCompatible(remoteStatus)) {
        F.raiseError(Incompatible(localStatus, remoteStatus))
      } else {
        Peer[F](PeerUri.fromTcpAddr(remote), remoteStatus)
      }
    } yield peer

  val seedDisconnects: F[List[PeerUri]] = config.peer.seedUris.filterA(uri => isConnected(uri).map(b => !b))

  val seedConnects: F[List[PeerUri]] = config.peer.seedUris.filterA(uri => isConnected(uri))
} 
Example 8
Source File: IncomingManager.scala    From iotchain   with MIT License
package jbok.core.peer

import java.net.InetSocketAddress
import java.nio.channels.AsynchronousChannelGroup

import cats.effect._
import cats.effect.concurrent.Deferred
import cats.effect.implicits._
import cats.implicits._
import fs2._
import fs2.io.tcp.Socket
import javax.net.ssl.SSLContext
import jbok.common.log.Logger
import jbok.core.config.FullConfig
import jbok.core.ledger.History
import jbok.core.queue.Queue
import jbok.network.Message
import jbok.network.tcp.implicits._

final class IncomingManager[F[_]](config: FullConfig, history: History[F], ssl: Option[SSLContext], val inbound: Queue[F, Peer[F], Message[F]])(
    implicit F: ConcurrentEffect[F],
    cs: ContextShift[F],
    acg: AsynchronousChannelGroup
) extends BaseManager[F](config, history) {
  private val log = Logger[F]

  val localBindAddress: Deferred[F, InetSocketAddress] = Deferred.unsafe[F, InetSocketAddress]

  val localPeerUri: F[PeerUri] = localBindAddress.get.map(addr => PeerUri.fromTcpAddr(addr))

  val peers: Stream[F, Resource[F, (Peer[F], Socket[F])]] =
    Socket
      .serverWithLocalAddress[F](
        address = config.peer.bindAddr,
        maxQueued = 10,
        reuseAddress = true,
        receiveBufferSize = config.peer.bufferSize
      )
      .flatMap {
        case Left(bound) =>
          Stream.eval_(log.i(s"IncomingManager successfully bound to address ${bound}") >> localBindAddress.complete(bound))
        case Right(res) =>
          Stream.emit {
            for {
              socket    <- res
              tlsSocket <- Resource.liftF(socket.toTLSSocket(ssl, client = false))
              peer      <- Resource.liftF(handshake(socket))
              _         <- Resource.make(connected.update(_ + (peer.uri -> (peer -> socket))).as(peer))(peer => connected.update(_ - peer.uri))
              _         <- Resource.liftF(log.i(s"accepted incoming peer ${peer.uri}"))
            } yield (peer, tlsSocket)
          }
      }

  val serve: Stream[F, Unit] =
    peers
      .map { res =>
        Stream
          .resource(res)
          .flatMap {
            case (peer, socket) =>
              Stream(
                socket
                  .reads(config.peer.bufferSize, None)
                  .through(Message.decodePipe[F])
                  .map(m => peer -> m)
                  .through(inbound.sink)
                  .onFinalize(log.i(s"disconnected incoming ${peer.uri}") >> connected.update(_ - peer.uri)),
                peer.queue.dequeue.through(Message.encodePipe[F]).through(socket.writes(None))
              ).parJoinUnbounded
          }
          .handleErrorWith(e => Stream.eval(log.w(s"handle incoming peer failure: ${e}", e)))
      }
      .parJoin(config.peer.maxIncomingPeers)

  val resource: Resource[F, PeerUri] = Resource {
    for {
      fiber   <- serve.compile.drain.start
      address <- localBindAddress.get
    } yield PeerUri.fromTcpAddr(address) -> fiber.cancel
  }
} 
Example 9
Source File: NetworkBuilder.scala    From iotchain   with MIT License
package jbok.core.config

import java.net.InetSocketAddress
import java.nio.file.{Path, Paths}

import better.files.File
import cats.effect.IO
import cats.implicits._
import io.circe.syntax._
import jbok.common.config.Config
import jbok.core.keystore.KeyStorePlatform
import jbok.core.models.Address
import jbok.core.peer.PeerUri
import jbok.crypto.signature.KeyPair
import monocle.macros.syntax.lens._

import sys.process.{ProcessLogger, stringSeqToProcess}
import scala.concurrent.duration._

final case class NetworkBuilder(
    base: FullConfig,
    configs: List[FullConfig] = Nil,
) {
  val home = System.getProperty("user.home")
  val root = Paths.get(home).resolve(".jbok")

  def withBlockPeriod(n: Int): NetworkBuilder =
    copy(base = base.lens(_.mining.period).set(n.millis))

  def createCert(ip: String, cn: String, caDir: Path, certDir: Path): IO[String] = IO {
    val path = File(".")
    val projectDir = path.path.toAbsolutePath
    val processLogger = new ProcessLogger {
      override def out(s: => String): Unit = println(s)
      override def err(s: => String): Unit = println(s)
      override def buffer[T](f: => T): T = f
    }

    Seq("bash", "-c", s"${projectDir.resolve("bin/create-cert.sh")} ${ip} ${cn} ${projectDir.resolve("bin").toAbsolutePath} ${caDir.toAbsolutePath} ${certDir.toAbsolutePath}")
      .lineStream_!(processLogger)
      .mkString("\n")
  }

  def addNode(keyPair: KeyPair, coinbase: Address, rootPath: Path, host: String): NetworkBuilder = {
    val config = base
      .lens(_.rootPath).set(rootPath.toAbsolutePath.toString)
      .lens(_.peer.host).set(host)
      .lens(_.service.local).set(host)
      .lens(_.service.enableMetrics).set(true)
//      .lens(_.service.secure).set(true)
      .lens(_.mining.enabled).set(true)
      .lens(_.mining.address).set(Address(keyPair))
      .lens(_.mining.coinbase).set(coinbase)
//      .lens(_.ssl.enabled).set(true)
      .lens(_.ssl.trustStorePath).set(rootPath.resolve("cert/cacert.jks").toAbsolutePath.toString)
      .lens(_.ssl.keyStorePath).set(rootPath.resolve("cert/server.jks").toAbsolutePath.toString)
      .lens(_.persist.driver).set("rocksdb")
      .lens(_.persist.path).set(s"${rootPath.resolve("data").toAbsolutePath}")
      .lens(_.log.logDir).set(s"${rootPath.resolve("logs").toAbsolutePath}")
      .lens(_.keystore.dir).set(s"${rootPath.resolve("keystore").toAbsolutePath}")
      .lens(_.db.driver).set("org.sqlite.JDBC")
      .lens(_.db.url).set(s"jdbc:sqlite:${rootPath.resolve(s"service.db")}")

    val keystore = new KeyStorePlatform[IO](config.keystore)
    keystore.importPrivateKey(keyPair.secret.bytes, "changeit").unsafeRunSync()

    createCert(host, host, root.resolve("ca"), rootPath.resolve("cert")).unsafeRunSync()
    copy(configs = config :: configs)
  }

  def build: List[FullConfig] = {
    val reversed = configs.reverse
    val seeds = reversed.map(_.peer).map { peer =>
      PeerUri.fromTcpAddr(new InetSocketAddress(peer.host, peer.port)).uri
    }

    reversed.zipWithIndex.map { case (config, i) => config.lens(_.peer.seeds).set(seeds.take(i) ++ seeds.drop(i + 1)) }
  }

  def dump: IO[Unit] =
    build.traverse_(config => Config[IO].dump(config.asJson, Paths.get(config.rootPath).resolve(s"config.yaml")))
} 
Example 10
Source File: ServiceConfig.scala    From iotchain   with MIT License
package jbok.core.config

import java.net.InetSocketAddress

import io.circe.generic.extras.ConfiguredJsonCodec
import jbok.codec.json.implicits._

@ConfiguredJsonCodec
final case class ServiceConfig(
    enable: Boolean,
    enableHttp2: Boolean,
    enableWebsockets: Boolean,
    secure: Boolean,
    logHeaders: Boolean,
    logBody: Boolean,
    enableMetrics: Boolean,
    allowedOrigins: List[String],
    local: String,
    host: String,
    port: Int,
    apis: List[String]
) {
  val addr = new InetSocketAddress(local, port)

  val uri: String = (if (secure) "https:" else "http:") + s"//${host}:${port}"
}

@ConfiguredJsonCodec
final case class AppConfig(
    db: DatabaseConfig,
    service: ServiceConfig
) 
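Given a ServiceConfig as in Example 10, the derived addr and uri fields come straight from the constructor arguments. A minimal hand-built instance (all values are illustrative) behaves like:

val cfg = ServiceConfig(
  enable = true, enableHttp2 = false, enableWebsockets = false,
  secure = false, logHeaders = false, logBody = false,
  enableMetrics = false, allowedOrigins = List("*"),
  local = "127.0.0.1", host = "node1.example.com", port = 30315,
  apis = List("public")
)
cfg.addr // an InetSocketAddress for 127.0.0.1:30315 (the bind address)
cfg.uri  // "http://node1.example.com:30315" (the advertised address)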
Example 11
Source File: ChaosInterface.scala    From eventuate-chaos   with Apache License 2.0
package com.rbmhtechnology.eventuate.chaos

import java.net.InetSocketAddress

import akka.actor.Actor
import akka.actor.ActorLogging
import akka.actor.ActorRef
import akka.io.IO
import akka.io.Tcp
import akka.util.ByteString

abstract class ChaosInterface extends Actor with ActorLogging {
  val port = 8080
  val endpoint = new InetSocketAddress(port)
  val command = """(?s)(\w+)\s+(\d+).*""".r

  implicit val ec = context.dispatcher

  IO(Tcp)(context.system) ! Tcp.Bind(self, endpoint)

  println(s"Now listening on port $port")

  def handleCommand: PartialFunction[(String, Option[Int], ActorRef), Unit]

  protected def reply(message: String, receiver: ActorRef) = {
    receiver ! Tcp.Write(ByteString(message))
    receiver ! Tcp.Close
  }

  protected def closeOnError(receiver: ActorRef): PartialFunction[Throwable, Unit] = {
    case err: Throwable =>
      receiver ! Tcp.Close
  }

  def receive: Receive = {
    case Tcp.Connected(remote, _) =>
      sender ! Tcp.Register(self)

    case Tcp.Received(bs) =>
      val content = bs.utf8String

      content match {
        case command(c, value) if handleCommand.isDefinedAt(c, Some(value.toInt), sender) =>
          handleCommand(c, Some(value.toInt), sender)
        case c if c.startsWith("quit") =>
          context.system.terminate()
        case c if handleCommand.isDefinedAt(c, None, sender) =>
          handleCommand(c, None, sender)
        case _ =>
          sender ! Tcp.Close
      }

    case Tcp.Closed =>
    case Tcp.PeerClosed =>
  }
} 
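ChaosInterface in Example 11 leaves handleCommand abstract, so a concrete subclass only has to pattern-match on the parsed command name, optional numeric argument, and sender. A hypothetical subclass (the command names are made up):

import akka.actor.ActorRef

class EchoChaosInterface extends ChaosInterface {
  def handleCommand: PartialFunction[(String, Option[Int], ActorRef), Unit] = {
    case ("delay", Some(ms), receiver) => reply(s"delaying $ms ms\n", receiver)
    case ("status", None, receiver)    => reply("ok\n", receiver)
  }
}
// create with: system.actorOf(Props(new EchoChaosInterface))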
Example 12
Source File: FlumeInputDStream.scala    From drizzle-spark   with Apache License 2.0
package org.apache.spark.streaming.flume

import java.io.{Externalizable, ObjectInput, ObjectOutput}
import java.net.InetSocketAddress
import java.nio.ByteBuffer
import java.util.concurrent.Executors

import scala.collection.JavaConverters._
import scala.reflect.ClassTag

import org.apache.avro.ipc.NettyServer
import org.apache.avro.ipc.specific.SpecificResponder
import org.apache.flume.source.avro.{AvroFlumeEvent, AvroSourceProtocol, Status}
import org.jboss.netty.channel.{ChannelPipeline, ChannelPipelineFactory, Channels}
import org.jboss.netty.channel.socket.nio.NioServerSocketChannelFactory
import org.jboss.netty.handler.codec.compression._

import org.apache.spark.internal.Logging
import org.apache.spark.storage.StorageLevel
import org.apache.spark.streaming.StreamingContext
import org.apache.spark.streaming.dstream._
import org.apache.spark.streaming.receiver.Receiver
import org.apache.spark.util.Utils

private[streaming]
class FlumeInputDStream[T: ClassTag](
  _ssc: StreamingContext,
  host: String,
  port: Int,
  storageLevel: StorageLevel,
  enableDecompression: Boolean
) extends ReceiverInputDStream[SparkFlumeEvent](_ssc) {

  override def getReceiver(): Receiver[SparkFlumeEvent] = {
    new FlumeReceiver(host, port, storageLevel, enableDecompression)
  }
}


// (The FlumeReceiver implementation is elided from this excerpt.)
private[streaming]
class CompressionChannelPipelineFactory extends ChannelPipelineFactory {
  def getPipeline(): ChannelPipeline = {
    val pipeline = Channels.pipeline()
    val encoder = new ZlibEncoder(6)
    pipeline.addFirst("deflater", encoder)
    pipeline.addFirst("inflater", new ZlibDecoder())
    pipeline
  }
}
Example 13
Source File: FlumeTestUtils.scala    From drizzle-spark   with Apache License 2.0
package org.apache.spark.streaming.flume

import java.net.{InetSocketAddress, ServerSocket}
import java.nio.ByteBuffer
import java.nio.charset.StandardCharsets
import java.util.{List => JList}
import java.util.Collections

import scala.collection.JavaConverters._

import org.apache.avro.ipc.NettyTransceiver
import org.apache.avro.ipc.specific.SpecificRequestor
import org.apache.commons.lang3.RandomUtils
import org.apache.flume.source.avro
import org.apache.flume.source.avro.{AvroFlumeEvent, AvroSourceProtocol}
import org.jboss.netty.channel.ChannelPipeline
import org.jboss.netty.channel.socket.SocketChannel
import org.jboss.netty.channel.socket.nio.NioClientSocketChannelFactory
import org.jboss.netty.handler.codec.compression.{ZlibDecoder, ZlibEncoder}

import org.apache.spark.util.Utils
import org.apache.spark.SparkConf


// (The FlumeTestUtils class itself is elided from this excerpt.)
private class CompressionChannelFactory(compressionLevel: Int)
  extends NioClientSocketChannelFactory {

  override def newChannel(pipeline: ChannelPipeline): SocketChannel = {
    val encoder = new ZlibEncoder(compressionLevel)
    pipeline.addFirst("deflater", encoder)
    pipeline.addFirst("inflater", new ZlibDecoder())
    super.newChannel(pipeline)
  }
}
Example 14
Source File: GraphiteSink.scala    From drizzle-spark   with Apache License 2.0
package org.apache.spark.metrics.sink

import java.net.InetSocketAddress
import java.util.Properties
import java.util.concurrent.TimeUnit

import com.codahale.metrics.MetricRegistry
import com.codahale.metrics.graphite.{Graphite, GraphiteReporter, GraphiteUDP}

import org.apache.spark.SecurityManager
import org.apache.spark.metrics.MetricsSystem

private[spark] class GraphiteSink(val property: Properties, val registry: MetricRegistry,
    securityMgr: SecurityManager) extends Sink {
  val GRAPHITE_DEFAULT_PERIOD = 10
  val GRAPHITE_DEFAULT_UNIT = "SECONDS"
  val GRAPHITE_DEFAULT_PREFIX = ""

  val GRAPHITE_KEY_HOST = "host"
  val GRAPHITE_KEY_PORT = "port"
  val GRAPHITE_KEY_PERIOD = "period"
  val GRAPHITE_KEY_UNIT = "unit"
  val GRAPHITE_KEY_PREFIX = "prefix"
  val GRAPHITE_KEY_PROTOCOL = "protocol"

  def propertyToOption(prop: String): Option[String] = Option(property.getProperty(prop))

  if (!propertyToOption(GRAPHITE_KEY_HOST).isDefined) {
    throw new Exception("Graphite sink requires 'host' property.")
  }

  if (!propertyToOption(GRAPHITE_KEY_PORT).isDefined) {
    throw new Exception("Graphite sink requires 'port' property.")
  }

  val host = propertyToOption(GRAPHITE_KEY_HOST).get
  val port = propertyToOption(GRAPHITE_KEY_PORT).get.toInt

  val pollPeriod = propertyToOption(GRAPHITE_KEY_PERIOD) match {
    case Some(s) => s.toInt
    case None => GRAPHITE_DEFAULT_PERIOD
  }

  val pollUnit: TimeUnit = propertyToOption(GRAPHITE_KEY_UNIT) match {
    case Some(s) => TimeUnit.valueOf(s.toUpperCase())
    case None => TimeUnit.valueOf(GRAPHITE_DEFAULT_UNIT)
  }

  val prefix = propertyToOption(GRAPHITE_KEY_PREFIX).getOrElse(GRAPHITE_DEFAULT_PREFIX)

  MetricsSystem.checkMinimalPollingPeriod(pollUnit, pollPeriod)

  val graphite = propertyToOption(GRAPHITE_KEY_PROTOCOL).map(_.toLowerCase) match {
    case Some("udp") => new GraphiteUDP(new InetSocketAddress(host, port))
    case Some("tcp") | None => new Graphite(new InetSocketAddress(host, port))
    case Some(p) => throw new Exception(s"Invalid Graphite protocol: $p")
  }

  val reporter: GraphiteReporter = GraphiteReporter.forRegistry(registry)
      .convertDurationsTo(TimeUnit.MILLISECONDS)
      .convertRatesTo(TimeUnit.SECONDS)
      .prefixedWith(prefix)
      .build(graphite)

  override def start() {
    reporter.start(pollPeriod, pollUnit)
  }

  override def stop() {
    reporter.stop()
  }

  override def report() {
    reporter.report()
  }
} 
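GraphiteSink in Example 14 is configured entirely through the Properties object passed to its constructor; the keys it reads are the GRAPHITE_KEY_* constants at the top of the class. A matching configuration (values are placeholders) would be:

val props = new java.util.Properties()
props.setProperty("host", "graphite.example.com") // required
props.setProperty("port", "2003")                 // required
props.setProperty("period", "10")
props.setProperty("unit", "SECONDS")
props.setProperty("prefix", "spark.app")
props.setProperty("protocol", "tcp")              // "udp" selects GraphiteUDP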
Example 15
Source File: NettyRpcHandlerSuite.scala    From drizzle-spark   with Apache License 2.0
package org.apache.spark.rpc.netty

import java.net.InetSocketAddress
import java.nio.ByteBuffer

import io.netty.channel.Channel
import org.mockito.Matchers._
import org.mockito.Mockito._

import org.apache.spark.SparkFunSuite
import org.apache.spark.network.client.{TransportClient, TransportResponseHandler}
import org.apache.spark.network.server.StreamManager
import org.apache.spark.rpc._

class NettyRpcHandlerSuite extends SparkFunSuite {

  val env = mock(classOf[NettyRpcEnv])
  val sm = mock(classOf[StreamManager])
  when(env.deserialize(any(classOf[TransportClient]), any(classOf[ByteBuffer]))(any()))
    .thenReturn(RequestMessage(RpcAddress("localhost", 12345), null, null))

  test("receive") {
    val dispatcher = mock(classOf[Dispatcher])
    val nettyRpcHandler = new NettyRpcHandler(dispatcher, env, sm)

    val channel = mock(classOf[Channel])
    val client = new TransportClient(channel, mock(classOf[TransportResponseHandler]))
    when(channel.remoteAddress()).thenReturn(new InetSocketAddress("localhost", 40000))
    nettyRpcHandler.channelActive(client)

    verify(dispatcher, times(1)).postToAll(RemoteProcessConnected(RpcAddress("localhost", 40000)))
  }

  test("connectionTerminated") {
    val dispatcher = mock(classOf[Dispatcher])
    val nettyRpcHandler = new NettyRpcHandler(dispatcher, env, sm)

    val channel = mock(classOf[Channel])
    val client = new TransportClient(channel, mock(classOf[TransportResponseHandler]))
    when(channel.remoteAddress()).thenReturn(new InetSocketAddress("localhost", 40000))
    nettyRpcHandler.channelActive(client)

    when(channel.remoteAddress()).thenReturn(new InetSocketAddress("localhost", 40000))
    nettyRpcHandler.channelInactive(client)

    verify(dispatcher, times(1)).postToAll(RemoteProcessConnected(RpcAddress("localhost", 40000)))
    verify(dispatcher, times(1)).postToAll(
      RemoteProcessDisconnected(RpcAddress("localhost", 40000)))
  }

} 
Example 16
Source File: Node.scala    From mantis   with Apache License 2.0
package io.iohk.ethereum.network.discovery

import java.net.{InetSocketAddress, _}

import akka.util.ByteString
import io.iohk.ethereum.network
import io.iohk.ethereum.utils.Logger
import org.spongycastle.util.encoders.Hex

import scala.util.{Failure, Success, Try}

case class Node(id: ByteString, addr: InetSocketAddress) {
  def toUri: URI = {
    val host = network.getHostName(addr.getAddress)
    val port = addr.getPort
    new URI(s"enode://${Hex.toHexString(id.toArray[Byte])}@$host:$port")
  }
}

object Node {
  def fromUri(uri: URI): Node = {
    val nodeId = ByteString(Hex.decode(uri.getUserInfo))
    Node(nodeId, new InetSocketAddress(uri.getHost, uri.getPort))
  }
}

object NodeParser extends Logger {
  val NodeScheme = "enode"
  val NodeIdSize = 64

  
  def parseNodes(unParsedNodes: Set[String]): Set[Node] = unParsedNodes.foldLeft[Set[Node]](Set.empty) {
    case (parsedNodes, nodeString) =>
      val maybeNode = NodeParser.parseNode(nodeString)
      maybeNode match {
        case Right(node) => parsedNodes + node
        case Left(errors) =>
          log.warn(s"Unable to parse node: $nodeString due to: ${errors.map(_.getMessage).mkString("; ")}")
          parsedNodes
      }
  }
} 
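NodeParser.parseNodes in Example 16 accepts enode URIs of the form enode://<128-hex-char-node-id>@host:port and drops entries that fail validation, logging a warning (the parseNode helper it delegates to is elided from this excerpt). A sketch with a dummy node id:

val dummyId = "ab" * 64 // 64 bytes = 128 hex characters
val nodes   = NodeParser.parseNodes(Set(s"enode://$dummyId@127.0.0.1:30303"))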
Example 17
Source File: ServerActor.scala    From mantis   with Apache License 2.0
package io.iohk.ethereum.network

import java.net.InetSocketAddress

import akka.actor.{Actor, ActorLogging, ActorRef, Props}
import akka.agent.Agent
import akka.io.Tcp.{Bind, Bound, CommandFailed, Connected}
import akka.io.{IO, Tcp}
import io.iohk.ethereum.utils.{NodeStatus, ServerStatus}
import org.spongycastle.util.encoders.Hex

class ServerActor(nodeStatusHolder: Agent[NodeStatus], peerManager: ActorRef) extends Actor with ActorLogging {

  import ServerActor._
  import context.system

  override def receive: Receive = {
    case StartServer(address) =>
      IO(Tcp) ! Bind(self, address)
      context become waitingForBindingResult
  }

  def waitingForBindingResult: Receive = {
    case Bound(localAddress) =>
      val nodeStatus = nodeStatusHolder()
      log.info("Listening on {}", localAddress)
      log.info("Node address: enode://{}@{}:{}",
        Hex.toHexString(nodeStatus.nodeId),
        getHostName(localAddress.getAddress),
        localAddress.getPort)
      nodeStatusHolder.send(_.copy(serverStatus = ServerStatus.Listening(localAddress)))
      context become listening

    case CommandFailed(b: Bind) =>
      log.warning("Binding to {} failed", b.localAddress)
      context stop self
  }

  def listening: Receive = {
    case Connected(remoteAddress, _) =>
      val connection = sender()
      peerManager ! PeerManagerActor.HandlePeerConnection(connection, remoteAddress)
  }
}

object ServerActor {
  def props(nodeStatusHolder: Agent[NodeStatus], peerManager: ActorRef): Props =
    Props(new ServerActor(nodeStatusHolder, peerManager))

  case class StartServer(address: InetSocketAddress)
} 
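Starting the server in Example 17 is a single message send; the actor binds via the Tcp extension and transitions through its receive states. A rough sketch (the status holder and peer manager are assumed to be built elsewhere, hence the placeholders):

import java.net.InetSocketAddress
import akka.actor.{ActorRef, ActorSystem}
import akka.agent.Agent
import io.iohk.ethereum.utils.NodeStatus

val system = ActorSystem("node")
val nodeStatusHolder: Agent[NodeStatus] = ??? // built from the node's key pair
val peerManager: ActorRef = ???               // the PeerManagerActor reference

val server = system.actorOf(ServerActor.props(nodeStatusHolder, peerManager))
server ! ServerActor.StartServer(new InetSocketAddress("0.0.0.0", 30303))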
Example 18
Source File: NodeStatus.scala    From mantis   with Apache License 2.0
package io.iohk.ethereum.utils

import java.net.InetSocketAddress

import io.iohk.ethereum.network._
import org.spongycastle.crypto.AsymmetricCipherKeyPair
import org.spongycastle.crypto.params.ECPublicKeyParameters

sealed trait ServerStatus
object ServerStatus {
  case object NotListening extends ServerStatus
  case class Listening(address: InetSocketAddress) extends ServerStatus
}

case class NodeStatus(
    key: AsymmetricCipherKeyPair,
    serverStatus: ServerStatus,
    discoveryStatus: ServerStatus) {

  val nodeId = key.getPublic.asInstanceOf[ECPublicKeyParameters].toNodeId
} 
Example 19
Source File: NetServiceSpec.scala    From mantis   with Apache License 2.0
package io.iohk.ethereum.jsonrpc

import java.net.InetSocketAddress

import akka.actor.ActorSystem
import akka.agent.Agent
import akka.testkit.TestProbe
import io.iohk.ethereum.{NormalPatience, crypto}
import io.iohk.ethereum.jsonrpc.NetService._
import io.iohk.ethereum.network.{Peer, PeerActor, PeerManagerActor}
import io.iohk.ethereum.nodebuilder.SecureRandomBuilder
import io.iohk.ethereum.utils.{NodeStatus, ServerStatus}
import org.scalatest.concurrent.ScalaFutures
import org.scalatest.{FlatSpec, Matchers}

import scala.concurrent.duration._
import scala.concurrent.ExecutionContext.Implicits.global

class NetServiceSpec extends FlatSpec with Matchers with ScalaFutures with NormalPatience with SecureRandomBuilder {

  "NetService" should "return handshaked peer count" in new TestSetup {
    val resF = netService.peerCount(PeerCountRequest())

    peerManager.expectMsg(PeerManagerActor.GetPeers)
    peerManager.reply(PeerManagerActor.Peers(Map(
      Peer(new InetSocketAddress(1), testRef, false) -> PeerActor.Status.Handshaked,
      Peer(new InetSocketAddress(2), testRef, false) -> PeerActor.Status.Handshaked,
      Peer(new InetSocketAddress(3), testRef, false) -> PeerActor.Status.Connecting)))

    resF.futureValue shouldBe Right(PeerCountResponse(2))
  }

  it should "return listening response" in new TestSetup {
    netService.listening(ListeningRequest()).futureValue shouldBe Right(ListeningResponse(true))
  }

  it should "return version response" in new TestSetup {
    netService.version(VersionRequest()).futureValue shouldBe Right(VersionResponse("1"))
  }

  trait TestSetup {
    implicit val system = ActorSystem("Testsystem")

    val testRef = TestProbe().ref

    val peerManager = TestProbe()

    val nodeStatus = NodeStatus(crypto.generateKeyPair(secureRandom), ServerStatus.Listening(new InetSocketAddress(9000)),
      discoveryStatus = ServerStatus.NotListening)
    val netService = new NetService(Agent(nodeStatus), peerManager.ref, NetServiceConfig(5.seconds))
  }

} 
Example 20
Source File: NetworkDeviceConnection.scala    From slide-desktop   with GNU General Public License v2.0
package connections.network

import java.io.{IOException, ObjectInputStream}
import java.net.InetSocketAddress

import connections.BaseDeviceConnection
import slide.Const

class NetworkDeviceConnection(val ip: String) extends BaseDeviceConnection {

    private val inetAddress: InetSocketAddress = new InetSocketAddress(ip, Const.NET_PORT)

    super.socket.connect(inetAddress, 4000)

    private val input = new ObjectInputStream(socket.getInputStream)

    @throws[IOException]
    override def connect(): Boolean = {
        this.start()
    }

    @throws[IOException]
    @throws[ClassNotFoundException]
    override def nextMessage(): Array[Short] = {
        try {
            input.readObject.asInstanceOf[Array[Short]]
        } catch {
            case e: ClassNotFoundException => new Array[Short](1)
        }
    }

    override def close(): Unit = {
        stopRunning()
        input.close()
        socket.close()
    }
} 
Example 21
Source File: package.scala    From zio-metrics   with Apache License 2.0
package zio.metrics.dropwizard

import zio.{ Has, Layer, Task, ZLayer }
import java.util.concurrent.TimeUnit
import java.io.File
import java.util.Locale
import java.net.InetSocketAddress
import org.slf4j.LoggerFactory
import java.{ util => ju }

package object reporters {

  import com.codahale.metrics.MetricRegistry
  import com.codahale.metrics.MetricFilter
  import com.codahale.metrics.graphite.Graphite
  import com.codahale.metrics.graphite.GraphiteReporter
  import com.codahale.metrics.ConsoleReporter
  import com.codahale.metrics.Slf4jReporter
  import com.codahale.metrics.CsvReporter
  import com.codahale.metrics.jmx.JmxReporter
  import com.codahale.metrics.Reporter

  type Reporters = Has[Reporters.Service]

  object Reporters {
    trait Service {
      def jmx(r: MetricRegistry): Task[JmxReporter]

      def console(r: MetricRegistry): Task[ConsoleReporter]

      def slf4j(r: MetricRegistry, duration: Int, unit: TimeUnit, loggerName: String): Task[Slf4jReporter]

      def csv(r: MetricRegistry, file: File, locale: Locale): Task[Reporter]

      def graphite(r: MetricRegistry, host: String, port: Int, prefix: String): Task[GraphiteReporter]
    }

    val live: Layer[Nothing, Reporters] = ZLayer.succeed(new Service {

      def jmx(r: MetricRegistry): zio.Task[JmxReporter] = Task(JmxReporter.forRegistry(r).build())

      def console(r: MetricRegistry): Task[ConsoleReporter] = Task(
        ConsoleReporter
          .forRegistry(r)
          .convertRatesTo(TimeUnit.SECONDS)
          .convertDurationsTo(TimeUnit.MILLISECONDS)
          .build()
      )

      def slf4j(r: MetricRegistry, duration: Int, unit: TimeUnit, loggerName: String): Task[Slf4jReporter] =
        Task(
          Slf4jReporter
            .forRegistry(r)
            .outputTo(LoggerFactory.getLogger(loggerName))
            .convertRatesTo(TimeUnit.SECONDS)
            .convertDurationsTo(TimeUnit.MILLISECONDS)
            .build()
        )

      def csv(r: MetricRegistry, file: File, locale: ju.Locale): zio.Task[Reporter] = Task(
        CsvReporter
          .forRegistry(r)
          .formatFor(locale)
          .convertRatesTo(TimeUnit.SECONDS)
          .convertDurationsTo(TimeUnit.MILLISECONDS)
          .build(file)
      )

      def graphite(r: MetricRegistry, host: String, port: Int, prefix: String): zio.Task[GraphiteReporter] =
        Task {
          val graphite = new Graphite(new InetSocketAddress(host, port))
          GraphiteReporter
            .forRegistry(r)
            .prefixedWith(prefix)
            .convertRatesTo(TimeUnit.SECONDS)
            .convertDurationsTo(TimeUnit.MILLISECONDS)
            .filter(MetricFilter.ALL)
            .build(graphite)
        }
    })
  }
} 
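Wiring the live layer from Example 21 into a ZIO 1.x program looks roughly like this (the registry and reporting interval are illustrative):

import java.util.concurrent.TimeUnit
import zio.{ Task, ZIO }
import com.codahale.metrics.MetricRegistry
import zio.metrics.dropwizard.reporters._

val registry = new MetricRegistry()
val program: ZIO[Reporters, Throwable, Unit] =
  ZIO.accessM[Reporters](_.get.console(registry)).map(_.start(10, TimeUnit.SECONDS))

val runnable: Task[Unit] = program.provideLayer(Reporters.live)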
Example 22
Source File: package.scala    From zio-metrics   with Apache License 2.0
package zio.metrics.prometheus

import zio.{ Has, Layer, Task, ZLayer }

import io.prometheus.client.CollectorRegistry
import io.prometheus.client.exporter.{ HTTPServer, PushGateway }
import io.prometheus.client.bridge.Graphite
import io.prometheus.client.exporter.common.TextFormat
import io.prometheus.client.exporter.HttpConnectionFactory
import io.prometheus.client.exporter.BasicAuthHttpConnectionFactory
import io.prometheus.client.hotspot.DefaultExports

import java.net.InetSocketAddress
import java.io.StringWriter

package object exporters {

  type Exporters = Has[Exporters.Service]

  object Exporters {
    trait Service {
      def http(r: CollectorRegistry, port: Int): Task[HTTPServer]

      def graphite(r: CollectorRegistry, host: String, port: Int, intervalSeconds: Int): Task[Thread]

      def pushGateway(
        r: CollectorRegistry,
        host: String,
        port: Int,
        jobName: String,
        user: Option[String],
        password: Option[String],
        httpConnectionFactory: Option[HttpConnectionFactory]
      ): Task[Unit]

      def write004(r: CollectorRegistry): Task[String]

      def initializeDefaultExports(r: CollectorRegistry): Task[Unit]
    }

    val live: Layer[Nothing, Exporters] = ZLayer.succeed(new Service {
      def http(r: CollectorRegistry, port: Int): zio.Task[HTTPServer] =
        Task {
          new HTTPServer(new InetSocketAddress(port), r)
        }

      def graphite(r: CollectorRegistry, host: String, port: Int, intervalSeconds: Int): Task[Thread] =
        Task {
          val g = new Graphite(host, port)
          g.start(r, intervalSeconds)
        }

      def pushGateway(
        r: CollectorRegistry,
        host: String,
        port: Int,
        jobName: String,
        user: Option[String],
        password: Option[String],
        httpConnectionFactory: Option[HttpConnectionFactory]
      ): Task[Unit] =
        Task {
          val pg = new PushGateway(s"$host:$port")

          if (user.isDefined)
            for {
              u <- user
              p <- password
            } yield pg.setConnectionFactory(new BasicAuthHttpConnectionFactory(u, p))
          else if (httpConnectionFactory.isDefined)
            for {
              conn <- httpConnectionFactory
            } yield pg.setConnectionFactory(conn)

          pg.pushAdd(r, jobName)
        }

      def write004(r: CollectorRegistry): Task[String] =
        Task {
          val writer = new StringWriter
          TextFormat.write004(writer, r.metricFamilySamples)
          writer.toString
        }

      def initializeDefaultExports(r: CollectorRegistry): Task[Unit] =
        Task(DefaultExports.initialize())
    })

    def stopHttp(server: HTTPServer): Task[Unit] =
      Task(server.stop())
  }

} 
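The prometheus Exporters layer from Example 22 is used the same way; a sketch that starts the HTTP exporter, scrapes the text format, and stops the server (the port is illustrative):

import zio.{ Task, ZIO }
import io.prometheus.client.CollectorRegistry
import zio.metrics.prometheus.exporters._

val registry = CollectorRegistry.defaultRegistry
val program =
  for {
    srv  <- ZIO.accessM[Exporters](_.get.http(registry, 9090))
    text <- ZIO.accessM[Exporters](_.get.write004(registry))
    _    <- Exporters.stopHttp(srv)
  } yield text

val runnable: Task[String] = program.provideLayer(Exporters.live)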
Example 23
Source File: DEXExtension.scala    From matcher   with MIT License
package com.wavesplatform.dex.grpc.integration

import java.net.InetSocketAddress
import java.util.concurrent.TimeUnit

import com.wavesplatform.dex.grpc.integration.services._
import com.wavesplatform.dex.grpc.integration.settings.DEXExtensionSettings
import com.wavesplatform.extensions.{Extension, Context => ExtensionContext}
import com.wavesplatform.utils.ScorexLogging
import io.grpc.Server
import io.grpc.netty.NettyServerBuilder
import monix.execution.{ExecutionModel, Scheduler}
import net.ceedubs.ficus.Ficus._
import net.ceedubs.ficus.readers.ArbitraryTypeReader._
import net.ceedubs.ficus.readers.NameMapper

import scala.concurrent.Future

class DEXExtension(context: ExtensionContext) extends Extension with ScorexLogging {

  @volatile
  private var server: Server                            = _
  private var apiService: WavesBlockchainApiGrpcService = _

  implicit val chosenCase: NameMapper = net.ceedubs.ficus.readers.namemappers.implicits.hyphenCase
  implicit private val apiScheduler: Scheduler = Scheduler(
    ec = context.actorSystem.dispatchers.lookup("akka.actor.waves-dex-grpc-scheduler"),
    executionModel = ExecutionModel.AlwaysAsyncExecution
  )

  override def start(): Unit = {
    val settings    = context.settings.config.as[DEXExtensionSettings]("waves.dex.grpc.integration")
    val bindAddress = new InetSocketAddress(settings.host, settings.port)
    apiService = new WavesBlockchainApiGrpcService(context, settings.balanceChangesBatchLinger)
    server = NettyServerBuilder
      .forAddress(bindAddress)
      .permitKeepAliveWithoutCalls(true)
      .permitKeepAliveTime(500, TimeUnit.MILLISECONDS)
      .addService(WavesBlockchainApiGrpc.bindService(apiService, apiScheduler))
      .build()
      .start()

    log.info(s"gRPC DEX extension was bound to $bindAddress")
  }

  override def shutdown(): Future[Unit] = {
    log.info("Shutting down gRPC DEX extension")
    if (server != null) server.shutdownNow()
    Future.successful(())
  }
} 
Example 24
Source File: NodeApiOps.scala    From matcher   with MIT License
package com.wavesplatform.dex.it.api.node

import java.net.InetSocketAddress

import cats.Functor
import cats.syntax.functor._
import com.typesafe.config.Config
import com.wavesplatform.dex.domain.account.Address
import com.wavesplatform.dex.domain.asset.Asset
import com.wavesplatform.dex.domain.asset.Asset.{IssuedAsset, Waves}
import com.wavesplatform.dex.domain.bytes.ByteStr
import com.wavesplatform.dex.it.api.responses.node.{ActivationStatusResponse, AssetBalanceResponse, ConnectedPeersResponse, WavesBalanceResponse}
import com.wavesplatform.dex.it.fp.CanExtract
import com.wavesplatform.wavesj.Transaction

object NodeApiOps {

  final implicit class ExplicitGetNodeApiOps[F[_]: Functor](val self: NodeApi[F])(implicit E: CanExtract[F]) {

    import E.{extract => explicitGet}

    def wavesBalance(address: Address): F[WavesBalanceResponse]                     = explicitGet(self.tryWavesBalance(address))
    def assetBalance(address: Address, asset: IssuedAsset): F[AssetBalanceResponse] = explicitGet(self.tryAssetBalance(address, asset))

    def balance(address: Address, asset: Asset): F[Long] = asset match {
      case Waves          => wavesBalance(address).map(_.balance)
      case x: IssuedAsset => assetBalance(address, x).map(_.balance)
    }

    def connect(toNode: InetSocketAddress): F[Unit] = explicitGet(self.tryConnect(toNode))

    def connectedPeers: F[ConnectedPeersResponse] = explicitGet(self.tryConnectedPeers)

    def broadcast(tx: Transaction): F[Unit] = explicitGet(self.tryBroadcast(tx))

    def transactionInfo(id: ByteStr): F[Option[Transaction]] = self.tryTransactionInfo(id).map {
      case Right(r) => Some(r)
      case Left(e) =>
        if (e.error == 311) None // node's ApiError.TransactionDoesNotExist.id
        else throw new RuntimeException(s"Unexpected error: $e")
    }

    def currentHeight: F[Int] = explicitGet(self.tryCurrentHeight)

    def activationStatus: F[ActivationStatusResponse] = explicitGet(self.tryActivationStatus)
    def config: F[Config]                             = explicitGet(self.tryConfig)
  }
} 
Example 25
Source File: RandomPortUtil.scala    From akka-persistence-dynamodb   with Apache License 2.0
package com.github.j5ik2o.akka.persistence.dynamodb.utils

import java.net.InetSocketAddress
import java.nio.channels.ServerSocketChannel

trait RandomPortUtil {

  def temporaryServerAddress(interface: String = "127.0.0.1"): InetSocketAddress = {
    val serverSocket = ServerSocketChannel.open()
    try {
      serverSocket.socket.bind(new InetSocketAddress(interface, 0))
      val port = serverSocket.socket.getLocalPort
      new InetSocketAddress(interface, port)
    } finally serverSocket.close()
  }

  def temporaryServerHostnameAndPort(interface: String = "127.0.0.1"): (String, Int) = {
    val socketAddress = temporaryServerAddress(interface)
    socketAddress.getHostName -> socketAddress.getPort
  }

  def temporaryServerPort(interface: String = "127.0.0.1"): Int =
    temporaryServerHostnameAndPort(interface)._2
}

object RandomPortUtil extends RandomPortUtil 
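The trick in Example 25 is binding to port 0 so the OS allocates a free ephemeral port; note the socket is closed before the address is returned, so another process could grab the port in the interim. Typical test usage:

val (host, port) = RandomPortUtil.temporaryServerHostnameAndPort()
// start the system under test on s"$host:$port"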
Example 26
Source File: ProxyMessageHandler.scala    From spark-riak-connector   with Apache License 2.0
package com.basho.riak.stub

import java.net.InetSocketAddress
import java.nio.ByteBuffer
import java.nio.channels._

import com.basho.riak.client.core.RiakMessage
import com.basho.riak.client.core.util.HostAndPort
import shaded.com.basho.riak.protobuf.RiakKvPB
import shaded.com.basho.riak.protobuf.RiakMessageCodes._
import shaded.com.google.protobuf.ByteString

import scala.collection.JavaConversions._
import scala.collection.mutable.ArrayBuffer


class ProxyMessageHandler(hostAndPort: HostAndPort) extends RiakMessageHandler {

  private final val riakAddress = new InetSocketAddress(hostAndPort.getHost, hostAndPort.getPort)

  override def handle(context: ClientHandler.Context, input: RiakMessage): Iterable[RiakMessage] = input.getCode match {
    // coverage plan received from real Riak node must be modified to replace real node's host and port with proxy
    case MSG_CoverageReq => forwardAndTransform(context, input) { output =>
      val resp = RiakKvPB.RpbCoverageResp.parseFrom(output.getData)
      val modified = RiakKvPB.RpbCoverageResp.newBuilder(resp)
        .clearEntries()
        .addAllEntries(resp.getEntriesList.map { ce =>
          val ceBuilder = RiakKvPB.RpbCoverageEntry.newBuilder(ce)
          if (ce.getIp.toStringUtf8 == hostAndPort.getHost && ce.getPort == hostAndPort.getPort) {
            val localAddress = context.channel.asInstanceOf[NetworkChannel]
              .getLocalAddress.asInstanceOf[InetSocketAddress]
            ceBuilder.setIp(ByteString.copyFromUtf8(localAddress.getHostString))
            ceBuilder.setPort(localAddress.getPort)
          }
          ceBuilder.build()
        }).build()
      new RiakMessage(output.getCode, modified.toByteArray)
    }
    case _ => forwardMessage(context, input)
  }

  private def forwardMessage(context: ClientHandler.Context, input: RiakMessage): Iterable[RiakMessage] = {
    def readRiakResponse(channel: SocketChannel, out: List[RiakMessage] = Nil): Iterable[RiakMessage] = out match {
      case _ if !isDoneReceived(out, input) => readRiakResponse(channel, out ++ readSocket(channel))
      case _ => out
    }

    val channel = SocketChannel.open(riakAddress)
    try {
      // forward request to real Riak node
      assert(channel.write(RiakMessageEncoder.encode(input)) > 0)

      // read response for forwarded request from real Riak node
      readRiakResponse(channel)
    } finally {
      channel.close()
    }
  }

  private def readSocket(channel: SocketChannel): Iterable[RiakMessage] = {
    var accumulator = ByteBuffer.allocateDirect(0)

    var out = ArrayBuffer[RiakMessage]()
    while (out.isEmpty || accumulator.hasRemaining) {
      // try to parse riak message from bytes in accumulator buffer
      RiakMessageEncoder.decode(accumulator) match {
        case Some(x) =>
          accumulator = accumulator.slice()
          out += x
        case None =>
          // read next chunk of data from channel and add it into accumulator
          val in = ByteBuffer.allocateDirect(1024) // scalastyle:ignore
          channel.read(in)
          accumulator = ByteBuffer
            .allocate(accumulator.rewind().limit() + in.flip().limit())
            .put(accumulator)
            .put(in)
          accumulator.rewind()
          in.clear()
      }
    }
    out
  }

  private def isDoneReceived(out: Iterable[RiakMessage], input: RiakMessage): Boolean = input.getCode match {
    case MSG_IndexReq => out.foldLeft[Boolean](false)((a, m) => a || RiakKvPB.RpbIndexResp.parseFrom(m.getData).getDone)
    case _ => out.nonEmpty
  }

  private def forwardAndTransform(context: ClientHandler.Context, input: RiakMessage
                                 )(transform: RiakMessage => RiakMessage
                                 ): Iterable[RiakMessage] = forwardMessage(context, input).map(transform(_))

  override def onRespond(input: RiakMessage, output: Iterable[RiakMessage]): Unit = {}
} 
Example 27
Source File: RiakNodeStub.scala    From spark-riak-connector   with Apache License 2.0
package com.basho.riak.stub

import java.net.InetSocketAddress
import java.nio.ByteBuffer
import java.nio.channels.{AsynchronousCloseException, AsynchronousServerSocketChannel, AsynchronousSocketChannel, CompletionHandler}

import com.basho.riak.client.core.util.HostAndPort
import com.basho.riak.stub.RiakNodeStub._
import org.slf4j.LoggerFactory

class RiakNodeStub(val host: String, val port: Int, messageHandler: RiakMessageHandler) {

  private final val localAddress = new InetSocketAddress(host, port)
  private final val clientHandler = new ClientHandler(messageHandler)

  private var serverChannel: AsynchronousServerSocketChannel = _
  private var clients: List[AsynchronousSocketChannel] = Nil

  def start(): HostAndPort = {
    serverChannel = AsynchronousServerSocketChannel.open()
    require(serverChannel.isOpen)

    serverChannel.bind(localAddress)
    serverChannel.accept(serverChannel, new CompletionHandler[AsynchronousSocketChannel, AsynchronousServerSocketChannel]() {
      override def completed(client: AsynchronousSocketChannel, server: AsynchronousServerSocketChannel): Unit = {
        logger.info(s"Incoming connection: ${SocketUtils.serverConnectionAsStr(client)}")
        this.synchronized {
          clients = client :: clients
        }

        val buffer = ByteBuffer.allocateDirect(1024) // scalastyle:ignore
        client.read(buffer, (client, buffer), clientHandler)

        server.accept(server, this)
      }

      override def failed(exc: Throwable, serverChannel: AsynchronousServerSocketChannel): Unit = exc match {
        case _: AsynchronousCloseException => // server channel closed during stop(); nothing to do
        case _ => logger.error(s"Something went wrong: ${serverChannel.toString}", exc)
      }
    })

    HostAndPort.fromParts(
      serverChannel.getLocalAddress.asInstanceOf[InetSocketAddress].getHostString,
      serverChannel.getLocalAddress.asInstanceOf[InetSocketAddress].getPort)
  }

  def stop(): Unit = this.synchronized {
    Option(serverChannel).foreach(_.close)
    clients.foreach(clientHandler.disconnectClient)
  }
}

object RiakNodeStub {
  val logger = LoggerFactory.getLogger(classOf[RiakNodeStub])
  final val DEFAULT_HOST = "localhost"

  def apply(host: String, port: Int, messageHandler: RiakMessageHandler): RiakNodeStub = new RiakNodeStub(host, port, messageHandler)

  def apply(port: Int, messageHandler: RiakMessageHandler): RiakNodeStub = RiakNodeStub(DEFAULT_HOST, port, messageHandler)

  def apply(messageHandler: RiakMessageHandler): RiakNodeStub = RiakNodeStub(DEFAULT_HOST, 0, messageHandler)
} 
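Taken together with the ProxyMessageHandler from the previous example, a hypothetical test setup could look like this (the Riak host and port are made up):

val handler = new ProxyMessageHandler(HostAndPort.fromParts("riak.example.com", 8087))
val stub = RiakNodeStub(handler) // port 0: binds to an ephemeral port on localhost
val proxyAddress = stub.start()  // the actual host:port the stub is listening on
// ... point the Riak client under test at proxyAddress ...
stub.stop()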
Example 28
Source File: SocketStreamingDataSource.scala    From spark-riak-connector   with Apache License 2.0 5 votes vote down vote up
package com.basho.riak.spark.streaming

import java.net.InetSocketAddress
import java.nio.channels.{AsynchronousCloseException, AsynchronousServerSocketChannel, AsynchronousSocketChannel, CompletionHandler}

import com.basho.riak.stub.SocketUtils
import org.apache.spark.Logging

class SocketStreamingDataSource extends Logging {

  private var serverChannel: AsynchronousServerSocketChannel = _
  private var clientChannel: AsynchronousSocketChannel = _

  def start(writeToSocket: AsynchronousSocketChannel => Unit): Int = {
    serverChannel = AsynchronousServerSocketChannel.open()
    require(serverChannel.isOpen)

    serverChannel.bind(new InetSocketAddress(0))
    serverChannel.accept(serverChannel, new CompletionHandler[AsynchronousSocketChannel, AsynchronousServerSocketChannel]() {
      override def completed(client: AsynchronousSocketChannel, server: AsynchronousServerSocketChannel): Unit = {
        logInfo(s"Incoming connection: ${SocketUtils.serverConnectionAsStr(client)}")
        clientChannel = client

        writeToSocket(client)

        client.isOpen match {
          case true =>
            val connectionString = SocketUtils.serverConnectionAsStr(client)
            client.shutdownInput()
            client.shutdownOutput()
            client.close()
            logInfo(s"Client $connectionString was gracefully disconnected")
          case false => // client is already closed - do nothing
        }
      }

      override def failed(exc: Throwable, serverChannel: AsynchronousServerSocketChannel): Unit = exc match {
        case _: AsynchronousCloseException => // server channel closed during stop(); nothing to do
        case _ => logError(s"Something went wrong: ${serverChannel.toString}", exc)
      }
    })

    serverChannel.getLocalAddress.asInstanceOf[InetSocketAddress].getPort
  }

  def stop(): Unit = {
    Option(clientChannel).foreach(_.close())
    Option(serverChannel).foreach(_.close())
  }
} 
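Since start binds to port 0 and returns the ephemeral port, a caller might use it like this (the payload is illustrative):

val source = new SocketStreamingDataSource
val port = source.start { channel =>
  val bytes = java.nio.ByteBuffer.wrap("hello".getBytes("UTF-8"))
  channel.write(bytes).get() // block until the asynchronous write completes
  ()
}
// ... connect a streaming consumer to localhost:port ...
source.stop()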
Example 29
Source File: ZookeeperLocalServer.scala    From daf   with BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
package it.teamdigitale.miniclusters

import java.io.File
import java.net.InetSocketAddress

import org.apache.commons.io.FileUtils
import org.apache.zookeeper.server.{ServerCnxnFactory, ZooKeeperServer}

class ZookeeperLocalServer(port: Int) {

  var zkServer: Option[ServerCnxnFactory] = None

  def start(): Unit = {
    if (zkServer.isEmpty) {

      val dataDirectory = System.getProperty("java.io.tmpdir")
      val dir = new File(dataDirectory, "zookeeper")
      println(dir.toString)
      if (dir.exists())
        FileUtils.deleteDirectory(dir)

      try {
        val tickTime = 5000
        val server = new ZooKeeperServer(dir.getAbsoluteFile, dir.getAbsoluteFile, tickTime)
        val factory = ServerCnxnFactory.createFactory
        factory.configure(new InetSocketAddress("0.0.0.0", port), 1024)
        factory.startup(server)
        println("ZOOKEEPER server up!!")
        zkServer = Some(factory)

      } catch {
        case ex: Exception =>
          System.err.println(s"Error in zookeeper server: ${ex.getMessage}")
          ex.printStackTrace()
      } finally { dir.deleteOnExit() }
    } else println("ZOOKEEPER is already up")
  }

  def stop(): Unit = {
    zkServer.foreach(_.shutdown())
    println("ZOOKEEPER server stopped")
  }
} 
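A minimal usage sketch (the port value is arbitrary):

val zookeeper = new ZookeeperLocalServer(2181)
zookeeper.start() // serves on 0.0.0.0:2181
// ... run tests against localhost:2181 ...
zookeeper.stop()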
Example 30
Source File: AppServer.scala    From keycloak-benchmark   with Apache License 2.0 5 votes vote down vote up
package org.jboss.perf

import java.net.InetSocketAddress
import java.util.concurrent.TimeUnit
import java.util.concurrent.atomic.LongAdder
import javax.ws.rs.core.Response
import javax.ws.rs.{GET, POST, Path}

import com.sun.net.httpserver.HttpServer
import org.jboss.resteasy.plugins.server.sun.http.HttpContextBuilder
import org.keycloak.constants.AdapterConstants


object AppServer {
  private val address: Array[String] = Options.app.split(":")
  private val httpServer = HttpServer.create(new InetSocketAddress(address(0), address(1).toInt), 100)
  private val contextBuilder = new HttpContextBuilder()
  contextBuilder.getDeployment().getActualResourceClasses().add(classOf[AppServer])
  private val context = contextBuilder.bind(httpServer)

  private val logouts = new LongAdder()
  private val versions = new LongAdder()
  private val pushNotBefores = new LongAdder()
  private val queryBearerTokens = new LongAdder()
  private val testAvailables = new LongAdder()

  def main(args: Array[String]): Unit = {
    httpServer.start()
    val timeout = Options.rampUp + Options.duration + Options.rampDown + 10
    Thread.sleep(TimeUnit.SECONDS.toMillis(timeout))
    httpServer.stop(0)
    printf("AppServer stats:%n%8d logout%n%8d version%n%8d pushNotBefore%n%8d queryBearerToken%n%8d testAvailables%n",
      logouts.longValue(), versions.longValue(), pushNotBefores.longValue(), queryBearerTokens.longValue(), testAvailables.longValue())
  }
}

@Path("/admin")
class AppServer {

  @GET
  @POST
  @Path(AdapterConstants.K_LOGOUT)
  def logout(): Response = {
    AppServer.logouts.increment()
    Response.ok().build()
  }

  @GET
  @POST
  @Path(AdapterConstants.K_VERSION)
  def version(): Response = {
    AppServer.versions.increment()
    Response.ok().build()
  }

  @GET
  @POST
  @Path(AdapterConstants.K_PUSH_NOT_BEFORE)
  def pushNotBefore(): Response = {
    AppServer.pushNotBefores.increment()
    Response.ok().build()
  }

  @GET
  @POST
  @Path(AdapterConstants.K_QUERY_BEARER_TOKEN)
  def queryBearerToken(): Response = {
    AppServer.queryBearerTokens.increment()
    Response.ok().build()
  }

  @GET
  @POST
  @Path(AdapterConstants.K_TEST_AVAILABLE)
  def testAvailable(): Response = {
    AppServer.testAvailables.increment()
    Response.ok().build()
  }

} 
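Options.app is assumed to hold a plain host:port string. A hedged sketch of that parsing step in isolation (parseHostPort is a made-up helper and does not handle IPv6 literals):

def parseHostPort(s: String): java.net.InetSocketAddress = {
  val Array(host, port) = s.split(":")
  new java.net.InetSocketAddress(host, port.toInt)
}

parseHostPort("127.0.0.1:8080") // address 127.0.0.1, port 8080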
Example 31
Source File: Server.scala    From seals   with Apache License 2.0 5 votes vote down vote up
package com.example.server

import java.net.{ InetSocketAddress, InetAddress }
import java.nio.channels.AsynchronousChannelGroup
import java.util.concurrent.Executors

import scala.concurrent.duration._

import cats.implicits._
import cats.effect.{ IO, IOApp, ExitCode, Resource, Blocker }

import fs2.{ Stream, Chunk }
import fs2.io.tcp

import scodec.bits.BitVector
import scodec.Codec

import dev.tauri.seals.scodec.Codecs._

import com.example.proto._

object Server extends IOApp {

  final val bufferSize = 32 * 1024
  final val timeout = Some(2.seconds)
  final val maxClients = 200
  final val port = 8080

  val rnd = new scala.util.Random

  def addr(port: Int): InetSocketAddress =
    new InetSocketAddress(InetAddress.getLoopbackAddress, port)

  override def run(args: List[String]): IO[ExitCode] = {
    Blocker[IO].use { bl =>
      tcp.SocketGroup[IO](bl).use { sg =>
        serve(port, sg).compile.drain.as(ExitCode.Success)
      }
    }
  }

  def serve(port: Int, sg: tcp.SocketGroup): Stream[IO, Unit] = {
    Stream.resource(sg.serverResource[IO](addr(port))).flatMap {
      case (localAddr, sockets) =>
        val s = sockets.map { socket =>
          Stream.resource(socket).flatMap { socket =>
            val bvs: Stream[IO, BitVector] = socket.reads(bufferSize, timeout).chunks.map(ch => BitVector.view(ch.toArray))
            val tsk: IO[BitVector] = bvs.compile.toVector.map(_.foldLeft(BitVector.empty)(_ ++ _))
            val request: IO[Request] = tsk.flatMap { bv =>
              Codec[Request].decode(bv).fold(
                err => IO.raiseError(new Exception(err.toString)),
                result => IO.pure(result.value)
              )
            }
            val response: IO[Response] = request.flatMap(logic)
            val encoded: Stream[IO, Byte] = Stream.eval(response)
              .map(r => Codec[Response].encode(r).require)
              .flatMap { bv => Stream.chunk(Chunk.bytes(bv.bytes.toArray)) }
            encoded.through(socket.writes(timeout)).onFinalize(socket.endOfOutput)
          }
        }
        s.parJoin[IO, Unit](maxClients)
    }
  }

  def logic(req: Request): IO[Response] = req match {
    case RandomNumber(min, max) =>
      if (min < max) {
        IO {
          val v = rnd.nextInt(max - min + 1) + min
          Number(v)
        }
      } else if (min === max) {
        IO.pure(Number(min))
      } else {
        IO.raiseError(new IllegalArgumentException("min must not be greater than max"))
      }
    case ReSeed(s) =>
      IO {
        rnd.setSeed(s)
        Ok
      }
  }
} 
Example 32
Source File: Client.scala    From seals   with Apache License 2.0 5 votes vote down vote up
package com.example.lib

import java.net.{ InetAddress, InetSocketAddress }

import scala.concurrent.Future
import scala.concurrent.Await
import scala.concurrent.duration._

import cats.effect.{ IO, ContextShift }

import akka.NotUsed
import akka.actor.ActorSystem
import akka.stream._
import akka.stream.scaladsl._
import akka.util.{ ByteString }

import scodec.bits.BitVector
import scodec.stream.{ StreamEncoder, StreamDecoder }

import fs2.interop.reactivestreams._

import dev.tauri.seals.scodec.StreamCodecs._
import dev.tauri.seals.scodec.StreamCodecs.{ pipe => decPipe }

import Protocol.v1.{ Request, Response, Seed, Random }

object Client {

  val reqCodec: StreamEncoder[Request] = streamEncoderFromReified[Request]
  val resCodec: StreamDecoder[Response] = streamDecoderFromReified[Response]

  def main(args: Array[String]): Unit = {
    implicit val sys: ActorSystem = ActorSystem("ClientSystem")
    implicit val mat: Materializer = ActorMaterializer()
    try {
      val resp = Await.result(client(1234), 10.seconds)
      println(resp)
    } finally {
      sys.terminate()
    }
  }

  def client(port: Int)(implicit sys: ActorSystem, mat: Materializer): Future[Vector[Response]] = {
    val addr = new InetSocketAddress(InetAddress.getLoopbackAddress, port)
    Tcp().outgoingConnection(addr).joinMat(logic)(Keep.right).run()
  }

  def logic(implicit sys: ActorSystem): Flow[ByteString, ByteString, Future[Vector[Response]]] = {

    implicit val cs: ContextShift[IO] = IO.contextShift(sys.dispatcher)

    val requests = fs2.Stream(Seed(0xabcdL), Random(1, 100)).covary[IO]
    val source = Source
      .fromPublisher(reqCodec.encode(requests).toUnicastPublisher())
      .map(bv => ByteString.fromArrayUnsafe(bv.toByteArray))

    // TODO: this would be much less ugly, if we had a decoder `Flow`
    val buffer = fs2.concurrent.Queue.unbounded[IO, Option[BitVector]].unsafeRunSync()
    val decode: Flow[ByteString, Response, NotUsed] = Flow.fromSinkAndSource(
      Sink.onComplete { _ =>
        buffer.enqueue1(None).unsafeRunSync()
      }.contramap[ByteString] { x =>
        buffer.enqueue1(Some(BitVector.view(x.toArray))).unsafeRunSync()
      },
      Source.fromPublisher(buffer
        .dequeue
        .unNoneTerminate
        .through(decPipe[IO, Response])
        .toUnicastPublisher()
      )
    )
    val sink: Sink[ByteString, Future[Vector[Response]]] = decode.toMat(
      Sink.fold(Vector.empty[Response])(_ :+ _)
    )(Keep.right)

    Flow.fromSinkAndSourceMat(sink, source)(Keep.left)
  }
} 
Example 33
Source File: Server.scala    From seals   with Apache License 2.0 5 votes vote down vote up
package com.example.lib

import java.net.{ InetSocketAddress, InetAddress }

import scala.concurrent.duration._

import cats.implicits._
import cats.effect.{ IO, IOApp, ExitCode, Blocker }

import fs2.{ Stream, Chunk }
import fs2.io.tcp

import scodec.bits.BitVector
import scodec.stream.{ StreamEncoder, StreamDecoder }

import dev.tauri.seals.scodec.StreamCodecs._
import dev.tauri.seals.scodec.StreamCodecs.{ pipe => decPipe }

import Protocol.v1.{ Request, Response, Random, Seed, RandInt, Seeded }

object Server extends IOApp {

  final val bufferSize = 32 * 1024
  final val timeout = Some(2.seconds)
  final val maxClients = 200

  val rnd = new scala.util.Random

  def addr(port: Int): InetSocketAddress =
    new InetSocketAddress(InetAddress.getLoopbackAddress, port)

  val reqCodec: StreamDecoder[Request] = streamDecoderFromReified[Request]
  val resCodec: StreamEncoder[Response] = streamEncoderFromReified[Response]

  override def run(args: List[String]): IO[ExitCode] = {
    Blocker[IO].use { bl =>
      tcp.SocketGroup[IO](bl).use { sg =>
        serve(1234, sg).compile.drain.as(ExitCode.Success)
      }
    }
  }

  def serve(port: Int, sg: tcp.SocketGroup): Stream[IO, Unit] =
    serveAddr(port, sg).as(())

  def serveAddr(port: Int, sg: tcp.SocketGroup): Stream[IO, InetSocketAddress] = {
    Stream.resource(sg.serverResource[IO](addr(port))).flatMap {
      case (localAddr, sockets) =>
        val x = sockets.flatMap { socket =>
          Stream.resource(socket).map { socket =>
            val bvs: Stream[IO, BitVector] = socket.reads(bufferSize, timeout).chunks.map(ch => BitVector.view(ch.toArray))
            val requests: Stream[IO, Request] = bvs.through(decPipe[IO, Request])
            val responses: Stream[IO, Response] = requests.flatMap(req => Stream.eval(logic(req)))
            val encoded: Stream[IO, Byte] = resCodec.encode(responses).flatMap { bv =>
              Stream.chunk(Chunk.bytes(bv.bytes.toArray))
            }

            encoded.through(socket.writes(timeout)).onFinalize(socket.endOfOutput)
          }
        }

        Stream.emit(localAddr) ++ x.parJoin(maxClients).drain
    }
  }

  def logic(req: Request): IO[Response] = req match {
    case Random(min, max) =>
      if (min < max) {
        IO {
          val v = rnd.nextInt(max - min + 1) + min
          RandInt(v)
        }
      } else if (min === max) {
        IO.pure(RandInt(min))
      } else {
        IO.raiseError(new IllegalArgumentException("min must not be greater than max"))
      }
    case Seed(s) =>
      IO {
        rnd.setSeed(s)
        Seeded
      }
  }
} 
Example 34
Source File: CassandraServerSpecLike.scala    From Spark2Cassandra   with Apache License 2.0 5 votes vote down vote up
package com.github.jparkie.spark.cassandra

import java.net.{ InetAddress, InetSocketAddress }

import com.datastax.driver.core.Session
import com.datastax.spark.connector.cql.CassandraConnector
import org.cassandraunit.utils.EmbeddedCassandraServerHelper
import org.scalatest.{ BeforeAndAfterAll, Suite }

trait CassandraServerSpecLike extends BeforeAndAfterAll { this: Suite =>
  // Remove protected modifier because of SharedSparkContext.
  override def beforeAll(): Unit = {
    super.beforeAll()

    EmbeddedCassandraServerHelper.startEmbeddedCassandra()
  }

  // Remove protected modifier because of SharedSparkContext.
  override def afterAll(): Unit = {
    EmbeddedCassandraServerHelper.cleanEmbeddedCassandra()

    super.afterAll()
  }

  def getClusterName: String = {
    EmbeddedCassandraServerHelper.getClusterName
  }

  def getHosts: Set[InetAddress] = {
    val temporaryAddress =
      new InetSocketAddress(EmbeddedCassandraServerHelper.getHost, EmbeddedCassandraServerHelper.getNativeTransportPort)
        .getAddress

    Set(temporaryAddress)
  }

  def getNativeTransportPort: Int = {
    EmbeddedCassandraServerHelper.getNativeTransportPort
  }

  def getRpcPort: Int = {
    EmbeddedCassandraServerHelper.getRpcPort
  }

  def getCassandraConnector: CassandraConnector = {
    CassandraConnector(hosts = getHosts, port = getNativeTransportPort)
  }

  def createKeyspace(session: Session, keyspace: String): Unit = {
    session.execute(
      s"""CREATE KEYSPACE "$keyspace"
          |WITH REPLICATION = { 'class' : 'SimpleStrategy', 'replication_factor' : 1 };
       """.stripMargin
    )
  }
} 
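getHosts relies on the two-argument InetSocketAddress constructor resolving the hostname eagerly; createUnresolved is the alternative when resolution should be deferred:

new InetSocketAddress("localhost", 9042).getAddress                    // resolved, e.g. 127.0.0.1
InetSocketAddress.createUnresolved("example.invalid", 9042).getAddress // null: never resolved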
Example 35
Source File: KafkaServer.scala    From akka_streams_tutorial   with MIT License 5 votes vote down vote up
package alpakka.env

import java.io.File
import java.net.InetSocketAddress
import java.nio.file.{Files, Paths}
import java.util.Properties

import kafka.server.{KafkaConfig, KafkaServerStartable}
import org.apache.commons.io.FileUtils
import org.apache.zookeeper.server.quorum.QuorumPeerConfig
import org.apache.zookeeper.server.{ServerConfig, ZooKeeperServerMain}


object KafkaServer extends App {

  val zookeeperPort = 2181

  val kafkaLogs = "/tmp/kafka-logs"
  val kafkaLogsPath = Paths.get(kafkaLogs)

  // See: https://stackoverflow.com/questions/59592518/kafka-broker-doesnt-find-cluster-id-and-creates-new-one-after-docker-restart/60864763#comment108382967_60864763
  def fix25Behaviour(): Unit = {
    val fileWithConflictingContent = kafkaLogsPath.resolve("meta.properties").toFile
    if (fileWithConflictingContent.exists()) FileUtils.forceDelete(fileWithConflictingContent)
  }

  def removeKafkaLogs(): Unit = {
    if (kafkaLogsPath.toFile.exists()) FileUtils.forceDelete(kafkaLogsPath.toFile)
  }

  // Keeps the persistent data
  fix25Behaviour()
  // If everything fails
  //removeKafkaLogs()

  val quorumConfiguration = new QuorumPeerConfig {
    // Since we do not run a cluster, we are not interested in zookeeper data
    override def getDataDir: File = Files.createTempDirectory("zookeeper").toFile
    override def getDataLogDir: File = Files.createTempDirectory("zookeeper-logs").toFile
    override def getClientPortAddress: InetSocketAddress = new InetSocketAddress(zookeeperPort)
  }

  class StoppableZooKeeperServerMain extends ZooKeeperServerMain {
    def stop(): Unit = shutdown()
  }

  val zooKeeperServer = new StoppableZooKeeperServerMain()

  val zooKeeperConfig = new ServerConfig()
  zooKeeperConfig.readFrom(quorumConfiguration)

  val zooKeeperThread = new Thread {
    override def run(): Unit = zooKeeperServer.runFromConfig(zooKeeperConfig)
  }

  zooKeeperThread.start()

  val kafkaProperties = new Properties()
  kafkaProperties.put("zookeeper.connect", s"localhost:$zookeeperPort")
  kafkaProperties.put("broker.id", "0")
  kafkaProperties.put("offsets.topic.replication.factor", "1")
  kafkaProperties.put("log.dirs", kafkaLogs)
  kafkaProperties.put("delete.topic.enable", "true")
  kafkaProperties.put("group.initial.rebalance.delay.ms", "0")
  kafkaProperties.put("transaction.state.log.min.isr", "1")
  kafkaProperties.put("transaction.state.log.replication.factor", "1")
  kafkaProperties.put("zookeeper.connection.timeout.ms", "6000")
  kafkaProperties.put("num.partitions", "10")

  val kafkaConfig = KafkaConfig.fromProps(kafkaProperties)

  val kafka = new KafkaServerStartable(kafkaConfig)

  println("About to start...")
  kafka.startup()

  scala.sys.addShutdownHook{
    println("About to shutdown...")
    kafka.shutdown()
    kafka.awaitShutdown()
    zooKeeperServer.stop()
  }

  zooKeeperThread.join()
} 
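getClientPortAddress uses the single-argument constructor, which binds ZooKeeper to the wildcard address; the two forms below are equivalent:

import java.net.{InetAddress, InetSocketAddress}

new InetSocketAddress(2181)                                   // wildcard address, port 2181
new InetSocketAddress(InetAddress.getByName("0.0.0.0"), 2181) // the same, spelled out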
Example 36
Source File: KafkaTestUtils.scala    From spark-kafka-writer   with Apache License 2.0 5 votes vote down vote up
package com.github.benfradet.spark.kafka.writer

import java.io.File
import java.net.InetSocketAddress
import java.util.Arrays.asList
import java.util.Properties

import kafka.server.{KafkaConfig, KafkaServerStartable}
import org.apache.kafka.clients.admin.{AdminClient, NewTopic}
import org.apache.zookeeper.server.{NIOServerCnxnFactory, ZooKeeperServer}

import scala.util.Random

class KafkaTestUtils {
  // zk
  private val zkHost = "localhost"
  private val zkPort = 2181
  // Restored for completeness (assumed definition): the ZooKeeper connect string used in brokerProps below.
  private val zkAddress = s"$zkHost:$zkPort"
  private var zk: EmbeddedZookeeper = _
  private var zkReady = false

  // kafka
  private val brokerHost = "localhost"
  private val brokerPort = 9092
  private var kafkaServer: KafkaServerStartable = _
  private var topicCountMap = Map.empty[String, Int]
  private var brokerReady = false
  private var kafkaAdminClient: AdminClient = _

  // Creates each topic with one partition and replication factor 1, pausing
  // briefly so the broker has time to register it.
  @scala.annotation.varargs
  def createTopics(topics: String*): Unit =
    for (topic <- topics) {
      kafkaAdminClient.createTopics(asList(new NewTopic(topic, 1, 1: Short)))
      Thread.sleep(1000)
      topicCountMap = topicCountMap + (topic -> 1)
    }

  private def brokerProps: Properties = {
    val props = new Properties
    props.put("broker.id", "0")
    props.put("host.name", brokerHost)
    props.put("log.dir",
      {
        val dir = System.getProperty("java.io.tmpdir") +
          "/logDir-" + new Random().nextInt(Int.MaxValue)
        val f = new File(dir)
        f.mkdirs()
        dir
      }
    )
    props.put("port", brokerPort.toString)
    props.put("zookeeper.connect", zkAddress)
    props.put("zookeeper.connection.timeout.ms", "10000")
    props.put("offsets.topic.replication.factor", "1")
    props
  }

  private class EmbeddedZookeeper(hostname: String, port: Int) {
    private val snapshotDir = {
      val f = new File(System.getProperty("java.io.tmpdir"),
        "snapshotDir-" + Random.nextInt(Int.MaxValue))
      f.mkdirs()
      f
    }
    private val logDir = {
      val f = new File(System.getProperty("java.io.tmpdir"),
        "logDir-" + Random.nextInt(Int.MaxValue))
      f.mkdirs()
      f
    }

    private val factory = {
      val zkTickTime = 500
      val zk = new ZooKeeperServer(snapshotDir, logDir, zkTickTime)
      val f = new NIOServerCnxnFactory
      val maxCnxn = 16
      f.configure(new InetSocketAddress(hostname, port), maxCnxn)
      f.startup(zk)
      f
    }

    def shutdown(): Unit = {
      factory.shutdown()
      snapshotDir.delete()
      logDir.delete()
      ()
    }
  }
} 
Example 37
Source File: PrioritiesCalculator.scala    From EncryCore   with GNU General Public License v3.0 5 votes vote down vote up
package encry.network

import java.net.InetSocketAddress

import com.typesafe.scalalogging.StrictLogging
import encry.network.PrioritiesCalculator.PeersPriorityStatus
import encry.network.PrioritiesCalculator.PeersPriorityStatus.PeersPriorityStatus._
import encry.network.PrioritiesCalculator.PeersPriorityStatus._
import encry.settings.NetworkSettings

import scala.concurrent.duration._

final case class PrioritiesCalculator(networkSettings: NetworkSettings,
                                      private val peersNetworkStatistic: Map[InetSocketAddress, (Requested, Received)])
  extends StrictLogging {

  val updatingStatisticTime: FiniteDuration = (networkSettings.deliveryTimeout._1 * networkSettings.maxDeliveryChecks).seconds

  def incrementRequest(peer: InetSocketAddress): PrioritiesCalculator = {
    val (requested, received): (Requested, Received) = peersNetworkStatistic.getOrElse(peer, (Requested(), Received()))
    val newRequested: Requested = requested.increment
    logger.debug(s"Updating request parameter from $peer. Old is ($requested, $received). New one is: ($newRequested, $received)")
    PrioritiesCalculator(networkSettings, peersNetworkStatistic.updated(peer, (newRequested, received)))
  }

  def incrementReceive(peer: InetSocketAddress): PrioritiesCalculator = {
    val (requested, received): (Requested, Received) = peersNetworkStatistic.getOrElse(peer, (Requested(), Received()))
    val newReceived: Received = received.increment
    logger.debug(s"Updating received parameter from $peer. Old is ($requested, $received). New one is: ($requested, $newReceived)")
    PrioritiesCalculator(networkSettings, peersNetworkStatistic.updated(peer, (requested, newReceived)))
  }

  def decrementRequest(peer: InetSocketAddress): PrioritiesCalculator = {
    val (requested, received): (Requested, Received) = peersNetworkStatistic.getOrElse(peer, (Requested(), Received()))
    val newRequested: Requested = requested.decrement
    logger.debug(s"Decrement request parameter from $peer. Old is ($requested, $received). New one is: ($newRequested, $received)")
    PrioritiesCalculator(networkSettings, peersNetworkStatistic.updated(peer, (newRequested, received)))
  }

  def incrementRequestForNModifiers(peer: InetSocketAddress, modifiersQty: Int): PrioritiesCalculator = {
    val (requested, received): (Requested, Received) = peersNetworkStatistic.getOrElse(peer, (Requested(), Received()))
    val newRequested: Requested = requested.incrementForN(modifiersQty)
    logger.debug(s"Updating request parameter from $peer. Old is ($requested, $received). New one is: ($newRequested, $received)")
    PrioritiesCalculator(networkSettings, peersNetworkStatistic.updated(peer, (newRequested, received)))
  }

  def accumulatePeersStatistic: (Map[InetSocketAddress, PeersPriorityStatus], PrioritiesCalculator) = {
    val updatedStatistic: Map[InetSocketAddress, PeersPriorityStatus] = peersNetworkStatistic.map {
      case (peer, (requested, received)) =>
        logger.info(s"peer: $peer: received: $received, requested: $requested")
        val priority: PeersPriorityStatus = PeersPriorityStatus.calculateStatuses(received, requested)
        peer -> priority
    }
    logger.info(s"Accumulated peers statistic. Current stats are: ${updatedStatistic.mkString(",")}")
    (updatedStatistic, PrioritiesCalculator(networkSettings))
  }
}

object PrioritiesCalculator {

  final case class AccumulatedPeersStatistic(statistic: Map[InetSocketAddress, PeersPriorityStatus])

  object PeersPriorityStatus {

    sealed trait PeersPriorityStatus
    object PeersPriorityStatus {
      case object HighPriority extends PeersPriorityStatus
      case object LowPriority extends PeersPriorityStatus
      case object InitialPriority extends PeersPriorityStatus
      case object BadNode extends PeersPriorityStatus
    }

    final case class Received(received: Int = 0) extends AnyVal {
      def increment: Received = Received(received + 1)
    }

    final case class Requested(requested: Int = 0) extends AnyVal {
      def increment: Requested = Requested(requested + 1)

      def decrement: Requested = Requested(requested - 1)

      def incrementForN(n: Int): Requested = Requested(requested + n)
    }

    private val criterionForHighP: Double = 0.75
    private val criterionForLowP: Double  = 0.50

    def calculateStatuses(res: Received, req: Requested): PeersPriorityStatus =
      res.received.toDouble / req.requested match {
        case t if t >= criterionForHighP => HighPriority
        case t if t >= criterionForLowP  => LowPriority
        case _                           => BadNode
      }
  }

  def apply(networkSettings: NetworkSettings): PrioritiesCalculator =
    PrioritiesCalculator(networkSettings, Map.empty[InetSocketAddress, (Requested, Received)])
} 
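The thresholds translate into statuses as follows (a worked check; note that a peer with zero requests produces NaN, which falls through to BadNode):

import encry.network.PrioritiesCalculator.PeersPriorityStatus
import encry.network.PrioritiesCalculator.PeersPriorityStatus.{Received, Requested}

PeersPriorityStatus.calculateStatuses(Received(8), Requested(10)) // HighPriority: 0.8 >= 0.75
PeersPriorityStatus.calculateStatuses(Received(6), Requested(10)) // LowPriority:  0.6 >= 0.50
PeersPriorityStatus.calculateStatuses(Received(2), Requested(10)) // BadNode
PeersPriorityStatus.calculateStatuses(Received(0), Requested(0))  // BadNode: 0.0 / 0 is NaN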
Example 38
Source File: ConnectedPeersCollection.scala    From EncryCore   with GNU General Public License v3.0 5 votes vote down vote up
package encry.network

import java.net.InetSocketAddress
import com.typesafe.scalalogging.StrictLogging
import encry.consensus.HistoryConsensus.{HistoryComparisonResult, Unknown}
import encry.network.ConnectedPeersCollection.{LastUptime, PeerInfo}
import encry.network.PeerConnectionHandler.{ConnectedPeer, ConnectionType, Outgoing}
import encry.network.PrioritiesCalculator.PeersPriorityStatus.PeersPriorityStatus.InitialPriority
import encry.network.PrioritiesCalculator.PeersPriorityStatus.PeersPriorityStatus

final case class ConnectedPeersCollection(private val peers: Map[InetSocketAddress, PeerInfo]) extends StrictLogging {

  val size: Int = peers.size

  def contains(peer: InetSocketAddress): Boolean = peers.contains(peer)

  def initializePeer(cp: ConnectedPeer): ConnectedPeersCollection = ConnectedPeersCollection(peers.updated(
    cp.socketAddress, PeerInfo(Unknown, InitialPriority, cp, Outgoing, LastUptime(0))
  ))

  def removePeer(address: InetSocketAddress): ConnectedPeersCollection = ConnectedPeersCollection(peers - address)

  def updatePriorityStatus(stats: Map[InetSocketAddress, PeersPriorityStatus]): ConnectedPeersCollection =
    ConnectedPeersCollection(updateK(stats, updateStatus))

  def updateHistoryComparisonResult(hcr: Map[InetSocketAddress, HistoryComparisonResult]): ConnectedPeersCollection =
    ConnectedPeersCollection(updateK(hcr, updateComparisonResult))

  def updateLastUptime(lup: Map[InetSocketAddress, LastUptime]): ConnectedPeersCollection =
    ConnectedPeersCollection(updateK(lup, updateUptime))

  def collect[T](p: (InetSocketAddress, PeerInfo) => Boolean,
                 f: (InetSocketAddress, PeerInfo) => T): Seq[T] = peers
    .collect { case (peer, info) if p(peer, info) => f(peer, info) }
    .toSeq

  def getAll: Map[InetSocketAddress, PeerInfo] = peers

  private def updateK[T](elems: Map[InetSocketAddress, T], f: (PeerInfo, T) => PeerInfo): Map[InetSocketAddress, PeerInfo] = {
    val newValue: Map[InetSocketAddress, PeerInfo] = for {
      (key, value) <- elems
      oldValue     <- peers.get(key)
    } yield key -> f(oldValue, value)
    peers ++ newValue
  }

  private def updateStatus: (PeerInfo, PeersPriorityStatus) => PeerInfo = (i, p) => i.copy(peerPriorityStatus = p)
  private def updateComparisonResult: (PeerInfo, HistoryComparisonResult) => PeerInfo = (i, h) => i.copy(historyComparisonResult = h)
  private def updateUptime: (PeerInfo, LastUptime) => PeerInfo = (i, u) => i.copy(lastUptime = u)

}

object ConnectedPeersCollection {

  final case class LastUptime(time: Long) extends AnyVal

  final case class PeerInfo(historyComparisonResult: HistoryComparisonResult,
                            peerPriorityStatus: PeersPriorityStatus,
                            connectedPeer: ConnectedPeer,
                            connectionType: ConnectionType,
                            lastUptime: LastUptime)

  def apply(): ConnectedPeersCollection = ConnectedPeersCollection(Map.empty[InetSocketAddress, PeerInfo])
} 
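collect pairs a predicate with a projection; for example, pulling the addresses of all peers whose chain comparison is still Unknown (peersCollection is a hypothetical instance):

val unknownPeers: Seq[InetSocketAddress] =
  peersCollection.collect(
    (_, info) => info.historyComparisonResult == Unknown, // predicate
    (address, _) => address                               // projection
  )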
Example 39
Source File: InfoApiRoute.scala    From EncryCore   with GNU General Public License v3.0 5 votes vote down vote up
package encry.api.http.routes

import java.net.InetSocketAddress
import akka.actor.{ ActorRef, ActorRefFactory }
import akka.http.scaladsl.server.Route
import akka.pattern.ask
import encry.api.http.DataHolderForApi._
import encry.local.miner.Miner.MinerStatus
import encry.settings._
import encry.utils.NetworkTimeProvider
import io.circe.Json
import io.circe.syntax._
import io.circe.generic.auto._
import org.encryfoundation.common.modifiers.history.{ Block, Header }
import org.encryfoundation.common.utils.Algos
import org.encryfoundation.common.utils.constants.Constants

case class InfoApiRoute(dataHolder: ActorRef,
                        settings: RESTApiSettings,
                        nodeId: Array[Byte],
                        timeProvider: NetworkTimeProvider)(implicit val context: ActorRefFactory)
    extends EncryBaseApiRoute {

  override val route: Route = (path("info") & get) {
    (dataHolder ? GetAllInfoHelper)
      .mapTo[Json]
      .okJson()
  }
}

object InfoApiRoute {

  def makeInfoJson(nodeId: Array[Byte],
                   minerInfo: MinerStatus,
                   connectedPeersLength: Int,
                   readers: Readers,
                   stateType: String,
                   nodeName: String,
                   knownPeers: Seq[InetSocketAddress],
                   storage: String,
                   nodeUptime: Long,
                   mempoolSize: Int,
                   connectWithOnlyKnownPeer: Boolean,
                   header: Option[Header],
                   block: Option[Block],
                   constants: Constants
                  ): Json = {
    val stateVersion: Option[String] = readers.s.map(_.version).map(Algos.encode)
    val stateRoot: Option[String] = readers.s.map(_.tree.rootHash).map(Algos.encode)
    val prevFullHeaderId: String = block.map(b => Algos.encode(b.header.parentId)).getOrElse("")
    InfoApi(
      nodeName,
      stateType,
      block.map(_.header.difficulty.toString).getOrElse(constants.InitialDifficulty.toString),
      block.map(_.encodedId).getOrElse(""),
      header.map(_.encodedId).getOrElse(""),
      connectedPeersLength,
      mempoolSize,
      prevFullHeaderId,
      block.map(_.header.height).getOrElse(0),
      header.map(_.height).getOrElse(0),
      stateVersion.getOrElse(""),
      nodeUptime,
      storage,
      connectWithOnlyKnownPeer,
      minerInfo.isMining,
      knownPeers.map { x =>
        x.getHostName + ":" + x.getPort
      },
      stateRoot.getOrElse("")
    ).asJson
  }
} 
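The knownPeers entries are rendered with getHostName, which may trigger a reverse DNS lookup per peer; getHostString is the lookup-free alternative:

val peer = new InetSocketAddress("93.184.216.34", 9001)
peer.getHostName   // may perform a reverse DNS lookup to find a name
peer.getHostString // "93.184.216.34", no lookup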
Example 40
Source File: PeersApiRoute.scala    From EncryCore   with GNU General Public License v3.0 5 votes vote down vote up
package encry.api.http.routes

import java.net.{InetAddress, InetSocketAddress}
import akka.actor.{ActorRef, ActorRefFactory}
import akka.http.scaladsl.model.StatusCodes
import akka.http.scaladsl.server.Route
import akka.pattern.ask
import encry.api.http.DataHolderForApi._
import encry.api.http.routes.PeersApiRoute.PeerInfoResponse
import encry.network.BlackList.{BanReason, BanTime, BanType}
import encry.network.ConnectedPeersCollection.PeerInfo
import encry.settings.RESTApiSettings
import io.circe.Encoder
import io.circe.generic.semiauto._
import io.circe.syntax._
import scala.util.{Failure, Success, Try}

case class PeersApiRoute(override val settings: RESTApiSettings, dataHolder: ActorRef)(
  implicit val context: ActorRefFactory
) extends EncryBaseApiRoute {

  override lazy val route: Route = pathPrefix("peers") {
    connectedPeers ~ allPeers ~ bannedList ~ WebRoute.authRoute(connectPeer ~ removeFromBan, settings)
  }

  def allPeers: Route = (path("all") & get) {
    (dataHolder ? GetAllPeers)
      .mapTo[Seq[InetSocketAddress]]
      .map(_.map(_.toString).asJson).okJson()
  }

  def connectedPeers: Route = (path("connected") & get) {
    (dataHolder ? GetConnectedPeersHelper)
      .mapTo[Seq[PeerInfoResponse]].map(_.asJson).okJson()
  }

  def bannedList: Route = (path("banned") & get) {
    (dataHolder ? GetBannedPeersHelper).mapTo[Seq[(InetAddress, (BanReason, BanTime, BanType))]]
    .map(_.map(_.toString).asJson).okJson()
  }

  def connectPeer: Route = path("add") {
    post(entity(as[String]) { str =>
      complete {
        Try {
          val split = str.split(':')
          (split(0), split(1).toInt)
        } match {
          case Success((host, port)) =>
            dataHolder ! UserAddPeer(new InetSocketAddress(host, port))
            StatusCodes.OK
          case Failure(_) =>
            StatusCodes.BadRequest
        }
      }
    })
  }

  def removeFromBan: Route = path("remove") {
    post(entity(as[String]) { str =>
      complete {
        Try {
          val split = str.split(':')
          (split(0), split(1).toInt)
        } match {
          case Success((host, port)) =>
            dataHolder ! RemovePeerFromBanList(new InetSocketAddress(host, port))
            StatusCodes.OK
          case Failure(_) =>
            StatusCodes.BadRequest
        }
      }
    })
  }
}

object PeersApiRoute {

  case class PeerInfoResponse(address: String, name: Option[String], connectionType: Option[String])

  object PeerInfoResponse {

    def fromAddressAndInfo(address: InetSocketAddress, peerInfo: PeerInfo): PeerInfoResponse = PeerInfoResponse(
      address.toString,
      Some(peerInfo.connectedPeer.toString),
      Some(peerInfo.connectionType.toString)
    )
  }

  implicit val encodePeerInfoResponse: Encoder[PeerInfoResponse] = deriveEncoder
} 
Example 41
Source File: SettingsReaders.scala    From EncryCore   with GNU General Public License v3.0 5 votes vote down vote up
package encry.settings

import java.io.File
import java.net.InetSocketAddress

import com.typesafe.config.Config
import encry.storage.VersionalStorage
import encry.storage.VersionalStorage.StorageType
import encry.utils.ByteStr
import net.ceedubs.ficus.readers.ValueReader
import org.encryfoundation.common.utils.constants.{Constants, TestNetConstants}

trait SettingsReaders {
  implicit val byteStrReader: ValueReader[ByteStr] = (cfg, path) => ByteStr.decodeBase58(cfg.getString(path)).get
  implicit val storageTypeReader: ValueReader[StorageType] = (cfg, path) => cfg.getString(path) match {
    case "iodb"    => VersionalStorage.IODB
    case "LevelDb" => VersionalStorage.LevelDB
  }
  implicit val fileReader: ValueReader[File] = (cfg, path) => new File(cfg.getString(path))
  implicit val byteValueReader: ValueReader[Byte] = (cfg, path) => cfg.getInt(path).toByte
  implicit val inetSocketAddressReader: ValueReader[InetSocketAddress] = { (config: Config, path: String) =>
    val split = config.getString(path).split(":")
    new InetSocketAddress(split(0), split(1).toInt)
  }

  implicit val ConstantsSettingsReader: ValueReader[Constants] = (cfg, path) => {
    def getConstants(constantsClass: String): Constants = {
      constantsClass match {
        case "TestConstants" => TestConstants
        case "SlowMiningConstants" => SlowMiningConstants
        case _ => TestNetConstants
      }
    }
    getConstants(
      if (cfg.hasPath(path)) cfg.getString(path) else ""
    )
  }

} 
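A hypothetical usage of the inetSocketAddressReader with Ficus, assuming a config entry of the form peer = "10.0.0.1:9001":

import com.typesafe.config.ConfigFactory
import net.ceedubs.ficus.Ficus._

object ReaderDemo extends SettingsReaders {
  val cfg = ConfigFactory.parseString("peer = \"10.0.0.1:9001\"")
  val peer: java.net.InetSocketAddress = cfg.as[java.net.InetSocketAddress]("peer")
}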
Example 42
Source File: RemoveFromBlackList.scala    From EncryCore   with GNU General Public License v3.0 5 votes vote down vote up
package encry.cli.commands

import java.net.InetSocketAddress

import scala.concurrent.ExecutionContext.Implicits.global
import akka.actor.ActorRef
import encry.api.http.DataHolderForApi.RemovePeerFromBanList
import encry.cli.{ Ast, Response }
import encry.settings.EncryAppSettings
import encry.utils.NetworkTimeProvider

import scala.concurrent.Future


object RemoveFromBlackList extends Command {

  override def execute(args: Command.Args,
                       settings: EncryAppSettings,
                       dataHolder: ActorRef,
                       nodeId: Array[Byte],
                       networkTimeProvider: NetworkTimeProvider): Future[Option[Response]] = {
    val host: String            = args.requireArg[Ast.Str]("host").s
    val port: Long              = args.requireArg[Ast.Num]("port").i
    val peer: InetSocketAddress = new InetSocketAddress(host, port.toInt)
    dataHolder ! RemovePeerFromBanList(peer)
    Future.successful(Some(Response(s"Peer $peer was removed from black list")))
  }
} 
Example 43
Source File: AddPeer.scala    From EncryCore   with GNU General Public License v3.0 5 votes vote down vote up
package encry.cli.commands

import java.net.InetSocketAddress
import encry.api.http.DataHolderForApi.UserAddPeer
import akka.actor.ActorRef
import encry.cli.{Ast, Response}
import encry.settings.EncryAppSettings
import encry.utils.NetworkTimeProvider
import scala.concurrent.Future


object AddPeer extends Command {
  override def execute(args: Command.Args,
                       settings: EncryAppSettings,
                       dataHolder: ActorRef,
                       nodeId: Array[Byte],
                       networkTimeProvider: NetworkTimeProvider): Future[Option[Response]] = {
    val host: String            = args.requireArg[Ast.Str]("host").s
    val port: Long              = args.requireArg[Ast.Num]("port").i
    val peer: InetSocketAddress = new InetSocketAddress(host, port.toInt)
    dataHolder ! UserAddPeer(peer)
    Future.successful(Some(Response("Peer added!")))
  }
} 
Example 44
Source File: GetPeers.scala    From EncryCore   with GNU General Public License v3.0 5 votes vote down vote up
package encry.cli.commands.peer

import java.net.InetSocketAddress
import akka.actor.ActorRef
import encry.cli.Response
import encry.cli.commands.Command
import encry.settings.EncryAppSettings
import encry.utils.NetworkTimeProvider
import akka.pattern._
import akka.util.Timeout
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.Future
import encry.api.http.DataHolderForApi.GetAllPeers

object GetPeers extends Command {

  override def execute(args: Command.Args,
                       settings: EncryAppSettings,
                       dataHolder: ActorRef,
                       nodeId: Array[Byte],
                       ntp: NetworkTimeProvider): Future[Option[Response]] = {
    implicit val timeout: Timeout = Timeout(settings.restApi.timeout)
    (dataHolder ? GetAllPeers)
      .mapTo[Seq[InetSocketAddress]]
      .map(x => Some(Response(x.toString())))
  }
} 
Example 45
Source File: GetBan.scala    From EncryCore   with GNU General Public License v3.0 5 votes vote down vote up
package encry.cli.commands.peer

import java.net.InetSocketAddress

import akka.actor.ActorRef
import encry.api.http.DataHolderForApi.{GetConnectedPeersHelper, PeerBanHelper}
import encry.api.http.routes.PeersApiRoute.PeerInfoResponse
import encry.cli.{Ast, Response}
import encry.cli.commands.Command
import encry.settings.EncryAppSettings
import encry.utils.NetworkTimeProvider
import akka.pattern._
import akka.util.Timeout
import encry.network.BlackList.BanReason.InvalidNetworkMessage
import encry.network.PeersKeeper.BanPeerFromAPI

import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.Future

object GetBan extends Command {

  override def execute(args: Command.Args,
                       settings: EncryAppSettings,
                       dataHolder: ActorRef,
                       nodeId: Array[Byte],
                       ntp: NetworkTimeProvider): Future[Option[Response]] = {
    implicit val timeout: Timeout = Timeout(settings.restApi.timeout)
    val host: String            = args.requireArg[Ast.Str]("host").s
    val port: Long              = args.requireArg[Ast.Num]("port").i
    val peer: InetSocketAddress = new InetSocketAddress(host, port.toInt)
    dataHolder ! PeerBanHelper(peer, "Banned by the user")
    Future.successful(Some(Response(s"Peer $peer was banned by the user")))
  }
} 
Example 46
Source File: BlackListTests.scala    From EncryCore   with GNU General Public License v3.0 5 votes vote down vote up
package encry.network

import java.net.{InetAddress, InetSocketAddress}

import akka.actor.ActorSystem
import akka.testkit.{TestActorRef, TestProbe}
import encry.modifiers.InstanceFactory
import encry.network.BlackList.BanReason._
import encry.network.PeerConnectionHandler.{ConnectedPeer, Outgoing}
import encry.network.PeerConnectionHandler.ReceivableMessages.CloseConnection
import encry.network.PeersKeeper.BanPeer
import encry.settings.TestNetSettings
import org.encryfoundation.common.network.BasicMessagesRepo.Handshake
import org.scalatest.{BeforeAndAfterAll, Matchers, OneInstancePerTest, WordSpecLike}
import scala.concurrent.duration._

class BlackListTests extends WordSpecLike
  with Matchers
  with BeforeAndAfterAll
  with InstanceFactory
  with OneInstancePerTest
  with TestNetSettings {

  implicit val system: ActorSystem = ActorSystem()

  override def afterAll(): Unit = system.terminate()

  val knowPeersSettings = testNetSettings.copy(
    network = settings.network.copy(
      knownPeers = List(new InetSocketAddress("172.16.11.11", 9001)),
      connectOnlyWithKnownPeers = Some(true)
    ),
    blackList = settings.blackList.copy(
      banTime = 2 seconds,
      cleanupTime = 3 seconds
    ))

  "Peers keeper" should {
    "handle ban peer message correctly" in {
      val peersKeeper: TestActorRef[PeersKeeper] = TestActorRef[PeersKeeper](PeersKeeper.props(knowPeersSettings, TestProbe().ref, TestProbe().ref))
      val address: InetSocketAddress = new InetSocketAddress("0.0.0.0", 9000)
      val peerHandler: TestProbe = TestProbe()
      val connectedPeer: ConnectedPeer = ConnectedPeer(
        address,
        peerHandler.ref,
        Outgoing,
        Handshake(protocolToBytes(knowPeersSettings.network.appVersion), "test node", Some(address), System.currentTimeMillis())
      )
      peersKeeper ! BanPeer(connectedPeer, SpamSender)
      peerHandler.expectMsg(CloseConnection)
      peersKeeper.underlyingActor.blackList.contains(address.getAddress) shouldBe true
    }
    "cleanup black list by scheduler correctly" in {
      val peersKeeper: TestActorRef[PeersKeeper] = TestActorRef[PeersKeeper](PeersKeeper.props(knowPeersSettings, TestProbe().ref, TestProbe().ref))
      val address: InetSocketAddress = new InetSocketAddress("0.0.0.0", 9000)
      val peerHandler: TestProbe = TestProbe()
      val connectedPeer: ConnectedPeer = ConnectedPeer(
        address,
        peerHandler.ref,
        Outgoing,
        Handshake(protocolToBytes(knowPeersSettings.network.appVersion), "test node", Some(address), System.currentTimeMillis())
      )
      peersKeeper ! BanPeer(connectedPeer, SentPeersMessageWithoutRequest)
      Thread.sleep(6000)
      peersKeeper.underlyingActor.blackList.contains(address.getAddress) shouldBe false
    }
    "don't remove peer from black list before ban time expired" in {
      val peersKeeper: TestActorRef[PeersKeeper] = TestActorRef[PeersKeeper](PeersKeeper.props(knowPeersSettings, TestProbe().ref, TestProbe().ref))
      val address: InetSocketAddress = new InetSocketAddress("0.0.0.0", 9000)
      val peerHandler: TestProbe = TestProbe()
      val connectedPeer: ConnectedPeer = ConnectedPeer(
        address,
        peerHandler.ref,
        Outgoing,
        Handshake(protocolToBytes(knowPeersSettings.network.appVersion), "test node", Some(address), System.currentTimeMillis())
      )
      Thread.sleep(4000)
      peersKeeper ! BanPeer(connectedPeer, CorruptedSerializedBytes)
      Thread.sleep(2000)
      peersKeeper.underlyingActor.blackList.contains(address.getAddress) shouldBe true
    }
  }
} 
Example 47
Source File: DMUtils.scala    From EncryCore   with GNU General Public License v3.0 5 votes vote down vote up
package encry.network.DeliveryManagerTests

import java.net.InetSocketAddress
import akka.actor.ActorSystem
import akka.testkit.{TestActorRef, TestProbe}
import encry.local.miner.Miner.{DisableMining, StartMining}
import encry.modifiers.InstanceFactory
import encry.network.DeliveryManager
import encry.network.DeliveryManager.FullBlockChainIsSynced
import encry.network.NodeViewSynchronizer.ReceivableMessages.UpdatedHistory
import encry.network.PeerConnectionHandler.{ConnectedPeer, Incoming}
import encry.settings.EncryAppSettings
import encry.view.history.History
import org.encryfoundation.common.modifiers.history.Block
import org.encryfoundation.common.network.BasicMessagesRepo.Handshake
import org.encryfoundation.common.utils.TaggedTypes.ModifierId
import scala.collection.mutable
import scala.collection.mutable.WrappedArray

object DMUtils extends InstanceFactory {

  def initialiseDeliveryManager(isBlockChainSynced: Boolean,
                                isMining: Boolean,
                                settings: EncryAppSettings)
                               (implicit actorSystem: ActorSystem): (TestActorRef[DeliveryManager], History) = {
    val history: History = generateDummyHistory(settings)
    val deliveryManager: TestActorRef[DeliveryManager] =
      TestActorRef[DeliveryManager](DeliveryManager
        .props(None, TestProbe().ref, TestProbe().ref, TestProbe().ref, TestProbe().ref, TestProbe().ref, settings))
    deliveryManager ! UpdatedHistory(history)
    if (isMining) deliveryManager ! StartMining
    else deliveryManager ! DisableMining
    if (isBlockChainSynced) deliveryManager ! FullBlockChainIsSynced
    (deliveryManager, history)
  }

  def generateBlocks(qty: Int, history: History): (History, List[Block]) =
    (0 until qty).foldLeft(history, List.empty[Block]) {
      case ((prevHistory, blocks), _) =>
        val block: Block = generateNextBlock(prevHistory)
        prevHistory.append(block.header)
        prevHistory.append(block.payload)
        val a = prevHistory.reportModifierIsValid(block)
        (a, blocks :+ block)
    }

  def toKey(id: ModifierId): WrappedArray.ofByte = new mutable.WrappedArray.ofByte(id)

  def createPeer(port: Int,
                 host: String,
                 settings: EncryAppSettings)(implicit system: ActorSystem): (InetSocketAddress, ConnectedPeer) = {
    val address = new InetSocketAddress(host, port)
    val peer: ConnectedPeer = ConnectedPeer(address, TestProbe().ref, Incoming,
      Handshake(protocolToBytes(settings.network.appVersion), host, Some(address), System.currentTimeMillis()))
    (address, peer)
  }
} 
Example 48
Source File: CassandraEventLogSettings.scala    From eventuate   with Apache License 2.0 5 votes vote down vote up
package com.rbmhtechnology.eventuate.log.cassandra

import java.net.InetSocketAddress
import java.util.concurrent.TimeUnit

import akka.util.Helpers.Requiring

import com.datastax.driver.core.{ Cluster, ConsistencyLevel }
import com.typesafe.config.Config

import com.rbmhtechnology.eventuate.log._

import scala.collection.JavaConverters._
import scala.concurrent.duration._

class CassandraEventLogSettings(config: Config) extends EventLogSettings {
  import CassandraEventLogSettings._

  val writeTimeout: Long =
    config.getDuration("eventuate.log.write-timeout", TimeUnit.MILLISECONDS)

  val writeBatchSize: Int =
    config.getInt("eventuate.log.write-batch-size")

  val keyspace: String =
    config.getString("eventuate.log.cassandra.keyspace")

  val keyspaceAutoCreate: Boolean =
    config.getBoolean("eventuate.log.cassandra.keyspace-autocreate")

  val replicationFactor: Int =
    config.getInt("eventuate.log.cassandra.replication-factor")

  val tablePrefix: String =
    config.getString("eventuate.log.cassandra.table-prefix")

  val readConsistency: ConsistencyLevel =
    ConsistencyLevel.valueOf(config.getString("eventuate.log.cassandra.read-consistency"))

  val writeConsistency: ConsistencyLevel =
    ConsistencyLevel.valueOf(config.getString("eventuate.log.cassandra.write-consistency"))

  val writeRetryMax: Int =
    config.getInt("eventuate.log.cassandra.write-retry-max")

  val defaultPort: Int =
    config.getInt("eventuate.log.cassandra.default-port")

  val contactPoints =
    getContactPoints(config.getStringList("eventuate.log.cassandra.contact-points").asScala, defaultPort)

  val partitionSize: Long =
    config.getLong("eventuate.log.cassandra.partition-size")
      .requiring(
        _ > writeBatchSize,
        s"eventuate.log.cassandra.partition-size must be greater than eventuate.log.write-batch-size (${writeBatchSize})")

  val indexUpdateLimit: Int =
    config.getInt("eventuate.log.cassandra.index-update-limit")

  val initRetryMax: Int =
    config.getInt("eventuate.log.cassandra.init-retry-max")

  val initRetryDelay: FiniteDuration =
    config.getDuration("eventuate.log.cassandra.init-retry-delay", TimeUnit.MILLISECONDS).millis

  def deletionRetryDelay: FiniteDuration =
    ???

  val connectRetryMax: Int =
    config.getInt("eventuate.log.cassandra.connect-retry-max")

  val connectRetryDelay: FiniteDuration =
    config.getDuration("eventuate.log.cassandra.connect-retry-delay", TimeUnit.MILLISECONDS).millis

  val clusterBuilder: Cluster.Builder =
    Cluster.builder.addContactPointsWithPorts(contactPoints.asJava).withCredentials(
      config.getString("eventuate.log.cassandra.username"),
      config.getString("eventuate.log.cassandra.password"))
}

private object CassandraEventLogSettings {
  def getContactPoints(contactPoints: Seq[String], defaultPort: Int): Seq[InetSocketAddress] = {
    contactPoints match {
      case null | Nil => throw new IllegalArgumentException("a contact point list cannot be empty.")
      case hosts => hosts map {
        ipWithPort =>
          ipWithPort.split(":") match {
            case Array(host, port) => new InetSocketAddress(host, port.toInt)
            case Array(host)       => new InetSocketAddress(host, defaultPort)
            case msg               => throw new IllegalArgumentException(s"a contact point should have the form [host:port] or [host] but was: $msg.")
          }
      }
    }
  }
} 
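getContactPoints accepts both host:port and bare host entries, falling back to the configured default port. A worked example, shown as comments because the helper is private to the companion object (values illustrative):

// getContactPoints(Seq("10.0.0.1:9042", "10.0.0.2"), defaultPort = 9042)
//   == Seq(new InetSocketAddress("10.0.0.1", 9042), new InetSocketAddress("10.0.0.2", 9042))
// getContactPoints(Nil, 9042) throws IllegalArgumentException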
Example 49
Source File: JDBCProxyServer.scala    From Hive-JDBC-Proxy   with Apache License 2.0 5 votes vote down vote up
package com.enjoyyin.hive.proxy.jdbc.server

import java.net.InetSocketAddress

import org.apache.hive.service.auth.PlainSaslHelper
import org.apache.hive.service.auth.TSetIpAddressProcessor
import org.apache.thrift.TProcessor
import org.apache.thrift.TProcessorFactory
import org.apache.thrift.protocol.TBinaryProtocol
import org.apache.thrift.server.TServer
import org.apache.thrift.server.TThreadPoolServer
import org.apache.thrift.transport.TServerSocket
import org.apache.thrift.transport.TTransport
import com.enjoyyin.hive.proxy.jdbc.thrift.ThriftProxyService
import com.enjoyyin.hive.proxy.jdbc.util.Logging
import com.enjoyyin.hive.proxy.jdbc.util.ProxyConf.maxWorkerThreads
import com.enjoyyin.hive.proxy.jdbc.util.ProxyConf.minWorkerThreads
import com.enjoyyin.hive.proxy.jdbc.util.ProxyConf.portNum
import com.enjoyyin.hive.proxy.jdbc.util.ProxyConf.proxyHost
import com.enjoyyin.hive.proxy.jdbc.util.ProxyConf.authTypeStr
import com.enjoyyin.hive.proxy.jdbc.util.ProxyConf.AUTHENTICATION_OF_CUSTOM
import com.enjoyyin.hive.proxy.jdbc.util.ProxyConf.AUTHENTICATION_OF_NONE
import com.enjoyyin.hive.proxy.jdbc.util.Utils
import com.enjoyyin.hive.proxy.jdbc.rule.basic.DefaultLoginValidateRule
import org.apache.commons.lang.StringUtils
import org.apache.hadoop.hive.conf.HiveConf


class JDBCProxyServer extends Runnable with Logging {

  var server: TServer = _

  override def run(): Unit = {
    val serverAddress = getServerSocket(proxyHost, portNum)
    //hive.server2.authentication
    //hive.server2.custom.authentication.class
    authTypeStr.toUpperCase match {
      case AUTHENTICATION_OF_NONE =>
      case AUTHENTICATION_OF_CUSTOM =>
        val customClass = HiveConf.ConfVars.HIVE_SERVER2_CUSTOM_AUTHENTICATION_CLASS
        System.setProperty(customClass.varname, DefaultLoginValidateRule.getClass.getName)
      case _ => throw new IllegalArgumentException(s"Illegal hive.server2.authentication of value $authTypeStr.")
    }
    logInfo("Is authentication enable? " + authTypeStr)
    val thriftProxyService = new ThriftProxyService
    thriftProxyService.init
    Utils.addShutdownHook(thriftProxyService.close)
    val sargs = new TThreadPoolServer.Args(serverAddress)
      .processorFactory(new SQLPlainProcessorFactory(thriftProxyService))
      .transportFactory(PlainSaslHelper.getPlainTransportFactory(authTypeStr))
      .protocolFactory(new TBinaryProtocol.Factory())
      .minWorkerThreads(minWorkerThreads)
      .maxWorkerThreads(maxWorkerThreads)

    server = new TThreadPoolServer(sargs)

    logInfo("JDBC Proxy listening on " + serverAddress.getServerSocket)

    server.serve()
  }

  def getServerSocket(proxyHost: String, portNum: Int): TServerSocket = {
    val serverAddress =
      if (StringUtils.isNotBlank(proxyHost)) new InetSocketAddress(proxyHost, portNum)
      else new InetSocketAddress(portNum) // wildcard address when no host is configured
    new TServerSocket(serverAddress)
  }

}

object JDBCProxyServer {
  def main(args: Array[String]): Unit = {
    val mainThread = new Thread(new JDBCProxyServer)
    mainThread.setName("hive-jdbc-proxy-main-thread")
    mainThread.start()
  }
}

class SQLPlainProcessorFactory(val service: ThriftProxyService)
    extends TProcessorFactory(null) {
  override def getProcessor(trans: TTransport): TProcessor = {
    new TSetIpAddressProcessor(service)
  }
} 
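
One detail of getServerSocket above is worth calling out: the single-argument InetSocketAddress constructor binds the wildcard address (all interfaces), while the two-argument form pins the listener to a single host. A quick illustration:

import java.net.InetSocketAddress

val allInterfaces = new InetSocketAddress(10000)              // 0.0.0.0:10000
val loopbackOnly  = new InetSocketAddress("127.0.0.1", 10000) // loopback only
println(allInterfaces.getAddress.isAnyLocalAddress) // true
println(loopbackOnly.getAddress.isLoopbackAddress)  // true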
Example 50
Source File: DatadogRegistrySpec.scala    From akka-http-metrics   with Apache License 2.0 5 votes vote down vote up
package fr.davit.akka.http.metrics.datadog

import java.net.InetSocketAddress

import akka.actor.ActorSystem
import akka.http.scaladsl.model.StatusCodes
import akka.io.{IO, Udp}
import akka.testkit.{TestKit, TestProbe}
import com.timgroup.statsd.NonBlockingStatsDClient
import fr.davit.akka.http.metrics.core.HttpMetricsRegistry.{PathDimension, StatusGroupDimension}
import org.scalatest.BeforeAndAfterAll
import org.scalatest.flatspec.AnyFlatSpecLike
import org.scalatest.matchers.should.Matchers

import scala.concurrent.duration._

class DatadogRegistrySpec extends TestKit(ActorSystem("DatadogRegistrySpec")) with AnyFlatSpecLike with Matchers with BeforeAndAfterAll {

  val dimensions = Seq(StatusGroupDimension(StatusCodes.OK), PathDimension("/api"))

  def withFixture(test: (TestProbe, DatadogRegistry) => Any) = {
    val statsd = TestProbe()
    statsd.send(IO(Udp), Udp.Bind(statsd.ref, new InetSocketAddress(0)))
    val port = statsd.expectMsgType[Udp.Bound].localAddress.getPort
    val socket = statsd.sender()
    val client = new NonBlockingStatsDClient("", "localhost", port)
    val registry = DatadogRegistry(client)
    try {
      test(statsd, registry)
    } finally {
      client.close()
      socket ! Udp.Unbind
    }
  }

  override def afterAll(): Unit = {
    shutdown()
    super.afterAll()
  }

  "DatadogRegistry" should "send active datagrams to the statsd server" in withFixture { (statsd, registry) =>
    registry.active.inc()
    statsd.expectMsgType[Udp.Received].data.utf8String shouldBe "akka.http.requests_active:1|c"
  }

  it should "send requests datagrams to the statsd server" in withFixture { (statsd, registry) =>
    registry.requests.inc()
    statsd.expectMsgType[Udp.Received].data.utf8String shouldBe "akka.http.requests_count:1|c"
  }

  it should "send receivedBytes datagrams to the statsd server" in withFixture { (statsd, registry) =>
    registry.receivedBytes.update(3)
    statsd.expectMsgType[Udp.Received].data.utf8String shouldBe "akka.http.requests_bytes:3|d"

    registry.receivedBytes.update(3, dimensions)
    statsd.expectMsgType[Udp.Received].data.utf8String shouldBe "akka.http.requests_bytes:3|d|#path:/api,status:2xx"
  }

  it should "send responses datagrams to the statsd server" in withFixture { (statsd, registry) =>
    registry.responses.inc()
    statsd.expectMsgType[Udp.Received].data.utf8String shouldBe "akka.http.responses_count:1|c"

    registry.responses.inc(dimensions)
    statsd.expectMsgType[Udp.Received].data.utf8String shouldBe "akka.http.responses_count:1|c|#path:/api,status:2xx"
  }

  it should "send errors datagrams to the statsd server" in withFixture { (statsd, registry) =>
    registry.errors.inc()
    statsd.expectMsgType[Udp.Received].data.utf8String shouldBe "akka.http.responses_errors_count:1|c"

    registry.errors.inc(dimensions)
    statsd.expectMsgType[Udp.Received].data.utf8String shouldBe "akka.http.responses_errors_count:1|c|#path:/api,status:2xx"
  }

  it should "send duration datagrams to the statsd server" in withFixture { (statsd, registry) =>
    registry.duration.observe(3.seconds)
    statsd.expectMsgType[Udp.Received].data.utf8String shouldBe "akka.http.responses_duration:3000|d"

    registry.duration.observe(3.seconds, dimensions)
    statsd.expectMsgType[Udp.Received].data.utf8String shouldBe "akka.http.responses_duration:3000|d|#path:/api,status:2xx"
  }

  it should "send sentBytes datagrams to the statsd server" in withFixture { (statsd, registry) =>
    registry.sentBytes.update(3)
    statsd.expectMsgType[Udp.Received].data.utf8String shouldBe "akka.http.responses_bytes:3|d"

    registry.sentBytes.update(3, dimensions)
    statsd.expectMsgType[Udp.Received].data.utf8String shouldBe "akka.http.responses_bytes:3|d|#path:/api,status:2xx"
  }

  it should "send connected datagrams to the statsd server" in withFixture { (statsd, registry) =>
    registry.connected.inc()
    statsd.expectMsgType[Udp.Received].data.utf8String shouldBe "akka.http.connections_active:1|c"
  }
  it should "send connections datagrams to the statsd server" in withFixture { (statsd, registry) =>
    registry.connections.inc()
    statsd.expectMsgType[Udp.Received].data.utf8String shouldBe "akka.http.connections_count:1|c"
  }
} 
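
The fixture above binds UDP to new InetSocketAddress(0) so the OS assigns a free port, then reads the real port back from Udp.Bound. The same trick works with a plain DatagramSocket (a minimal sketch):

import java.net.{DatagramSocket, InetSocketAddress}

val socket = new DatagramSocket(new InetSocketAddress(0)) // port 0: let the OS pick
val port   = socket.getLocalPort                          // the port actually bound
println(s"a statsd stub could listen on $port")
socket.close()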
Example 51
Source File: SpecificRPCTest.scala    From sbt-avrohugger   with Apache License 2.0 5 votes vote down vote up
package test

import org.specs2.mutable.Specification

import java.io.IOException
import java.net.InetSocketAddress
import java.lang.reflect.Proxy

import org.apache.avro.specific.SpecificData
import org.apache.avro.ipc.netty.NettyServer
import org.apache.avro.ipc.netty.NettyTransceiver
import org.apache.avro.ipc.Server
import org.apache.avro.ipc.specific.SpecificRequestor
import org.apache.avro.ipc.specific.SpecificResponder

import example.proto.Mail
import example.proto.Message

class SpecificRPCTest extends Specification {
  skipAll // RPC tests fail on Linux (Ubuntu 16.04), solution unknown
  // adapted from https://github.com/phunt/avro-rpc-quickstart
  "A case class " should {
    "serialize and deserialize correctly via rpc" in {
      
      class MailImpl extends Mail {
        // in this simple example just return details of the message
        def send(message: Message): String = {
          System.out.println("Sending message")
          val response: String = message.body
          response.toString
        }
      }
      
      System.out.println("Starting server")
      // usually this would be another app, but for simplicity
      val protocol = Mail.PROTOCOL
      val responder = new SpecificResponder(protocol, new MailImpl())
      val server = new NettyServer(responder, new InetSocketAddress(65111))
      System.out.println("Server started")

      val client = new NettyTransceiver(new InetSocketAddress(65111))

      // client code - attach to the server and send a message
      val requestor = new SpecificRequestor(protocol, client, SpecificData.get)
      val mailProxy: Mail = Proxy.newProxyInstance(
        SpecificData.get.getClassLoader,
        Array(classOf[Mail]),
        requestor).asInstanceOf[Mail]

      val message = new Message("avro_user", "pat", "hello_world")

      System.out.println("Calling proxy.send with message:  " + message.toString)
      val received: String = mailProxy.send(message).toString
      System.out.println("Result: " + received)

      // cleanup
      client.close()
      server.close()
      System.out.println("Server stopped")
      
      received === message.body
      
    }
  }

} 
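
Hard-coding port 65111 makes this test flaky when the port is already in use. A sketch of the same wiring on an OS-assigned port, relying on avro-ipc's Server#getPort to discover the bound port:

val responder = new SpecificResponder(Mail.PROTOCOL, new MailImpl())
val server    = new NettyServer(responder, new InetSocketAddress(0)) // port 0: OS picks
val client    = new NettyTransceiver(new InetSocketAddress("127.0.0.1", server.getPort))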
Example 55
Source File: VisualMailboxMetricClient.scala    From akka-visualmailbox   with Apache License 2.0 5 votes vote down vote up
package de.aktey.akka.visualmailbox

import java.net.InetSocketAddress

import akka.actor.{Actor, ActorRef, ExtendedActorSystem, Extension, ExtensionId, ExtensionIdProvider, Props}
import akka.io.{IO, Udp}
import akka.util.ByteString
import de.aktey.akka.visualmailbox.packing.Packing


object VisualMailboxMetricClient extends ExtensionId[VisualMailboxMetricClient] with ExtensionIdProvider {
  override def createExtension(system: ExtendedActorSystem): VisualMailboxMetricClient = {
    new VisualMailboxMetricClient(
      system,
      VisualMailboxMetricClientConfig.fromConfig(system.settings.config)
    )
  }

  override def lookup(): ExtensionId[_ <: Extension] = VisualMailboxMetricClient
}

class VisualMailboxMetricClient(system: ExtendedActorSystem, config: VisualMailboxMetricClientConfig) extends Extension {
  private val udpSender = system.systemActorOf(
    Props(new UdpSender(config.serverAddress)).withDispatcher("de.aktey.akka.visualmailbox.client.dispatcher"),
    "de-aktey-akka-visualmailbox-sender"
  )
  system.systemActorOf(
    Props(new VisualMailboxMetricListener(udpSender)).withDispatcher("de.aktey.akka.visualmailbox.client.dispatcher"),
    "de-aktey-akka-visualmailbox-receiver"
  )
}

class VisualMailboxMetricListener(udpSender: ActorRef) extends Actor {

  import context._

  import concurrent.duration._

  var buffer: List[VisualMailboxMetric] = Nil

  system.eventStream.subscribe(self, classOf[VisualMailboxMetric])
  system.scheduler.schedule(1.second, 1.second, self, "flush")

  @scala.throws[Exception](classOf[Exception])
  override def postStop(): Unit = {
    system.eventStream.unsubscribe(self)
  }

  def receive: Receive = {
    case v: VisualMailboxMetric =>
      buffer ::= v
      if (buffer.size > 40) self ! "flush"

    case "flush" if buffer.nonEmpty =>
      udpSender ! Packing.pack(MetricEnvelope(1, Packing.pack(buffer)))
      buffer = Nil
  }
}

class UdpSender(remote: InetSocketAddress) extends Actor {

  import context._

  IO(Udp) ! Udp.SimpleSender

  def receive = {
    case Udp.SimpleSenderReady =>
      context.become(ready(sender()))
  }

  def ready(send: ActorRef): Receive = {
    case msg: Array[Byte] =>
      send ! Udp.Send(ByteString(msg), remote)
  }
} 
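
The receiving end of this metrics channel is not shown. A minimal akka-io counterpart could look like this (a sketch, not part of the project):

import java.net.InetSocketAddress

import akka.actor.{Actor, ActorRef}
import akka.io.{IO, Udp}

class UdpReceiver(local: InetSocketAddress) extends Actor {
  import context.system

  IO(Udp) ! Udp.Bind(self, local) // ask the UDP manager to bind this actor

  def receive: Receive = {
    case Udp.Bound(_) => context.become(ready(sender()))
  }

  def ready(socket: ActorRef): Receive = {
    case Udp.Received(data, _) =>
      // hand the datagram to whatever unpacks MetricEnvelope
      println(s"received ${data.length} bytes")
    case Udp.Unbind  => socket ! Udp.Unbind
    case Udp.Unbound => context.stop(self)
  }
}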
Example 56
Source File: SparkStressImplicits.scala    From spark-cassandra-stress   with Apache License 2.0 5 votes vote down vote up
package com.datastax.sparkstress

import java.net.InetSocketAddress

import com.datastax.spark.connector.cql.CassandraConnector
import com.datastax.spark.connector.writer.RowWriterFactory
import org.apache.spark.rdd.RDD

import scala.collection.JavaConverters._

object SparkStressImplicits {

  def bulkSaveToCassandra[T: RowWriterFactory](rdd: RDD[T], keyspace: String, table: String): Unit = {
    // bulk save was removed in 6.9
    throw new UnsupportedOperationException("bulk save was removed in 6.9")
  }

  def clusterSize(connector: CassandraConnector): Int = {
    connector.withSessionDo(_.getMetadata.getNodes.size())
  }

  def getLocalDC(connector: CassandraConnector): String = {
    val hostsInProvidedDC = connector.hosts
    connector.withSessionDo(
      _.getMetadata
        .getNodes
        .values()
        .asScala
        .find(node => hostsInProvidedDC.contains(node.getEndPoint.resolve().asInstanceOf[InetSocketAddress]))
        .map(_.getDatacenter)
        .getOrElse("Analytics")
    )
  }
} 
Example 57
Source File: package.scala    From drunk   with Apache License 2.0 5 votes vote down vote up
package com.github.jarlakxen

import java.net.InetSocketAddress
import java.nio.channels.ServerSocketChannel

import scala.concurrent._
import scala.concurrent.duration._

import akka.actor.ActorSystem
import akka.http.scaladsl.Http
import akka.http.scaladsl.Http.ServerBinding
import akka.http.scaladsl.model._
import akka.http.scaladsl.server.Route
import akka.stream.ActorMaterializer
import akka.testkit._

import org.scalatest.BeforeAndAfterAll

package object drunk {

  trait TestHttpServer extends BeforeAndAfterAll {
    this: Spec =>

    implicit val system: ActorSystem = ActorSystem("drunk-test")
    implicit def executor = system.dispatcher
    implicit val materializer = ActorMaterializer()

    private def temporaryServerAddress(interface: String = "127.0.0.1"): InetSocketAddress = {
      val serverSocket = ServerSocketChannel.open()
      try {
        serverSocket.socket.bind(new InetSocketAddress(interface, 0))
        val port = serverSocket.socket.getLocalPort
        new InetSocketAddress(interface, port)
      } finally serverSocket.close()
    }

    private def temporaryServerHostnameAndPort(interface: String = "127.0.0.1"): (String, Int) = {
      val socketAddress = temporaryServerAddress(interface)
      (socketAddress.getHostName, socketAddress.getPort)
    }

    val (host, port) = temporaryServerHostnameAndPort()

    override protected def beforeAll(): Unit =
      Http().bindAndHandle(serverRoutes, host, port).futureValue

    override protected def afterAll(): Unit =
      TestKit.shutdownActorSystem(system)

    def serverRoutes: Route
  }

} 
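
Note that temporaryServerAddress closes the probe socket before akka-http binds, so another process could grab the port in between. Binding straight to port 0 and reading the address from the resulting ServerBinding avoids the race; a sketch against the same akka-http API, run where the trait's implicit system and materializer are in scope:

import akka.http.scaladsl.Http

// Bind to port 0, let the OS choose, and read the real port from the binding.
val bindingFuture = Http().bindAndHandle(serverRoutes, "127.0.0.1", 0)
val portFuture    = bindingFuture.map(binding => binding.localAddress.getPort)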
Example 58
Source File: RoundTripThriftSmallBenchmark.scala    From finagle-serial   with Apache License 2.0 5 votes vote down vote up
package io.github.finagle.serial.benchmark

import java.net.InetSocketAddress
import java.util.concurrent.TimeUnit

import com.twitter.finagle.{Client, Server, Service, ThriftMux}
import com.twitter.util.{Closable, Await, Future}
import org.openjdk.jmh.annotations._


@State(Scope.Thread)
class RoundTripThriftSmallBenchmark {
  private val smallSize = 20

  val small: thriftscala.Small =
    thriftscala.Small((for (i <- 1 to smallSize) yield i % 2 == 0).toList, "foo bar baz")

  val echo = new thriftscala.EchoService.FutureIface {
    def echo(small: thriftscala.Small) = Future.value(small)
  }

  var s: Closable = _
  var c: thriftscala.EchoService.FutureIface = _

  @Setup
  def setUp(): Unit = {
    s = ThriftMux.serveIface(new InetSocketAddress(8124), echo)
    c = ThriftMux.newIface[thriftscala.EchoService.FutureIface]("localhost:8124")
  }

  @TearDown
  def tearDown(): Unit = {
    Await.ready(s.close())
  }

  @Benchmark
  @BenchmarkMode(Array(Mode.Throughput))
  @OutputTimeUnit(TimeUnit.SECONDS)
  def test: thriftscala.Small = Await.result(c.echo(small))
} 
Example 59
Source File: RoundTripBenchmark.scala    From finagle-serial   with Apache License 2.0 5 votes vote down vote up
package io.github.finagle.serial.benchmark

import java.net.InetSocketAddress
import java.util.concurrent.TimeUnit

import com.twitter.finagle.{Server, Client, Service}
import com.twitter.util.{Closable, Await, Future}
import org.openjdk.jmh.annotations._

@State(Scope.Thread)
abstract class RoundTripBenchmark[A](val workload: A) {

  val echo = new Service[A, A] {
    override def apply(a: A) = Future.value(a)
  }

  var s: Closable = _
  var c: Service[A, A] = _

  def server: Server[A, A]
  def client: Client[A, A]

  @Setup
  def setUp(): Unit = {
    s = server.serve(new InetSocketAddress(8123), echo)
    c = client.newService("localhost:8123")
  }

  @TearDown
  def tearDown(): Unit = {
    Await.ready(c.close())
    Await.ready(s.close())
  }

  @Benchmark
  @BenchmarkMode(Array(Mode.Throughput))
  @OutputTimeUnit(TimeUnit.SECONDS)
  def test: A = Await.result(c(workload))
} 
Example 60
Source File: SerialIntegrationTest.scala    From finagle-serial   with Apache License 2.0 5 votes vote down vote up
package io.github.finagle.serial.tests

import com.twitter.finagle.{Client, ListeningServer, Server, Service}
import com.twitter.util.{Await, Future, Try}
import io.github.finagle.serial.Serial
import java.net.{InetAddress, InetSocketAddress}
import org.scalatest.Matchers
import org.scalatest.prop.Checkers
import org.scalacheck.{Arbitrary, Gen, Prop}

// The enclosing trait declaration was stripped by the example extractor; a
// minimal reconstruction is given so the fragment parses. `C[_]` is the codec
// type and the two abstract members are defined in the full source.
trait SerialIntegrationTest extends Matchers with Checkers {
  type C[_]

  def createServerAndClient[I, O](f: I => O)(implicit
    inCodec: C[I],
    outCodec: C[O]
  ): (ListeningServer, Service[I, O])

  def serviceFunctionProp[I, O](client: Service[I, O])(f: I => O)(gen: Gen[I]): Prop

  def testFunctionService[I, O](
    f: I => O
  )(implicit
    inCodec: C[I],
    outCodec: C[O],
    arb: Arbitrary[I]
  ): Unit = {
    val (fServer, fClient) = createServerAndClient(f)(inCodec, outCodec)

    check(serviceFunctionProp(fClient)(f)(arb.arbitrary))

    Await.result(fServer.close())
  }
} 
Example 61
Source File: ResourceManagerLinkIT.scala    From sparta   with Apache License 2.0 5 votes vote down vote up
package com.stratio.sparta.serving.core.helpers

import java.net.InetSocketAddress
import java.nio.channels.ServerSocketChannel

import com.stratio.sparta.serving.core.config.SpartaConfig
import com.typesafe.config.ConfigFactory
import org.junit.runner.RunWith
import org.scalatest._
import org.scalatest.junit.JUnitRunner

@RunWith(classOf[JUnitRunner])
class ResourceManagerLinkIT extends FlatSpec with Matchers with BeforeAndAfter {

  var serverSocket: ServerSocketChannel = _
  val sparkUIPort = 4040
  val mesosPort = 5050
  val localhost = "127.0.0.1"

  after {
    serverSocket.close()
  }

  it should "return local Spark UI link" in {
    serverSocket = ServerSocketChannel.open()
    val localhostName = java.net.InetAddress.getLocalHost().getHostName()
    serverSocket.socket.bind(new InetSocketAddress(localhostName, sparkUIPort))
    val config = ConfigFactory.parseString(
      """
        |sparta{
        |  config {
        |    executionMode = local
        |  }
        |}
      """.stripMargin)
    SpartaConfig.initMainConfig(Option(config))
    ResourceManagerLinkHelper.getLink("local") should be(Some(s"http://${localhostName}:${sparkUIPort}"))
  }

  it should "return Mesos UI link" in {
    serverSocket = ServerSocketChannel.open()
    serverSocket.socket.bind(new InetSocketAddress(localhost, mesosPort))
    val config = ConfigFactory.parseString(
      """
        |sparta{
        |  config {
        |    executionMode = mesos
        |  }
        |
        |  mesos {
        |    master = "mesos://127.0.0.1:7077"
        |  }
        |}
      """.stripMargin)
    SpartaConfig.initMainConfig(Option(config))
    ResourceManagerLinkHelper.getLink("mesos") should be(Some(s"http://$localhost:$mesosPort"))
  }

} 
Example 62
Source File: FlumeInput.scala    From sparta   with Apache License 2.0 5 votes vote down vote up
package com.stratio.sparta.plugin.input.flume

import java.io.Serializable
import java.net.InetSocketAddress

import com.stratio.sparta.sdk.pipeline.input.Input
import com.stratio.sparta.sdk.properties.ValidatingPropertyMap._
import org.apache.spark.sql.Row
import org.apache.spark.streaming.StreamingContext
import org.apache.spark.streaming.dstream.DStream
import org.apache.spark.streaming.flume.FlumeUtils

class FlumeInput(properties: Map[String, Serializable]) extends Input(properties) {

  val DEFAULT_FLUME_PORT = 11999
  val DEFAULT_ENABLE_DECOMPRESSION = false
  val DEFAULT_MAXBATCHSIZE = 1000
  val DEFAULT_PARALLELISM = 5

  def initStream(ssc: StreamingContext, sparkStorageLevel: String): DStream[Row] = {

    if (properties.getString("type").equalsIgnoreCase("pull")) {
      FlumeUtils.createPollingStream(
        ssc,
        getAddresses,
        storageLevel(sparkStorageLevel),
        maxBatchSize,
        parallelism
      ).map(data => Row(data.event.getBody.array))
    } else {
      // push
      FlumeUtils.createStream(
        ssc, properties.getString("hostname"),
        properties.getString("port").toInt,
        storageLevel(sparkStorageLevel),
        enableDecompression
      ).map(data => Row(data.event.getBody.array))
    }
  }

  private def getAddresses: Seq[InetSocketAddress] =
    properties.getMapFromJsoneyString("addresses")
      .map(values => (values.get("host"), values.get("port")))
      .map {
        case (Some(address), None) =>
          new InetSocketAddress(address, DEFAULT_FLUME_PORT)
        case (Some(address), Some(port)) =>
          new InetSocketAddress(address, port.toInt)
        case _ =>
          throw new IllegalStateException(s"Invalid configuration value for addresses : ${properties.get("addresses")}")
      }

  private def enableDecompression: Boolean =
    if (properties.hasKey("enableDecompression")) properties.getBoolean("enableDecompression")
    else DEFAULT_ENABLE_DECOMPRESSION

  private def parallelism: Int =
    if (properties.hasKey("parallelism")) properties.getString("parallelism").toInt
    else DEFAULT_PARALLELISM

  private def maxBatchSize: Int =
    if (properties.hasKey("maxBatchSize")) properties.getString("maxBatchSize").toInt
    else DEFAULT_MAXBATCHSIZE
} 
Example 63
Source File: Server.scala    From mqttd   with MIT License 5 votes vote down vote up
package plantae.citrus.mqtt.actors.connection

import java.net.InetSocketAddress

import akka.actor.{Actor, ActorLogging, Props}
import akka.io.{IO, Tcp}
import plantae.citrus.mqtt.actors.SystemRoot

class Server extends Actor with ActorLogging {

  import Tcp._
  import context.system

  IO(Tcp) ! Bind(self, new InetSocketAddress(
    SystemRoot.config.getString("mqtt.broker.hostname"),
    SystemRoot.config.getInt("mqtt.broker.port"))
    , backlog = 1023)

  def receive = {
    case Bound(localAddress) =>

    case CommandFailed(_: Bind) =>
      log.error("bind failure")
      context stop self

    case Connected(remote, local) =>
      log.info("new connection: " + remote)
      sender ! Register(context.actorOf(Props(classOf[PacketBridge], sender)))
  }
} 
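
For exercising such a server, a minimal akka-io TCP client can be sketched as follows (not part of the project):

import java.net.InetSocketAddress

import akka.actor.{Actor, ActorLogging}
import akka.io.{IO, Tcp}

class EchoClient(remote: InetSocketAddress) extends Actor with ActorLogging {

  import Tcp._
  import context.system

  IO(Tcp) ! Connect(remote)

  def receive: Receive = {
    case CommandFailed(_: Connect) =>
      log.error("connect failed")
      context stop self

    case Connected(_, _) =>
      sender() ! Register(self) // deliver inbound bytes to this actor
      context.become {
        case Received(data)      => log.info("got {} bytes", data.length)
        case _: ConnectionClosed => context stop self
      }
  }
}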
Example 64
Source File: TestServer.scala    From finagle-prometheus   with MIT License 5 votes vote down vote up
package com.samstarling.prometheusfinagle.examples

import java.net.InetSocketAddress

import com.samstarling.prometheusfinagle.PrometheusStatsReceiver
import com.samstarling.prometheusfinagle.metrics.{MetricsService, Telemetry}
import com.twitter.finagle.builder.ServerBuilder
import com.twitter.finagle.http._
import com.twitter.finagle.http.path._
import com.twitter.finagle.http.service.{NotFoundService, RoutingService}
import com.twitter.finagle.loadbalancer.perHostStats
import com.twitter.finagle.{Http, Service}
import io.prometheus.client.CollectorRegistry

object TestServer extends App {

  perHostStats.parse("true")

  val registry = CollectorRegistry.defaultRegistry
  val statsReceiver = new PrometheusStatsReceiver(registry)
  val telemetry = new Telemetry(registry, "namespace")

  val emojiService = new EmojiService(statsReceiver)
  val metricsService = new MetricsService(registry)
  val echoService = new EchoService
  val customTelemetryService = new CustomTelemetryService(telemetry)

  val router: Service[Request, Response] =
    RoutingService.byMethodAndPathObject {
      case (Method.Get, Root / "emoji")   => emojiService
      case (Method.Get, Root / "metrics") => metricsService
      case (Method.Get, Root / "echo")    => echoService
      case (Method.Get, Root / "custom")  => customTelemetryService
      case _                              => new NotFoundService
    }

  ServerBuilder()
    .stack(Http.server)
    .name("testserver")
    .bindTo(new InetSocketAddress(8080))
    .build(router)
} 
Example 65
Source File: RpcServerImpl.scala    From finagle-protobuf   with Apache License 2.0 5 votes vote down vote up
package com.twitter.finagle.protobuf.rpc.impl

import com.twitter.finagle.protobuf.rpc.channel.ProtoBufCodec
import com.twitter.finagle.protobuf.rpc.{RpcServer, Util}
import com.twitter.util._
import com.twitter.util.Duration
import com.twitter.util.FuturePool
import com.twitter.finagle.builder.{Server, ServerBuilder, ServerConfig}
import java.net.InetSocketAddress
import java.util.logging.Logger
import scala.None
import java.util.concurrent.Executors
import java.util.concurrent.ExecutorService
import com.google.common.base.Preconditions
import com.twitter.finagle.protobuf.rpc.ServiceExceptionHandler
import com.google.protobuf.DynamicMessage
import com.google.protobuf.DynamicMessage.Builder
import com.google.protobuf._
import com.google.protobuf.Descriptors._
import com.twitter.util.Promise

class RpcServerImpl(sb: ServerBuilder[(String, Message), (String, Message), Any, Any, Any], port: Int, service: Service, handler: ServiceExceptionHandler[Message], executorService: ExecutorService) extends RpcServer {

  private val log = Logger.getLogger(getClass.toString)

  Preconditions.checkNotNull(executorService)
  Preconditions.checkNotNull(handler)

  private val execFuturePool = new ExecutorServiceFuturePool(executorService)

  private val server: Server = ServerBuilder.safeBuild(ServiceDispatcher(service, handler, execFuturePool),
    sb
      .codec(new ProtoBufCodec(service))
      .name(getClass().getName())
      .bindTo(new InetSocketAddress(port)))

  def close(d: Duration) = {
    server.close(d)
  }
}

class ServiceDispatcher(service: com.google.protobuf.Service, handler: ServiceExceptionHandler[Message], futurePool: FuturePool) extends com.twitter.finagle.Service[(String, Message), (String, Message)] {

  private val log = Logger.getLogger(getClass.toString)

  def apply(request: (String, Message)) = {

    val methodName = request._1
    val reqMessage = request._2

    Util.log("Request", methodName, reqMessage)
    val m = service.getDescriptorForType().findMethodByName(methodName)
    if (m == null) {
      throw new java.lang.AssertionError("Should never happen, we already decoded " + methodName)
    }

    val promise = new Promise[(String, Message)]()

    // dispatch to the service method
    val task = () => {
      try {
        service.callMethod(m, null, reqMessage, new RpcCallback[Message]() {

          def run(msg: Message) = {
            Util.log("Response", methodName, msg)
            promise.setValue((methodName, msg))
          }

        })
      } catch {
        case e: RuntimeException =>
          log.warning("#apply# Exception: " + e.getMessage)
          if (handler.canHandle(e)) {
            promise.setValue((methodName, handler.handle(e, constructEmptyResponseMessage(m))))
          } else {
            // fail the promise so the caller is not left waiting forever
            promise.setException(e)
          }
      }
    }
    futurePool(task())
    promise
  }

  def constructEmptyResponseMessage(m: MethodDescriptor): Message = {
    val outputType = m.getOutputType();
    DynamicMessage.newBuilder(outputType).build()
  }
}

object ServiceDispatcher {
  def apply(service: com.google.protobuf.Service, handler: ServiceExceptionHandler[Message], futurePool: FuturePool): ServiceDispatcher = {
    new ServiceDispatcher(service, handler, futurePool)
  }
} 
Example 66
Source File: RpcChannelImpl.scala    From finagle-protobuf   with Apache License 2.0 5 votes vote down vote up
package com.twitter.finagle.protobuf.rpc.impl

import java.net.InetSocketAddress
import com.google.protobuf.Descriptors.MethodDescriptor
import com.google.protobuf.RpcCallback
import com.google.protobuf.Message
import com.google.protobuf.RpcChannel
import com.google.protobuf.RpcController
import com.google.protobuf.Service
import java.util.logging.Logger
import com.twitter.util.Duration
import com.twitter.util.FuturePool
import com.twitter.finagle.builder.ClientBuilder
import java.util.concurrent.ExecutorService
import com.twitter.finagle.protobuf.rpc.RpcControllerWithOnFailureCallback
import com.twitter.finagle.protobuf.rpc.channel.ProtoBufCodec
import com.twitter.finagle.ChannelClosedException
import com.twitter.finagle.protobuf.rpc.Util
import com.twitter.finagle.protobuf.rpc.ExceptionResponseHandler

class RpcChannelImpl(cb: ClientBuilder[(String, Message), (String, Message), Any, Any, Any], s: Service, handler: ExceptionResponseHandler[Message], executorService: ExecutorService) extends RpcChannel {

  private val log = Logger.getLogger(getClass.toString)

  private val futurePool = FuturePool(executorService)

  private val client: com.twitter.finagle.Service[(String, Message), (String, Message)] = cb
    .codec(new ProtoBufCodec(s))
    .unsafeBuild()

  def callMethod(m: MethodDescriptor, controller: RpcController,
                 request: Message, responsePrototype: Message,
                 done: RpcCallback[Message]): Unit = {
    // retries is a workaround for ChannelClosedException raised when servers shut down.
    val retries = 3

    callMethod(m, controller, request, responsePrototype, done, retries)
  }

  def callMethod(m: MethodDescriptor, controller: RpcController,
                 request: Message, responsePrototype: Message,
                 done: RpcCallback[Message], retries: Int): Unit = {

    Util.log("Request", m.getName(), request)
    val req = (m.getName(), request)

    client(req) onSuccess {
      result =>
        Util.log("Response", m.getName(), result._2)
        futurePool({
          handle(done, controller, result._2)
        })
    } onFailure {
      e =>
        log.warning("#callMethod# Failed. " + e.getMessage)
        e match {
          case _: ChannelClosedException => if (retries > 1) {
            log.warning("#callMethod# Retrying.")
            callMethod(m, controller, request, responsePrototype, done, retries - 1)
          } else {
            controller.asInstanceOf[RpcControllerWithOnFailureCallback].setFailed(e)
          }
          case _ => controller.asInstanceOf[RpcControllerWithOnFailureCallback].setFailed(e)
        }
    }
  }

  def handle(done: RpcCallback[Message], controller: RpcController, m: Message) {
    if (handler.canHandle(m)) {
      controller.asInstanceOf[RpcControllerWithOnFailureCallback].setFailed(handler.handle(m))
    } else {
      done.run(m)
    }
  }

  def release() {
     client.close()
  }
} 
Example 67
Source File: NettyUtil.scala    From incubator-retired-gearpump   with Apache License 2.0 5 votes vote down vote up
package org.apache.gearpump.transport.netty

import java.net.InetSocketAddress
import java.util.concurrent.{Executors, ThreadFactory}

import org.jboss.netty.bootstrap.{ClientBootstrap, ServerBootstrap}
import org.jboss.netty.channel.socket.nio.NioServerSocketChannelFactory
import org.jboss.netty.channel.{Channel, ChannelFactory, ChannelPipelineFactory}

object NettyUtil {

  def newNettyServer(
      name: String,
      pipelineFactory: ChannelPipelineFactory,
      buffer_size: Int,
      inputPort: Int = 0): (Int, Channel) = {
    val bossFactory: ThreadFactory = new NettyRenameThreadFactory(name + "-boss")
    val workerFactory: ThreadFactory = new NettyRenameThreadFactory(name + "-worker")
    val factory = new NioServerSocketChannelFactory(Executors.newCachedThreadPool(bossFactory),
      Executors.newCachedThreadPool(workerFactory), 1)

    val bootstrap = createServerBootStrap(factory, pipelineFactory, buffer_size)
    val channel: Channel = bootstrap.bind(new InetSocketAddress(inputPort))
    val port = channel.getLocalAddress().asInstanceOf[InetSocketAddress].getPort()
    (port, channel)
  }

  def createServerBootStrap(
      factory: ChannelFactory, pipelineFactory: ChannelPipelineFactory, buffer_size: Int)
    : ServerBootstrap = {
    val bootstrap = new ServerBootstrap(factory)
    bootstrap.setOption("child.tcpNoDelay", true)
    bootstrap.setOption("child.receiveBufferSize", buffer_size)
    bootstrap.setOption("child.keepAlive", true)
    bootstrap.setPipelineFactory(pipelineFactory)
    bootstrap
  }

  def createClientBootStrap(
      factory: ChannelFactory, pipelineFactory: ChannelPipelineFactory, buffer_size: Int)
    : ClientBootstrap = {
    val bootstrap = new ClientBootstrap(factory)
    bootstrap.setOption("tcpNoDelay", true)
    bootstrap.setOption("sendBufferSize", buffer_size)
    bootstrap.setOption("keepAlive", true)
    bootstrap.setPipelineFactory(pipelineFactory)
    bootstrap
  }
} 
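
Because inputPort defaults to 0, callers normally let the OS choose and read the port back from the returned pair. A usage sketch (the empty pipeline factory is a placeholder):

import org.jboss.netty.channel.{ChannelPipeline, ChannelPipelineFactory, Channels}

val pipelineFactory = new ChannelPipelineFactory {
  override def getPipeline: ChannelPipeline = Channels.pipeline() // no handlers: placeholder only
}
val (port, channel) = NettyUtil.newNettyServer("demo", pipelineFactory, buffer_size = 64 * 1024)
println(s"server bound on port $port")
channel.close()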
Example 68
Source File: ClientTest.scala    From bitcoin-s-spv-node   with MIT License 5 votes vote down vote up
package org.bitcoins.spvnode.networking

import java.net.{InetSocketAddress, ServerSocket}

import akka.actor.ActorSystem
import akka.io.{Inet, Tcp}
import akka.testkit.{ImplicitSender, TestActorRef, TestKit, TestProbe}
import org.bitcoins.core.config.TestNet3
import org.bitcoins.core.util.{BitcoinSLogger, BitcoinSUtil}
import org.bitcoins.spvnode.messages.control.VersionMessage
import org.bitcoins.spvnode.messages.{NetworkPayload, VersionMessage}
import org.bitcoins.spvnode.util.BitcoinSpvNodeUtil
import org.scalatest.{BeforeAndAfter, BeforeAndAfterAll, FlatSpecLike, MustMatchers}

import scala.concurrent.duration._
import scala.util.Try

class ClientTest extends TestKit(ActorSystem("ClientTest")) with FlatSpecLike
  with MustMatchers with ImplicitSender
  with BeforeAndAfter with BeforeAndAfterAll with BitcoinSLogger {

  "Client" must "connect to a node on the bitcoin network, " +
    "send a version message to a peer on the network and receive a version message back, then close that connection" in {
    val probe = TestProbe()

    val client = TestActorRef(Client.props,probe.ref)

    val remote = new InetSocketAddress(TestNet3.dnsSeeds(0), TestNet3.port)
    val randomPort = 23521
    //random port
    client ! Tcp.Connect(remote, Some(new InetSocketAddress(randomPort)))

    //val bound : Tcp.Bound = probe.expectMsgType[Tcp.Bound]
    val conn : Tcp.Connected = probe.expectMsgType[Tcp.Connected]

    //make sure the socket is currently bound
    Try(new ServerSocket(randomPort)).isSuccess must be (false)
    client ! Tcp.Abort
    val confirmedClosed = probe.expectMsg(Tcp.Aborted)

    //make sure the port is now available
    val boundSocket = Try(new ServerSocket(randomPort))
    boundSocket.isSuccess must be (true)

    boundSocket.get.close()

  }

  it must "bind and connect to two nodes on one port" in {
    //NOTE if this test case fails it is more than likely because one of the two dns seeds
    //below is offline
    val remote1 = new InetSocketAddress(TestNet3.dnsSeeds(0), TestNet3.port)
    val remote2 = new InetSocketAddress(TestNet3.dnsSeeds(2), TestNet3.port)

    val probe1 = TestProbe()
    val probe2 = TestProbe()


    val client1 = TestActorRef(Client.props, probe1.ref)
    val client2 = TestActorRef(Client.props, probe2.ref)

    val local1 = new InetSocketAddress(TestNet3.port)
    val options = List(Inet.SO.ReuseAddress(true))
    client1 ! Tcp.Connect(remote1,Some(local1),options)


    probe1.expectMsgType[Tcp.Connected]
    client1 ! Tcp.Abort

    val local2 = new InetSocketAddress(TestNet3.port)
    client2 ! Tcp.Connect(remote2,Some(local2),options)
    probe2.expectMsgType[Tcp.Connected](5.seconds)
    client2 ! Tcp.Abort
  }

  override def afterAll: Unit = {
    TestKit.shutdownActorSystem(system)
  }


} 
Example 69
Source File: ZmqConfig.scala    From bitcoin-s   with MIT License 5 votes vote down vote up
package org.bitcoins.rpc.config

import java.net.InetSocketAddress

import org.bitcoins.core.util.BitcoinSLogger

sealed trait ZmqConfig {
  def hashBlock: Option[InetSocketAddress]
  def rawBlock: Option[InetSocketAddress]
  def hashTx: Option[InetSocketAddress]
  def rawTx: Option[InetSocketAddress]
}

object ZmqConfig extends BitcoinSLogger {

  private case class ZmqConfigImpl(
      hashBlock: Option[InetSocketAddress],
      rawBlock: Option[InetSocketAddress],
      hashTx: Option[InetSocketAddress],
      rawTx: Option[InetSocketAddress]
  ) extends ZmqConfig

  def apply(
      hashBlock: Option[InetSocketAddress] = None,
      rawBlock: Option[InetSocketAddress] = None,
      hashTx: Option[InetSocketAddress] = None,
      rawTx: Option[InetSocketAddress] = None
  ): ZmqConfig =
    ZmqConfigImpl(hashBlock = hashBlock,
                  rawBlock = rawBlock,
                  hashTx = hashTx,
                  rawTx = rawTx)

  
  def fromPort(port: Int): ZmqConfig = {
    val uri = new InetSocketAddress("tcp://127.0.0.1", port)
    ZmqConfig(hashBlock = Some(uri),
              rawBlock = Some(uri),
              hashTx = Some(uri),
              rawTx = Some(uri))
  }

  def fromConfig(config: BitcoindConfig): ZmqConfig =
    ZmqConfig(hashBlock = config.zmqpubhashblock,
              hashTx = config.zmqpubhashtx,
              rawBlock = config.zmqpubrawblock,
              rawTx = config.zmqpubrawtx)

} 
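
Usage then reduces to picking a constructor and inspecting the optional endpoints (the port is illustrative):

val zmq = ZmqConfig.fromPort(28332)
zmq.rawTx.foreach(addr => println(s"raw transactions published at $addr"))
zmq.hashBlock.foreach(addr => println(s"block hashes published at $addr"))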
Example 70
Source File: ZMQSubscriberTest.scala    From bitcoin-s   with MIT License 5 votes vote down vote up
package org.bitcoins.zmq

import java.net.InetSocketAddress

import org.bitcoins.core.util.BytesUtil
import org.scalatest.flatspec.AsyncFlatSpec
import org.slf4j.LoggerFactory
import org.zeromq.{ZFrame, ZMQ, ZMsg}
import scodec.bits.ByteVector

import scala.concurrent.Promise

class ZMQSubscriberTest extends AsyncFlatSpec {
  private val logger = LoggerFactory.getLogger(this.getClass().toString)

  behavior of "ZMQSubscriber"

  it must "connect to a regtest instance of a daemon and stream txs/blocks from it" in {
    //note for this unit test to pass, you need to setup a bitcoind instance yourself
    //and set the bitcoin.conf file to allow for
    //zmq connections
    //see: https://github.com/bitcoin/bitcoin/blob/master/doc/zmq.md
    val socket = new InetSocketAddress("tcp://127.0.0.1", 29000)

    val zmqSub =
      new ZMQSubscriber(socket, None, None, rawTxListener, rawBlockListener)
    //stupid, doesn't test anything, for now. You need to look at log output to verify this is working
    // TODO: In the future this could use the testkit to verify the subscriber by calling generate(1)
    zmqSub.start()
    Thread.sleep(10000) // 10 seconds
    zmqSub.stop

    succeed
  }

  it must "be able to subscribe to a publisher and read a value" in {
    val port = 1000 + scala.util.Random.nextInt(14000) // random port in [1000, 15000)
    val socket = new InetSocketAddress("tcp://127.0.0.1", port)

    val context = ZMQ.context(1)
    val publisher = context.socket(ZMQ.PUB)

    val uri = socket.getHostString + ":" + socket.getPort
    publisher.bind(uri)

    val valuePromise = Promise[String]()
    val fakeBlockListener: Option[ByteVector => Unit] = Some { bytes =>
      val str = new String(bytes.toArray)
      valuePromise.success(str)
      ()
    }

    val sub = new ZMQSubscriber(socket, None, None, None, fakeBlockListener)
    sub.start()
    Thread.sleep(1000)

    val testValue = "sweet, sweet satoshis"

    val msg = new ZMsg()
    msg.add(new ZFrame(RawBlock.topic))
    msg.add(new ZFrame(testValue))

    val sent = msg.send(publisher)
    assert(sent)

    valuePromise.future.map { str =>
      sub.stop
      publisher.close()
      context.term()

      assert(str == testValue)
    }
  }

  val rawBlockListener: Option[ByteVector => Unit] = Some {
    { bytes: ByteVector =>
      val hex = BytesUtil.encodeHex(bytes)
      logger.debug(s"received raw block ${hex}")
    }
  }

  val hashBlockListener: Option[ByteVector => Unit] = Some {
    { bytes: ByteVector =>
      val hex = BytesUtil.encodeHex(bytes)
      logger.debug(s"received raw block hash ${hex}")

    }
  }

  val rawTxListener: Option[ByteVector => Unit] = Some {
    { bytes: ByteVector =>
      val hex = BytesUtil.encodeHex(bytes)
      logger.debug(s"received raw tx ${hex}")
    }
  }
} 
Example 71
Source File: NetworkIpAddress.scala    From bitcoin-s   with MIT License 5 votes vote down vote up
package org.bitcoins.core.p2p

import java.net.{InetAddress, InetSocketAddress}

import org.bitcoins.core.number.UInt32
import org.bitcoins.core.serializers.p2p._
import org.bitcoins.crypto.{Factory, NetworkElement}
import scodec.bits._


  def writeAddress(iNetAddress: InetAddress): ByteVector = {
    if (iNetAddress.getAddress.size == 4) {
      //this means we need to convert the IPv4 address to an IPv6 address
      //first we have an 80 bit prefix of zeros
      val zeroBytes = ByteVector.fill(10)(0)
      //the next 16 bits are ones
      val oneBytes = hex"ffff"

      val prefix: ByteVector = zeroBytes ++ oneBytes
      val addr = prefix ++ ByteVector(iNetAddress.getAddress)
      addr
    } else {
      ByteVector(iNetAddress.getAddress)
    }
  }

  private case class NetworkIpAddressImpl(
      time: UInt32,
      services: ServiceIdentifier,
      address: InetAddress,
      port: Int)
      extends NetworkIpAddress

  def apply(
      time: UInt32,
      services: ServiceIdentifier,
      address: InetAddress,
      port: Int): NetworkIpAddress = {
    NetworkIpAddressImpl(time, services, address, port)
  }

  def fromBytes(bytes: ByteVector): NetworkIpAddress =
    RawNetworkIpAddressSerializer.read(bytes)

  def fromInetSocketAddress(
      socket: InetSocketAddress,
      services: ServiceIdentifier): NetworkIpAddress = {
    //TODO: this might be wrong, read this time documentation above
    val timestamp = UInt32(System.currentTimeMillis() / 1000)

    NetworkIpAddress(
      time = timestamp,
      services = services,
      address = socket.getAddress,
      port = socket.getPort
    )
  }
} 
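
writeAddress always yields the 16-byte IPv4-mapped IPv6 form (::ffff:a.b.c.d) for 4-byte addresses, which is what the Bitcoin wire format expects. A quick check (sketch):

import java.net.InetAddress

val encoded = NetworkIpAddress.writeAddress(InetAddress.getByName("127.0.0.1"))
// 10 zero bytes ++ 0xffff ++ the 4 IPv4 bytes = 16 bytes total
assert(encoded.size == 16)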
Example 72
Source File: NetworkUtil.scala    From bitcoin-s   with MIT License 5 votes vote down vote up
package org.bitcoins.core.util

import java.net.InetSocketAddress

abstract class NetworkUtil {

  private def parsePort(port: String): Int = {
    lazy val errorMsg = s"Invalid peer port: $port"
    try {
      val res = port.toInt
      if (res < 0 || res > 0xffff) {
        throw new RuntimeException(errorMsg)
      }
      res
    } catch {
      case _: NumberFormatException =>
        throw new RuntimeException(errorMsg)
    }
  }

  
  def parseInetSocketAddress(
      address: String,
      defaultPort: Int): InetSocketAddress = {
    address.split(":") match {
      case Array(host)       => new InetSocketAddress(host, defaultPort)
      case Array(host, port) => new InetSocketAddress(host, parsePort(port))
      case _                 => throw new RuntimeException(s"Invalid peer address: $address")
    }
  }
}

object NetworkUtil extends NetworkUtil 
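
For example (illustrative hosts; note that constructing an InetSocketAddress resolves the host name eagerly):

// an explicit port wins over the default
NetworkUtil.parseInetSocketAddress("node.example.com:8333", defaultPort = 18333)
// a bare host falls back to the default port
NetworkUtil.parseInetSocketAddress("10.0.0.5", defaultPort = 18333)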
Example 73
Source File: UDPSender.scala    From censorinus   with MIT License 5 votes vote down vote up
package github.gphat.censorinus

import java.net.{InetSocketAddress,SocketException}
import java.nio.ByteBuffer
import java.nio.channels.UnresolvedAddressException
import java.nio.channels.DatagramChannel

class UDPSender(
  hostname: String = "localhost",
  port: Int = MetricSender.DEFAULT_STATSD_PORT,
  allowExceptions: Boolean = false
) extends MetricSender {

  lazy val clientSocket = DatagramChannel.open.connect(new InetSocketAddress(hostname, port))

  def send(message: ByteBuffer): Unit = {
    try {
      val _ = clientSocket.write(message)
    } catch {
      case se @ (_ : SocketException | _ : UnresolvedAddressException) => {
        // Check if we're allowing exceptions and rethrow if so. We didn't use
        // a guard on the case because then we'd need a second case to catch
        // the !allowExceptions case!
        if(allowExceptions) {
          throw se
        }
      }
    }
  }

  def shutdown: Unit = clientSocket.close
} 
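
Typical use pairs the sender with a pre-encoded statsd line (a sketch; the metric string is illustrative):

import java.nio.ByteBuffer
import java.nio.charset.StandardCharsets

val sender = new UDPSender(hostname = "localhost")
sender.send(ByteBuffer.wrap("requests:1|c".getBytes(StandardCharsets.UTF_8)))
sender.shutdown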
Example 74
Source File: ServiceLocatorSessionProvider.scala    From lagom   with Apache License 2.0 5 votes vote down vote up
package com.lightbend.lagom.internal.persistence.cassandra

import java.net.InetSocketAddress
import java.net.URI

import scala.collection.immutable
import scala.concurrent.ExecutionContext
import scala.concurrent.Future
import scala.concurrent.Promise
import scala.concurrent.duration._
import scala.util.control.NoStackTrace

import akka.actor.ActorSystem
import akka.persistence.cassandra.ConfigSessionProvider
import com.typesafe.config.Config
import play.api.Logger


private[lagom] final class ServiceLocatorSessionProvider(system: ActorSystem, config: Config)
    extends ConfigSessionProvider(system, config) {
  private val log = Logger(getClass)

  override def lookupContactPoints(
      clusterId: String
  )(implicit ec: ExecutionContext): Future[immutable.Seq[InetSocketAddress]] = {
    ServiceLocatorHolder(system).serviceLocatorEventually.flatMap { serviceLocatorAdapter =>
      serviceLocatorAdapter.locateAll(clusterId).map {
        case Nil => throw new NoContactPointsException(s"No contact points for [$clusterId]")
        case uris =>
          log.debug(s"Found Cassandra contact points: $uris")

          // URIs must be all valid
          uris.foreach { uri =>
            require(uri.getHost != null, s"missing host in $uri for Cassandra contact points $clusterId")
            require(uri.getPort != -1, s"missing port in $uri for Cassandra contact points $clusterId")
          }

          uris.map { uri =>
            new InetSocketAddress(uri.getHost, uri.getPort)
          }
      }
    }
  }
}

private[lagom] final class NoContactPointsException(msg: String) extends RuntimeException(msg) with NoStackTrace 
Example 75
Source File: ServiceLocatorSessionProviderSpec.scala    From lagom   with Apache License 2.0 5 votes vote down vote up
package com.lightbend.lagom.internal.persistence.cassandra

import java.net.InetSocketAddress
import java.net.URI

import akka.actor.ActorSystem
import akka.testkit.TestKit
import com.typesafe.config.Config
import com.typesafe.config.ConfigFactory
import org.scalatest.BeforeAndAfterAll

import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.duration._
import scala.concurrent.Await
import scala.concurrent.Future
import org.scalatest.matchers.must.Matchers
import org.scalatest.wordspec.AnyWordSpec

class ServiceLocatorSessionProviderSpec extends AnyWordSpec with Matchers with BeforeAndAfterAll {
  val system         = ActorSystem("test")
  val config: Config = ConfigFactory.load()
  val uri            = new URI("http://localhost:8080")

  protected override def afterAll(): Unit = {
    TestKit.shutdownActorSystem(actorSystem = system, verifySystemShutdown = true)
  }

  val locator = new ServiceLocatorAdapter {
    override def locateAll(name: String): Future[List[URI]] = {
      name match {
        case "existing" => Future.successful(List(uri))
        case "absent"   => Future.successful(Nil)
      }
    }
  }

  val providerConfig: Config = config.getConfig("lagom.persistence.read-side.cassandra")
  val provider               = new ServiceLocatorSessionProvider(system, providerConfig)
  ServiceLocatorHolder(system).setServiceLocator(locator)

  "ServiceLocatorSessionProvider" should {
    "Get the address when the contact points exist" in {
      val future = provider.lookupContactPoints("existing")

      Await.result(future, 3.seconds) mustBe Seq(new InetSocketAddress(uri.getHost, uri.getPort))
    }

    "Fail the future when the contact points do not exist" in {
      val future = provider.lookupContactPoints("absent")

      intercept[NoContactPointsException] {
        Await.result(future, 3.seconds)
      }
    }
  }
} 
Example 76
Source File: FlumeTestUtils.scala    From sparkoscope   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.streaming.flume

import java.net.{InetSocketAddress, ServerSocket}
import java.nio.ByteBuffer
import java.nio.charset.StandardCharsets
import java.util.{List => JList}
import java.util.Collections

import scala.collection.JavaConverters._

import org.apache.avro.ipc.NettyTransceiver
import org.apache.avro.ipc.specific.SpecificRequestor
import org.apache.commons.lang3.RandomUtils
import org.apache.flume.source.avro
import org.apache.flume.source.avro.{AvroFlumeEvent, AvroSourceProtocol}
import org.jboss.netty.channel.ChannelPipeline
import org.jboss.netty.channel.socket.SocketChannel
import org.jboss.netty.channel.socket.nio.NioClientSocketChannelFactory
import org.jboss.netty.handler.codec.compression.{ZlibDecoder, ZlibEncoder}

import org.apache.spark.util.Utils
import org.apache.spark.SparkConf

// The enclosing helper was stripped by the example extractor; a minimal shell
// is restored so the fragment parses.
private[flume] object FlumeTestUtils {

  private class CompressionChannelFactory(compressionLevel: Int)
    extends NioClientSocketChannelFactory {

    override def newChannel(pipeline: ChannelPipeline): SocketChannel = {
      val encoder = new ZlibEncoder(compressionLevel)
      pipeline.addFirst("deflater", encoder)
      pipeline.addFirst("inflater", new ZlibDecoder())
      super.newChannel(pipeline)
    }
  }

} 
Example 77
Source File: GraphiteSink.scala    From sparkoscope   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.metrics.sink

import java.net.InetSocketAddress
import java.util.Properties
import java.util.concurrent.TimeUnit

import com.codahale.metrics.MetricRegistry
import com.codahale.metrics.graphite.{Graphite, GraphiteReporter, GraphiteUDP}

import org.apache.spark.SecurityManager
import org.apache.spark.metrics.MetricsSystem

private[spark] class GraphiteSink(val property: Properties, val registry: MetricRegistry,
    securityMgr: SecurityManager) extends Sink {
  val GRAPHITE_DEFAULT_PERIOD = 10
  val GRAPHITE_DEFAULT_UNIT = "SECONDS"
  val GRAPHITE_DEFAULT_PREFIX = ""

  val GRAPHITE_KEY_HOST = "host"
  val GRAPHITE_KEY_PORT = "port"
  val GRAPHITE_KEY_PERIOD = "period"
  val GRAPHITE_KEY_UNIT = "unit"
  val GRAPHITE_KEY_PREFIX = "prefix"
  val GRAPHITE_KEY_PROTOCOL = "protocol"

  def propertyToOption(prop: String): Option[String] = Option(property.getProperty(prop))

  if (propertyToOption(GRAPHITE_KEY_HOST).isEmpty) {
    throw new Exception("Graphite sink requires 'host' property.")
  }

  if (propertyToOption(GRAPHITE_KEY_PORT).isEmpty) {
    throw new Exception("Graphite sink requires 'port' property.")
  }

  val host = propertyToOption(GRAPHITE_KEY_HOST).get
  val port = propertyToOption(GRAPHITE_KEY_PORT).get.toInt

  val pollPeriod = propertyToOption(GRAPHITE_KEY_PERIOD) match {
    case Some(s) => s.toInt
    case None => GRAPHITE_DEFAULT_PERIOD
  }

  val pollUnit: TimeUnit = propertyToOption(GRAPHITE_KEY_UNIT) match {
    case Some(s) => TimeUnit.valueOf(s.toUpperCase())
    case None => TimeUnit.valueOf(GRAPHITE_DEFAULT_UNIT)
  }

  val prefix = propertyToOption(GRAPHITE_KEY_PREFIX).getOrElse(GRAPHITE_DEFAULT_PREFIX)

  MetricsSystem.checkMinimalPollingPeriod(pollUnit, pollPeriod)

  val graphite = propertyToOption(GRAPHITE_KEY_PROTOCOL).map(_.toLowerCase) match {
    case Some("udp") => new GraphiteUDP(new InetSocketAddress(host, port))
    case Some("tcp") | None => new Graphite(new InetSocketAddress(host, port))
    case Some(p) => throw new Exception(s"Invalid Graphite protocol: $p")
  }

  val reporter: GraphiteReporter = GraphiteReporter.forRegistry(registry)
      .convertDurationsTo(TimeUnit.MILLISECONDS)
      .convertRatesTo(TimeUnit.SECONDS)
      .prefixedWith(prefix)
      .build(graphite)

  override def start() {
    reporter.start(pollPeriod, pollUnit)
  }

  override def stop() {
    reporter.stop()
  }

  override def report() {
    reporter.report()
  }
} 
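
A minimal property set for this sink, matching the keys read above (values are illustrative):

import java.util.Properties

val props = new Properties()
props.setProperty("host", "graphite.internal") // required
props.setProperty("port", "2003")              // required
props.setProperty("period", "10")              // optional, defaults to 10
props.setProperty("unit", "seconds")           // uppercased into TimeUnit.SECONDS
props.setProperty("protocol", "tcp")           // "udp", "tcp", or omitted (defaults to tcp)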
Example 78
Source File: NettyRpcHandlerSuite.scala    From sparkoscope   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.rpc.netty

import java.net.InetSocketAddress
import java.nio.ByteBuffer

import io.netty.channel.Channel
import org.mockito.Matchers._
import org.mockito.Mockito._

import org.apache.spark.SparkFunSuite
import org.apache.spark.network.client.{TransportClient, TransportResponseHandler}
import org.apache.spark.network.server.StreamManager
import org.apache.spark.rpc._

class NettyRpcHandlerSuite extends SparkFunSuite {

  val env = mock(classOf[NettyRpcEnv])
  val sm = mock(classOf[StreamManager])
  when(env.deserialize(any(classOf[TransportClient]), any(classOf[ByteBuffer]))(any()))
    .thenReturn(RequestMessage(RpcAddress("localhost", 12345), null, null))

  test("receive") {
    val dispatcher = mock(classOf[Dispatcher])
    val nettyRpcHandler = new NettyRpcHandler(dispatcher, env, sm)

    val channel = mock(classOf[Channel])
    val client = new TransportClient(channel, mock(classOf[TransportResponseHandler]))
    when(channel.remoteAddress()).thenReturn(new InetSocketAddress("localhost", 40000))
    nettyRpcHandler.channelActive(client)

    verify(dispatcher, times(1)).postToAll(RemoteProcessConnected(RpcAddress("localhost", 40000)))
  }

  test("connectionTerminated") {
    val dispatcher = mock(classOf[Dispatcher])
    val nettyRpcHandler = new NettyRpcHandler(dispatcher, env, sm)

    val channel = mock(classOf[Channel])
    val client = new TransportClient(channel, mock(classOf[TransportResponseHandler]))
    when(channel.remoteAddress()).thenReturn(new InetSocketAddress("localhost", 40000))
    nettyRpcHandler.channelActive(client)

    when(channel.remoteAddress()).thenReturn(new InetSocketAddress("localhost", 40000))
    nettyRpcHandler.channelInactive(client)

    verify(dispatcher, times(1)).postToAll(RemoteProcessConnected(RpcAddress("localhost", 40000)))
    verify(dispatcher, times(1)).postToAll(
      RemoteProcessDisconnected(RpcAddress("localhost", 40000)))
  }

} 
Example 79
Source File: AsyncSocketChannelObservable.scala    From monix-nio   with Apache License 2.0 5 votes vote down vote up
package monix.nio.tcp

import java.net.InetSocketAddress

import monix.execution.Callback
import monix.nio._
import monix.reactive.observers.Subscriber

import scala.concurrent.Promise


final class AsyncSocketChannelObservable private[tcp] (
  host: String, port: Int,
  override val bufferSize: Int = 256 * 1024) extends AsyncChannelObservable {

  private[this] val connectedSignal = Promise[Unit]()
  private[this] var taskSocketChannel: Option[TaskSocketChannel] = None
  private[this] var closeOnComplete = true

  private[tcp] def this(tsc: TaskSocketChannel, buffSize: Int, closeWhenDone: Boolean) = {
    this("", 0, buffSize)
    this.taskSocketChannel = Option(tsc)
    this.closeOnComplete = closeWhenDone
  }

  override lazy val channel = taskSocketChannel.map(asc => asyncChannelWrapper(asc, closeOnComplete))

  override def init(subscriber: Subscriber[Array[Byte]]) = {
    import subscriber.scheduler

    if (taskSocketChannel.isDefined) {
      connectedSignal.success(())
    } else {
      val connectCallback = new Callback[Throwable, Unit]() {
        override def onSuccess(value: Unit): Unit = {
          connectedSignal.success(())
        }
        override def onError(ex: Throwable): Unit = {
          connectedSignal.failure(ex)
          closeChannel()
          subscriber.onError(ex)
        }
      }
      taskSocketChannel = Option(TaskSocketChannel())
      taskSocketChannel.foreach(_.connect(new InetSocketAddress(host, port)).runAsync(connectCallback))
    }

    connectedSignal.future
  }
} 
Example 80
Source File: AsyncSocketChannelConsumer.scala    From monix-nio   with Apache License 2.0 5 votes vote down vote up
package monix.nio.tcp

import java.net.InetSocketAddress

import monix.execution.Callback
import monix.nio.AsyncChannelConsumer

import scala.concurrent.Promise


final class AsyncSocketChannelConsumer private[tcp] (
  host: String,
  port: Int) extends AsyncChannelConsumer {

  private[this] var taskSocketChannel: Option[TaskSocketChannel] = None
  private[this] var closeOnComplete = true

  private[tcp] def this(tsc: TaskSocketChannel, closeWhenDone: Boolean) = {
    this("", 0)
    this.taskSocketChannel = Option(tsc)
    this.closeOnComplete = closeWhenDone
  }

  override lazy val channel = taskSocketChannel.map(tsc => asyncChannelWrapper(tsc, closeOnComplete))

  override def init(subscriber: AsyncChannelSubscriber) = {
    import subscriber.scheduler

    val connectedPromise = Promise[Unit]()
    if (taskSocketChannel.isDefined) {
      connectedPromise.success(())
    } else {
      val connectCallback = new Callback[Throwable, Unit]() {
        override def onSuccess(value: Unit): Unit = {
          connectedPromise.success(())
        }
        override def onError(ex: Throwable): Unit = {
          connectedPromise.failure(ex)
          subscriber.closeChannel()
          subscriber.onError(ex)
        }
      }
      taskSocketChannel = Option(TaskSocketChannel())
      taskSocketChannel.foreach(_.connect(new InetSocketAddress(host, port)).runAsync(connectCallback))
    }

    connectedPromise.future
  }
} 
Example 81
Source File: UdpIntegrationSpec.scala    From monix-nio   with Apache License 2.0 5 votes vote down vote up
package monix.nio.udp

import java.net.InetSocketAddress

import minitest.SimpleTestSuite
import monix.eval.Task
import monix.execution.Ack
import monix.execution.Ack.{ Continue, Stop }
import monix.reactive.Observable

import scala.concurrent.duration._
import scala.concurrent.{ Await, Promise }

object UdpIntegrationSpec extends SimpleTestSuite {
  implicit val ctx = monix.execution.Scheduler.Implicits.global

  test("send and receive UDP packets successfully") {
    val data = Array.fill(8)("monix")

    val writes = (ch: TaskDatagramChannel, to: InetSocketAddress) => Observable
      .fromIterable(data)
      .mapEval(data => ch.send(Packet(data.getBytes, to)))

    val readsPromise = Promise[String]()
    val recv = new StringBuilder("")
    val reads = (ch: TaskDatagramChannel, maxSize: Int) => Observable
      .repeatEval(ch.receive(maxSize, 2.seconds))
      .mapEval(t => t)
      .map { packet =>
        packet.foreach(p => recv.append(new String(p.data)))
        packet
      }
      .guaranteeCase(_ => Task(readsPromise.success(recv.mkString)))
      .subscribe(_.fold[Ack](Stop)(_ => Continue))

    val program = for {
      ch <- bind("localhost", 2115).map { ch =>
        reads(ch, 64)
        ch
      }
      sent <- writes(ch, new InetSocketAddress("localhost", 2115)).sumL
      received <- Task.fromFuture(readsPromise.future)
      _ <- ch.close()
    } yield sent == 40 && received == data.mkString("") // 8 packets * 5 bytes ("monix") = 40 bytes sent

    val result = Await.result(program.runToFuture, 10.seconds)
    assert(result)
  }
} 
Example 82
Source File: AsyncSocketChannelSpec.scala    From monix-nio   with Apache License 2.0 5 votes vote down vote up
package monix.nio.tcp

import java.net.InetSocketAddress
import java.nio.ByteBuffer

import minitest.SimpleTestSuite

import scala.concurrent.Await
import scala.concurrent.duration._

object AsyncSocketChannelSpec extends SimpleTestSuite {

  test("simple connect and write test") {
    implicit val ctx = monix.execution.Scheduler.Implicits.global

    val asyncSocketChannel = AsyncSocketChannel()
    val connectF = asyncSocketChannel.connect(new InetSocketAddress("google.com", 80))

    val data = "Hello world!".getBytes("UTF-8")
    val bytes = ByteBuffer.wrap(data)
    val writeF = connectF
      .flatMap(_ => asyncSocketChannel.write(bytes, Some(4.seconds)))
      .map { result =>
        asyncSocketChannel.stopWriting()
        asyncSocketChannel.close()
        result
      }

    assertEquals(Await.result(writeF, 5.seconds), data.length)
  }

  test("simple connect and read test") {
    implicit val ctx = monix.execution.Scheduler.Implicits.global

    val asyncSocketChannel = AsyncSocketChannel()
    val connectF = asyncSocketChannel.connect(new InetSocketAddress("google.com", 80))

    val buff = ByteBuffer.allocate(0)
    val readF = connectF
      .flatMap(_ => asyncSocketChannel.read(buff, Some(4.seconds)))
      .map { _ =>
        asyncSocketChannel.stopReading()
        asyncSocketChannel.close()
        0
      }

    assertEquals(Await.result(readF, 5.seconds), 0)
  }
} 
Example 83
Source File: Coordinator.scala    From chordial   with BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
package com.tristanpenman.chordial.dht

import java.net.InetSocketAddress

import akka.actor.{Actor, ActorLogging, ActorRef, Props}
import akka.io.{IO, Udp}
import akka.util.Timeout
import com.tristanpenman.chordial.core.Node

import scala.concurrent.duration._
import scala.util.Random

class Coordinator(keyspaceBits: Int, nodeAddress: String, nodePort: Int, seedNode: Option[SeedNode])
    extends Actor
    with ActorLogging {
  import context.system

  require(keyspaceBits > 0, "keyspaceBits must be a positive Int value")

  private val idModulus = 1 << keyspaceBits

  implicit val ec = context.system.dispatcher

  IO(Udp) ! Udp.Bind(self, new InetSocketAddress(nodeAddress, nodePort))

  // How long Node should wait until an algorithm is considered to have timed out. This should be significantly
  // longer than the external request timeout, as some algorithms will make multiple external requests before
  // running to completion
  private val algorithmTimeout = Timeout(5000.milliseconds)

  // How long to wait when making requests that may be routed to other nodes
  private val externalRequestTimeout = Timeout(500.milliseconds)

  // TODO: Research how to handle collisions...
  val firstNodeId = Random.nextLong(idModulus)
  val firstNode = system.actorOf(
    Node.props(firstNodeId, keyspaceBits, algorithmTimeout, externalRequestTimeout, system.eventStream),
    s"node:${firstNodeId}"
  )

  seedNode match {
    case Some(value) =>
      log.info(s"seed node: ${value}")
    case _ =>
      log.info("not using a seed node")
  }

  def receive = {
    case Udp.Bound(local) =>
      context.become(ready(sender()))
  }

  def ready(socket: ActorRef): Receive = {
    case Udp.Received(data, remote) =>
    case Udp.Unbind                 => socket ! Udp.Unbind
    case Udp.Unbound                => context.stop(self)
  }
}

object Coordinator {
  def props(keyspaceBits: Int, nodeAddress: String, nodePort: Int, seedNode: Option[SeedNode]): Props =
    Props(new Coordinator(keyspaceBits, nodeAddress, nodePort, seedNode))
} 
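Note that the ready handler above currently ignores the payload of Udp.Received. As a hedged sketch only (the echo behaviour is illustrative, not part of the project), a reply can be sent back through the same socket actor via Akka's Udp.Send:

def ready(socket: ActorRef): Receive = {
  case Udp.Received(data, remote) =>
    socket ! Udp.Send(data, remote) // illustrative: echo the datagram back to its sender
  case Udp.Unbind                 => socket ! Udp.Unbind
  case Udp.Unbound                => context.stop(self)
}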
Example 84
Source File: MessageChunkHeader.scala    From SparkCore   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.network.nio

import java.net.{InetAddress, InetSocketAddress}
import java.nio.ByteBuffer

private[nio] class MessageChunkHeader(
    val typ: Long,
    val id: Int,
    val totalSize: Int,
    val chunkSize: Int,
    val other: Int,
    val hasError: Boolean,
    val securityNeg: Int,
    val address: InetSocketAddress) {
  lazy val buffer = {
    // No need to change this, at 'use' time, we do a reverse lookup of the hostname.
    // Refer to network.Connection
    val ip = address.getAddress.getAddress()
    val port = address.getPort()
    ByteBuffer.
      allocate(MessageChunkHeader.HEADER_SIZE).
      putLong(typ).
      putInt(id).
      putInt(totalSize).
      putInt(chunkSize).
      putInt(other).
      put(if (hasError) 1.asInstanceOf[Byte] else 0.asInstanceOf[Byte]).
      putInt(securityNeg).
      putInt(ip.size).
      put(ip).
      putInt(port).
      position(MessageChunkHeader.HEADER_SIZE).
      flip.asInstanceOf[ByteBuffer]
  }

  override def toString = "" + this.getClass.getSimpleName + ":" + id + " of type " + typ +
      " and sizes " + totalSize + " / " + chunkSize + " bytes, securityNeg: " + securityNeg

}


private[nio] object MessageChunkHeader {
  val HEADER_SIZE = 45

  def create(buffer: ByteBuffer): MessageChunkHeader = {
    if (buffer.remaining != HEADER_SIZE) {
      throw new IllegalArgumentException("Cannot convert buffer data to Message")
    }
    val typ = buffer.getLong()
    val id = buffer.getInt()
    val totalSize = buffer.getInt()
    val chunkSize = buffer.getInt()
    val other = buffer.getInt()
    val hasError = buffer.get() != 0
    val securityNeg = buffer.getInt()
    val ipSize = buffer.getInt()
    val ipBytes = new Array[Byte](ipSize)
    buffer.get(ipBytes)
    val ip = InetAddress.getByAddress(ipBytes)
    val port = buffer.getInt()
    new MessageChunkHeader(typ, id, totalSize, chunkSize, other, hasError, securityNeg,
      new InetSocketAddress(ip, port))
  }
} 
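A quick sanity check of HEADER_SIZE against the writes above: 8 (typ) + 4 (id) + 4 (totalSize) + 4 (chunkSize) + 4 (other) + 1 (hasError) + 4 (securityNeg) + 4 (ip length) + 4 (IPv4 address bytes) + 4 (port) = 41 bytes, leaving 4 bytes of slack in the 45-byte buffer. A 16-byte IPv6 address would need 53 bytes and overflow the allocation, so this layout effectively assumes IPv4 peers.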
Example 85
Source File: Message.scala    From SparkCore   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.network.nio

import java.net.InetSocketAddress
import java.nio.ByteBuffer

import scala.collection.mutable.ArrayBuffer

import com.google.common.base.Charsets.UTF_8

import org.apache.spark.util.Utils

private[nio] abstract class Message(val typ: Long, val id: Int) {
  var senderAddress: InetSocketAddress = null
  var started = false
  var startTime = -1L
  var finishTime = -1L
  var isSecurityNeg = false
  var hasError = false

  def size: Int

  def getChunkForSending(maxChunkSize: Int): Option[MessageChunk]

  def getChunkForReceiving(chunkSize: Int): Option[MessageChunk]

  def timeTaken(): String = (finishTime - startTime).toString + " ms"

  override def toString = this.getClass.getSimpleName + "(id = " + id + ", size = " + size + ")"
}


private[nio] object Message {
  val BUFFER_MESSAGE = 1111111111L

  var lastId = 1

  def getNewId() = synchronized {
    lastId += 1
    if (lastId == 0) {
      lastId += 1
    }
    lastId
  }

  def createBufferMessage(dataBuffers: Seq[ByteBuffer], ackId: Int): BufferMessage = {
    if (dataBuffers == null) {
      return new BufferMessage(getNewId(), new ArrayBuffer[ByteBuffer], ackId)
    }
    if (dataBuffers.exists(_ == null)) {
      throw new Exception("Attempting to create buffer message with null buffer")
    }
    new BufferMessage(getNewId(), new ArrayBuffer[ByteBuffer] ++= dataBuffers, ackId)
  }

  def createBufferMessage(dataBuffers: Seq[ByteBuffer]): BufferMessage =
    createBufferMessage(dataBuffers, 0)

  def createBufferMessage(dataBuffer: ByteBuffer, ackId: Int): BufferMessage = {
    if (dataBuffer == null) {
      createBufferMessage(Array(ByteBuffer.allocate(0)), ackId)
    } else {
      createBufferMessage(Array(dataBuffer), ackId)
    }
  }

  def createBufferMessage(dataBuffer: ByteBuffer): BufferMessage =
    createBufferMessage(dataBuffer, 0)

  def createBufferMessage(ackId: Int): BufferMessage = {
    createBufferMessage(new Array[ByteBuffer](0), ackId)
  }

  
  def createErrorMessage(exception: Exception, ackId: Int): BufferMessage = {
    val exceptionString = Utils.exceptionString(exception)
    val serializedExceptionString = ByteBuffer.wrap(exceptionString.getBytes(UTF_8))
    val errorMessage = createBufferMessage(serializedExceptionString, ackId)
    errorMessage.hasError = true
    errorMessage
  }

  def create(header: MessageChunkHeader): Message = {
    val newMessage: Message = header.typ match {
      case BUFFER_MESSAGE => new BufferMessage(header.id,
        ArrayBuffer(ByteBuffer.allocate(header.totalSize)), header.other)
    }
    newMessage.hasError = header.hasError
    newMessage.senderAddress = header.address
    newMessage
  }
} 
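A small hedged usage sketch of the factory API above (BufferMessage is defined elsewhere in the same package; the payload and ack id are illustrative):

import java.nio.ByteBuffer

val ping = Message.createBufferMessage(ByteBuffer.wrap("ping".getBytes(UTF_8)), 42)
// A null buffer is swapped for an empty one, so createBufferMessage never returns null.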
Example 86
Source File: GraphiteSink.scala    From SparkCore   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.metrics.sink

import java.net.InetSocketAddress
import java.util.Properties
import java.util.concurrent.TimeUnit

import com.codahale.metrics.MetricRegistry
import com.codahale.metrics.graphite.{GraphiteUDP, Graphite, GraphiteReporter}

import org.apache.spark.SecurityManager
import org.apache.spark.metrics.MetricsSystem

private[spark] class GraphiteSink(val property: Properties, val registry: MetricRegistry,
    securityMgr: SecurityManager) extends Sink {
  val GRAPHITE_DEFAULT_PERIOD = 10
  val GRAPHITE_DEFAULT_UNIT = "SECONDS"
  val GRAPHITE_DEFAULT_PREFIX = ""

  val GRAPHITE_KEY_HOST = "host"
  val GRAPHITE_KEY_PORT = "port"
  val GRAPHITE_KEY_PERIOD = "period"
  val GRAPHITE_KEY_UNIT = "unit"
  val GRAPHITE_KEY_PREFIX = "prefix"
  val GRAPHITE_KEY_PROTOCOL = "protocol"

  def propertyToOption(prop: String): Option[String] = Option(property.getProperty(prop))

  if (!propertyToOption(GRAPHITE_KEY_HOST).isDefined) {
    throw new Exception("Graphite sink requires 'host' property.")
  }

  if (!propertyToOption(GRAPHITE_KEY_PORT).isDefined) {
    throw new Exception("Graphite sink requires 'port' property.")
  }

  val host = propertyToOption(GRAPHITE_KEY_HOST).get
  val port = propertyToOption(GRAPHITE_KEY_PORT).get.toInt

  val pollPeriod = propertyToOption(GRAPHITE_KEY_PERIOD) match {
    case Some(s) => s.toInt
    case None => GRAPHITE_DEFAULT_PERIOD
  }

  val pollUnit: TimeUnit = propertyToOption(GRAPHITE_KEY_UNIT) match {
    case Some(s) => TimeUnit.valueOf(s.toUpperCase())
    case None => TimeUnit.valueOf(GRAPHITE_DEFAULT_UNIT)
  }

  val prefix = propertyToOption(GRAPHITE_KEY_PREFIX).getOrElse(GRAPHITE_DEFAULT_PREFIX)

  MetricsSystem.checkMinimalPollingPeriod(pollUnit, pollPeriod)

  val graphite = propertyToOption(GRAPHITE_KEY_PROTOCOL).map(_.toLowerCase) match {
    case Some("udp") => new GraphiteUDP(new InetSocketAddress(host, port))
    case Some("tcp") | None => new Graphite(new InetSocketAddress(host, port))
    case Some(p) => throw new Exception(s"Invalid Graphite protocol: $p")
  }

  val reporter: GraphiteReporter = GraphiteReporter.forRegistry(registry)
      .convertDurationsTo(TimeUnit.MILLISECONDS)
      .convertRatesTo(TimeUnit.SECONDS)
      .prefixedWith(prefix)
      .build(graphite)

  override def start() {
    reporter.start(pollPeriod, pollUnit)
  }

  override def stop() {
    reporter.stop()
  }

  override def report() {
    reporter.report()
  }
} 
Example 87
Source File: RealmConnector.scala    From wowchat   with GNU General Public License v3.0 5 votes vote down vote up
package wowchat.realm

import java.net.InetSocketAddress
import java.util.concurrent.TimeUnit

import wowchat.common._
import com.typesafe.scalalogging.StrictLogging
import io.netty.bootstrap.Bootstrap
import io.netty.channel.socket.SocketChannel
import io.netty.channel.socket.nio.NioSocketChannel
import io.netty.channel.{Channel, ChannelInitializer, ChannelOption}
import io.netty.handler.timeout.IdleStateHandler
import io.netty.util.concurrent.Future

import scala.util.Try

class RealmConnector(realmConnectionCallback: RealmConnectionCallback) extends StrictLogging {

  private var channel: Option[Channel] = None
  private var connected: Boolean = false

  def connect: Unit = {
    logger.info(s"Connecting to realm server ${Global.config.wow.realmlist.host}:${Global.config.wow.realmlist.port}")

    val bootstrap = new Bootstrap
    bootstrap.group(Global.group)
      .channel(classOf[NioSocketChannel])
      .option[java.lang.Integer](ChannelOption.CONNECT_TIMEOUT_MILLIS, 10000)
      .option[java.lang.Boolean](ChannelOption.SO_KEEPALIVE, true)
      .remoteAddress(new InetSocketAddress(Global.config.wow.realmlist.host, Global.config.wow.realmlist.port))
      .handler(new ChannelInitializer[SocketChannel]() {

        @throws[Exception]
        override protected def initChannel(socketChannel: SocketChannel): Unit = {
          val handler = if (WowChatConfig.getExpansion == WowExpansion.Vanilla) {
            new RealmPacketHandler(realmConnectionCallback)
          } else {
            new RealmPacketHandlerTBC(realmConnectionCallback)
          }

          socketChannel.pipeline.addLast(
            new IdleStateHandler(60, 120, 0),
            new IdleStateCallback,
            new RealmPacketDecoder,
            new RealmPacketEncoder,
            handler
          )
        }
      })

    channel = Some(bootstrap.connect.addListener((future: Future[_ >: Void]) => {
      Try {
        future.get(10, TimeUnit.SECONDS)
      }.fold(throwable => {
        logger.error(s"Failed to connect to realm server! ${throwable.getMessage}")
        realmConnectionCallback.disconnected
      }, _ => ())
    }).channel)
  }
} 
Example 88
Source File: EmbeddedZookeeper.scala    From embedded-kafka   with Apache License 2.0 5 votes vote down vote up
package com.tuplejump.embedded.kafka

import java.io.{File => JFile}
import java.net.InetSocketAddress
import java.util.concurrent.atomic.AtomicReference

import scala.util.Try
import org.I0Itec.zkclient.exception.ZkMarshallingError
import org.I0Itec.zkclient.serialize.ZkSerializer
import org.apache.zookeeper.server.{ NIOServerCnxnFactory, ZooKeeperServer }


  def start(): Unit = {
    val server = new ZooKeeperServer(snapDir, dataDir, tickTime)
    _zookeeper.set(Some(server))

    val (ip, port) = {
      val splits = connectTo.split(":")
      (splits(0), splits(1).toInt)
    }

    val f = new NIOServerCnxnFactory()
    f.configure(new InetSocketAddress(ip, port), 16)
    f.startup(server)

    _factory.set(Some(f))

    logger.info(s"ZooKeeperServer isRunning: $isRunning")
  }

  def shutdown(): Unit = {
    logger.info(s"Shutting down ZK NIOServerCnxnFactory.")

    for (v <- _factory.get) v.shutdown()
    _factory.set(None)

    for (v <- _zookeeper.get) {
      Try(v.shutdown())
      //awaitCond(!v.isRunning, 2000.millis)
      logger.info(s"ZooKeeper server shut down.")
    }
    _zookeeper.set(None)
  }
}

object DefaultStringSerializer extends ZkSerializer {

  @throws(classOf[ZkMarshallingError])
  def serialize(data: Object): Array[Byte] = data match {
    case a: String => a.getBytes("UTF-8")
    case _         => throw new ZkMarshallingError(s"Unsupported type '${data.getClass}'")
  }

  @throws(classOf[ZkMarshallingError])
  def deserialize(bytes: Array[Byte]): Object = bytes match {
    case b if Option(b).isEmpty => "" //ick
    case b                      => new String(bytes, "UTF-8")
  }
} 
Example 89
Source File: LogEvent.scala    From netty-in-action-scala   with Apache License 2.0 5 votes vote down vote up
package nia.chapter13

import java.net.InetSocketAddress


object LogEvent {
  val SEPARATOR: Byte = ':'.toByte

  def apply(logfile: String, msg: String): LogEvent = {
    LogEvent(null, -1, logfile, msg)
  }
}

case class LogEvent(
  source:   InetSocketAddress,
  received: Long,
  logfile:  String,
  msg:      String
) {
  override def toString: String = {
    s"$received [${source.toString}] [$logfile] : $msg"
  }
} 
Example 90
Source File: LogEventMonitor.scala    From netty-in-action-scala   with Apache License 2.0 5 votes vote down vote up
package nia.chapter13

import io.netty.bootstrap.Bootstrap
import io.netty.channel._
import io.netty.channel.nio.NioEventLoopGroup
import io.netty.channel.socket.nio.NioDatagramChannel
import java.net.InetSocketAddress
import java.lang.{ Boolean ⇒ JBoolean }


object LogEventMonitor {

  @throws[Exception]
  def main(args: Array[String]): Unit = {
    if (args.length != 1)
      throw new IllegalArgumentException("Usage: LogEventMonitor <port>")

    // Construct a new LogEventMonitor
    val monitor = new LogEventMonitor(new InetSocketAddress(args(0).toInt))
    try {
      val channel = monitor.bind()
      println("LogEventMonitor running")
      channel.closeFuture.sync()
    } finally {
      monitor.stop()
    }
  }
}

class LogEventMonitor(address: InetSocketAddress) {
  val group: EventLoopGroup = new NioEventLoopGroup
  val bootstrap = new Bootstrap
  // Bootstrap the NioDatagramChannel
  bootstrap.group(group)
    .channel(classOf[NioDatagramChannel])
    // Set the SO_BROADCAST socket option
    .option[JBoolean](ChannelOption.SO_BROADCAST, true)
    .handler(new ChannelInitializer[Channel]() {
      @throws[Exception]
      override protected def initChannel(channel: Channel): Unit = {
        val pipeline = channel.pipeline
        // Add the LogEventDecoder and LogEventHandler to the ChannelPipeline
        pipeline.addLast(new LogEventDecoder)
        pipeline.addLast(new LogEventHandler)
      }
    }).localAddress(address)

  def bind(): Channel = { // Bind the Channel. Note that a DatagramChannel is connectionless
    bootstrap.bind.syncUninterruptibly.channel
  }

  def stop(): Unit = {
    group.shutdownGracefully
  }
} 
Example 91
Source File: LogEventBroadcaster.scala    From netty-in-action-scala   with Apache License 2.0 5 votes vote down vote up
package nia.chapter13

import io.netty.bootstrap.Bootstrap
import io.netty.channel.{ ChannelOption, EventLoopGroup }
import io.netty.channel.nio.NioEventLoopGroup
import io.netty.channel.socket.nio.NioDatagramChannel
import java.io.File
import java.io.RandomAccessFile
import java.net.InetSocketAddress
import java.lang.{ Boolean ⇒ JBoolean }
import java.util.Objects

import scala.util.control.Breaks._


object LogEventBroadcaster {

  @throws[Exception]
  def main(args: Array[String]): Unit = {
    if (args.length != 2)
      throw new IllegalArgumentException

    // Create and start a new instance of LogEventBroadcaster
    val broadcaster =
      new LogEventBroadcaster(new InetSocketAddress("255.255.255.255", args(0).toInt), new File(args(1)))

    try {
      broadcaster.run()
    } finally {
      broadcaster.stop()
    }
  }
}

class LogEventBroadcaster(address: InetSocketAddress, file: File) {
  val group: EventLoopGroup = new NioEventLoopGroup
  val bootstrap = new Bootstrap

  // Bootstrap the NioDatagramChannel (connectionless)
  bootstrap
    .group(group)
    .channel(classOf[NioDatagramChannel])
    // Set the SO_BROADCAST socket option
    .option[JBoolean](ChannelOption.SO_BROADCAST, true)
    .handler(new LogEventEncoder(address))

  @throws[Exception]
  def run(): Unit = { // Bind the Channel
    val ch = bootstrap.bind(0).sync.channel
    var pointer: Long = 0
    // Start the main processing loop

    breakable {
      while (true) {
        val len = file.length
        if (len < pointer) { // file was reset
          // If necessary, reset the file pointer to the last byte of the file
          pointer = len
        } else if (len > pointer) { // Content was added
          val raf = new RandomAccessFile(file, "r")
          // Set the current file pointer to make sure no old log entries are re-sent
          raf.seek(pointer)
          Iterator.continually(raf.readLine())
            .takeWhile(Objects.nonNull)
            .foreach { line ⇒
              ch.writeAndFlush(LogEvent(file.getAbsolutePath, line))
            }
          // Store the current position within the file
          pointer = raf.getFilePointer
          raf.close()
        }
        try {
          // Sleep for 1 second; if interrupted, exit the loop, otherwise process again
          Thread.sleep(1000)
        } catch {
          case e: InterruptedException ⇒
            Thread.interrupted
            break
        }
      }
    }
  }

  def stop(): Unit = {
    group.shutdownGracefully()
  }
} 
Example 92
Source File: LogEventEncoder.scala    From netty-in-action-scala   with Apache License 2.0 5 votes vote down vote up
package nia.chapter13

import io.netty.channel.ChannelHandlerContext
import io.netty.channel.socket.DatagramPacket
import io.netty.handler.codec.MessageToMessageEncoder
import io.netty.util.CharsetUtil
import java.net.InetSocketAddress
import java.util


// LogEventEncoder creates DatagramPacket messages to be sent to the specified InetSocketAddress
class LogEventEncoder(remoteAddress: InetSocketAddress)
  extends MessageToMessageEncoder[LogEvent] {

  @throws[Exception]
  override protected def encode(
    channelHandlerContext: ChannelHandlerContext,
    logEvent:              LogEvent,
    out:                   util.List[AnyRef]): Unit = {
    val file = logEvent.logfile.getBytes(CharsetUtil.UTF_8)
    val msg = logEvent.msg.getBytes(CharsetUtil.UTF_8)
    val buf = channelHandlerContext.alloc.buffer(file.length + msg.length + 1)
    // Write the filename into the ByteBuf
    buf.writeBytes(file)
    // Append a SEPARATOR
    buf.writeByte(LogEvent.SEPARATOR)
    // Write the log message into the ByteBuf
    buf.writeBytes(msg)
    // Add a new DatagramPacket holding the data and the destination address to the outbound message list
    out.add(new DatagramPacket(buf, remoteAddress))
  }
} 
Example 93
Source File: NettyOioServer.scala    From netty-in-action-scala   with Apache License 2.0 5 votes vote down vote up
package nia.chapter4

import io.netty.bootstrap.ServerBootstrap
import io.netty.buffer.Unpooled
import io.netty.channel._
import io.netty.channel.oio.OioEventLoopGroup
import io.netty.channel.socket.SocketChannel
import io.netty.channel.socket.oio.OioServerSocketChannel
import java.net.InetSocketAddress
import java.nio.charset.Charset


class NettyOioServer {
  @throws[Exception]
  def server(port: Int): Unit = {
    val buf = Unpooled.unreleasableBuffer(Unpooled.copiedBuffer("Hi!\r\n", Charset.forName("UTF-8")))
    val group: EventLoopGroup = new OioEventLoopGroup
    try {
      // Create the ServerBootstrap
      val b = new ServerBootstrap
      b.group(group)
        // Use OioEventLoopGroup to allow blocking mode (old-style I/O)
        .channel(classOf[OioServerSocketChannel])
        .localAddress(new InetSocketAddress(port))
        // Specify a ChannelInitializer that will be invoked for each accepted connection
        .childHandler {
          new ChannelInitializer[SocketChannel]() {
            @throws[Exception]
            override def initChannel(ch: SocketChannel): Unit = {
              ch.pipeline.addLast(new ChannelInboundHandlerAdapter() {
                @throws[Exception]
                override def channelActive(ctx: ChannelHandlerContext): Unit = {
                  ctx.writeAndFlush(buf.duplicate).addListener( // Write the message to the client and add a ChannelFutureListener
                    // so that the connection is closed as soon as the message has been written
                    ChannelFutureListener.CLOSE)
                }
              })
            }
          }
        }
      // Bind the server to accept connections
      val f = b.bind.sync()
      f.channel.closeFuture.sync()
    } finally {
      // Release all resources
      group.shutdownGracefully.sync()
    }
  }
} 
Example 94
Source File: NettyNioServer.scala    From netty-in-action-scala   with Apache License 2.0 5 votes vote down vote up
package nia.chapter4

import io.netty.bootstrap.ServerBootstrap
import io.netty.buffer.Unpooled
import io.netty.channel._
import io.netty.channel.nio.NioEventLoopGroup
import io.netty.channel.socket.SocketChannel
import io.netty.channel.socket.nio.NioServerSocketChannel
import java.net.InetSocketAddress
import java.nio.charset.Charset


class NettyNioServer {
  @throws[Exception]
  def server(port: Int): Unit = {
    val buf = Unpooled.unreleasableBuffer(Unpooled.copiedBuffer("Hi!\r\n", Charset.forName("UTF-8")))
    // Use NioEventLoopGroup for non-blocking mode
    val group: EventLoopGroup = new NioEventLoopGroup
    try { // Create the ServerBootstrap
      val b = new ServerBootstrap
      b.group(group)
        .channel(classOf[NioServerSocketChannel])
        .localAddress(new InetSocketAddress(port))
        // Specify a ChannelInitializer that will be invoked for each accepted connection
        .childHandler {
          new ChannelInitializer[SocketChannel]() {
            @throws[Exception]
            override def initChannel(ch: SocketChannel): Unit = {
              ch.pipeline.addLast(new ChannelInboundHandlerAdapter() {
                @throws[Exception]
                override def channelActive(ctx: ChannelHandlerContext): Unit = {
                  // Write the message to the client and add a ChannelFutureListener
                  // so that the connection is closed as soon as the message has been written
                  ctx.writeAndFlush(buf.duplicate)
                    .addListener(ChannelFutureListener.CLOSE)
                }
              })
            }
          }
        }
      // Bind the server to accept connections
      val f = b.bind.sync()
      f.channel.closeFuture.sync()
    } finally {
      // Release all resources
      group.shutdownGracefully.sync()
    }
  }
} 
Example 95
Source File: EchoClient.scala    From netty-in-action-scala   with Apache License 2.0 5 votes vote down vote up
package nia.chapter2.echoclient

import io.netty.bootstrap.Bootstrap
import io.netty.channel.ChannelInitializer
import io.netty.channel.EventLoopGroup
import io.netty.channel.nio.NioEventLoopGroup
import io.netty.channel.socket.SocketChannel
import io.netty.channel.socket.nio.NioSocketChannel
import java.net.InetSocketAddress


object EchoClient {
  @throws[Exception]
  def main(args: Array[String]): Unit = {
    if (args.length != 2) {
      System.err.println("Usage: " + classOf[EchoClient].getSimpleName + " <host> <port>")
    } else {
      val host = args(0)
      val port = args(1).toInt
      new EchoClient(host, port).start()
    }
  }
}

class EchoClient(val host: String, val port: Int) {
  @throws[Exception]
  def start(): Unit = {
    val group: EventLoopGroup = new NioEventLoopGroup
    try {
      // Create the Bootstrap
      val b = new Bootstrap
      // Specify the EventLoopGroup to handle client events; an NIO implementation is needed
      b.group(group)
        // Channel type for the NIO transport
        .channel(classOf[NioSocketChannel])
        // Set the server's InetSocketAddress
        .remoteAddress(new InetSocketAddress(host, port))
        // When the Channel is created, add an EchoClientHandler instance to the ChannelPipeline
        .handler {
          new ChannelInitializer[SocketChannel]() {
            @throws[Exception]
            override def initChannel(ch: SocketChannel): Unit = {
              ch.pipeline.addLast(new EchoClientHandler)
            }
          }
        }
      // Connect to the remote peer; block until the connection completes
      val f = b.connect.sync()
      // Block until the Channel closes
      f.channel.closeFuture.sync()
    } finally {
      // Shut down the thread pool and release all resources
      group.shutdownGracefully.sync()
    }
  }
} 
Example 96
Source File: BootstrapClient.scala    From netty-in-action-scala   with Apache License 2.0 5 votes vote down vote up
package nia.chapter8

import io.netty.bootstrap.Bootstrap
import io.netty.buffer.ByteBuf
import io.netty.channel._
import io.netty.channel.nio.NioEventLoopGroup
import io.netty.channel.socket.nio.NioSocketChannel
import java.net.InetSocketAddress


  def bootstrap(): Unit = {
    // Set the EventLoopGroup that provides EventLoops for processing Channel events
    val group: EventLoopGroup = new NioEventLoopGroup
    // Create a Bootstrap instance to create and connect new client Channels
    val bootstrap = new Bootstrap
    bootstrap.group(group)
      // Specify the Channel implementation to use
      .channel(classOf[NioSocketChannel])
      // Set the ChannelInboundHandler for Channel events and data
      .handler {
        new SimpleChannelInboundHandler[ByteBuf]() {
          @throws[Exception]
          override protected def channelRead0(
            channelHandlerContext: ChannelHandlerContext,
            byteBuf:               ByteBuf): Unit = {
            println("Received data")
          }
        }
      }
    // Connect to the remote host
    val future = bootstrap.connect(new InetSocketAddress("www.manning.com", 80))
    future.addListener(new ChannelFutureListener() {
      @throws[Exception]
      override def operationComplete(channelFuture: ChannelFuture): Unit = {
        if (channelFuture.isSuccess)
          println("Connection established")
        else {
          System.err.println("Connection attempt failed")
          channelFuture.cause.printStackTrace()
        }
      }
    })
  }
} 
Example 97
Source File: InvalidBootstrapClient.scala    From netty-in-action-scala   with Apache License 2.0 5 votes vote down vote up
package nia.chapter8

import io.netty.bootstrap.Bootstrap
import io.netty.buffer.ByteBuf
import io.netty.channel.ChannelHandlerContext
import io.netty.channel.SimpleChannelInboundHandler
import io.netty.channel.nio.NioEventLoopGroup
import io.netty.channel.socket.oio.OioSocketChannel
import java.net.InetSocketAddress


  def bootstrap(): Unit = {
    val group = new NioEventLoopGroup
    // Create a new Bootstrap instance to create new client Channels
    val bootstrap = new Bootstrap
    // Specify an EventLoopGroup implementation suitable for NIO
    bootstrap.group(group)
      // Specify a Channel implementation class suitable for OIO
      .channel(classOf[OioSocketChannel])
      // Set a ChannelInboundHandler to handle the Channel's I/O events and data
      .handler {
        new SimpleChannelInboundHandler[ByteBuf]() {
          @throws[Exception]
          override protected def channelRead0(channelHandlerContext: ChannelHandlerContext, byteBuf: ByteBuf): Unit = {
            println("Received data")
          }
        }
      }
    // Attempt to connect to the remote peer
    val future = bootstrap.connect(new InetSocketAddress("www.manning.com", 80))
    future.syncUninterruptibly
  }
} 
Example 98
Source File: GracefulShutdown.scala    From netty-in-action-scala   with Apache License 2.0 5 votes vote down vote up
package nia.chapter8

import io.netty.bootstrap.Bootstrap
import io.netty.buffer.ByteBuf
import io.netty.channel._
import io.netty.channel.nio.NioEventLoopGroup
import io.netty.channel.socket.nio.NioSocketChannel
import io.netty.util.concurrent.Future
import java.net.InetSocketAddress


  def bootstrap(): Unit = {
    // Create the EventLoopGroup that handles I/O
    val group = new NioEventLoopGroup

    // Create and configure a Bootstrap instance
    val bootstrap = new Bootstrap
    bootstrap.group(group)
      .channel(classOf[NioSocketChannel])
      .handler(
        new SimpleChannelInboundHandler[ByteBuf]() {
          @throws[Exception]
          override protected def channelRead0(channelHandlerContext: ChannelHandlerContext, byteBuf: ByteBuf): Unit = {
            System.out.println("Received data")
          }
        }
      )
    bootstrap.connect(new InetSocketAddress("www.manning.com", 80)).syncUninterruptibly()

    // shutdownGracefully() releases all resources and closes all Channels currently in use
    val future = group.shutdownGracefully()
    // block until the group has shutdown
    future.syncUninterruptibly()
  }
} 
Example 99
Source File: BootstrapSharingEventLoopGroup.scala    From netty-in-action-scala   with Apache License 2.0 5 votes vote down vote up
package nia.chapter8

import io.netty.bootstrap.Bootstrap
import io.netty.bootstrap.ServerBootstrap
import io.netty.buffer.ByteBuf
import io.netty.channel.ChannelFuture
import io.netty.channel.ChannelFutureListener
import io.netty.channel.ChannelHandlerContext
import io.netty.channel.SimpleChannelInboundHandler
import io.netty.channel.nio.NioEventLoopGroup
import io.netty.channel.socket.nio.NioServerSocketChannel
import io.netty.channel.socket.nio.NioSocketChannel
import java.net.InetSocketAddress


  def bootstrap(): Unit = { // Create the ServerBootstrap to create and bind a ServerSocketChannel
    val bootstrap = new ServerBootstrap
    // Set the EventLoopGroups that provide EventLoops for processing Channel events
    bootstrap.group(new NioEventLoopGroup, new NioEventLoopGroup)
      // Specify the Channel implementation to use
      .channel(classOf[NioServerSocketChannel])
      // Set the ChannelInboundHandler that handles I/O and data for accepted child Channels
      .childHandler {
        new SimpleChannelInboundHandler[ByteBuf]() {
          private[chapter8] var connectFuture: ChannelFuture = _

          @throws[Exception]
          override def channelActive(ctx: ChannelHandlerContext): Unit = {
            // Create a Bootstrap instance to connect to the remote host
            val bootstrap = new Bootstrap
            // Specify the Channel implementation
            bootstrap.channel(classOf[NioSocketChannel])
              .handler(new SimpleChannelInboundHandler[ByteBuf]() {
                @throws[Exception]
                override protected def channelRead0(
                  ctx: ChannelHandlerContext,
                  in:  ByteBuf): Unit = {
                  println("Received data")
                }
              })
            // Use the same EventLoop as the one assigned to the accepted child Channel
            bootstrap.group(ctx.channel.eventLoop)
            // Connect to the remote peer
            connectFuture = bootstrap.connect(new InetSocketAddress("www.manning.com", 80))
          }

          @throws[Exception]
          override protected def channelRead0(
            channelHandlerContext: ChannelHandlerContext,
            byteBuf:               ByteBuf): Unit = {
            if (connectFuture.isDone) {
              // When the connection is complete, do something with the data (e.g. proxy it)
              // do something with the data
            }
          }
        }
      }

    // Bind the ServerSocketChannel via the configured ServerBootstrap
    val future = bootstrap.bind(new InetSocketAddress(8080))
    future.addListener(new ChannelFutureListener() {
      @throws[Exception]
      override def operationComplete(channelFuture: ChannelFuture): Unit = {
        if (channelFuture.isSuccess) System.out.println("Server bound")
        else {
          System.err.println("Bind attempt failed")
          channelFuture.cause.printStackTrace()
        }
      }
    })
  }
} 
Example 100
Source File: BootstrapServer.scala    From netty-in-action-scala   with Apache License 2.0 5 votes vote down vote up
package nia.chapter8

import io.netty.bootstrap.ServerBootstrap
import io.netty.buffer.ByteBuf
import io.netty.channel.ChannelFuture
import io.netty.channel.ChannelFutureListener
import io.netty.channel.ChannelHandlerContext
import io.netty.channel.SimpleChannelInboundHandler
import io.netty.channel.nio.NioEventLoopGroup
import io.netty.channel.socket.nio.NioServerSocketChannel
import java.net.InetSocketAddress


  def bootstrap(): Unit = {
    val group = new NioEventLoopGroup
    // Create the ServerBootstrap
    val bootstrap = new ServerBootstrap
    // Set the EventLoopGroup that provides EventLoops for processing Channel events
    bootstrap.group(group)
      // Specify the Channel implementation to use
      .channel(classOf[NioServerSocketChannel])
      // Set the ChannelInboundHandler that handles I/O and data for accepted child Channels
      .childHandler {
        new SimpleChannelInboundHandler[ByteBuf]() {
          @throws[Exception]
          override protected def channelRead0(channelHandlerContext: ChannelHandlerContext, byteBuf: ByteBuf): Unit = {
            System.out.println("Received data")
          }
        }
      }

    // Bind the Channel via the configured ServerBootstrap instance
    val future = bootstrap.bind(new InetSocketAddress(8080))
    future.addListener(new ChannelFutureListener() {
      @throws[Exception]
      override def operationComplete(channelFuture: ChannelFuture): Unit = {
        if (channelFuture.isSuccess) System.out.println("Server bound")
        else {
          System.err.println("Bind attempt failed")
          channelFuture.cause.printStackTrace()
        }
      }
    })
  }
} 
Example 101
Source File: BootstrapDatagramChannel.scala    From netty-in-action-scala   with Apache License 2.0 5 votes vote down vote up
package nia.chapter8

import io.netty.bootstrap.Bootstrap
import io.netty.channel.ChannelFuture
import io.netty.channel.ChannelFutureListener
import io.netty.channel.ChannelHandlerContext
import io.netty.channel.SimpleChannelInboundHandler
import io.netty.channel.oio.OioEventLoopGroup
import io.netty.channel.socket.DatagramPacket
import io.netty.channel.socket.oio.OioDatagramChannel
import java.net.InetSocketAddress


  def bootstrap(): Unit = {
    // Create a Bootstrap instance to create and bind new datagram Channels
    val bootstrap = new Bootstrap
    // Set the EventLoopGroup that provides EventLoops for processing Channel events
    bootstrap.group(new OioEventLoopGroup)
      // Specify the Channel implementation
      .channel(classOf[OioDatagramChannel])
      .handler(new SimpleChannelInboundHandler[DatagramPacket]() {
        @throws[Exception]
        override def channelRead0(ctx: ChannelHandlerContext, msg: DatagramPacket): Unit = {
          // Do something with the packet
        }
      })

    // Call bind(), because the protocol is connectionless
    val future = bootstrap.bind(new InetSocketAddress(0))
    future.addListener(new ChannelFutureListener() {
      @throws[Exception]
      override def operationComplete(channelFuture: ChannelFuture): Unit = {
        if (channelFuture.isSuccess)
          println("Channel bound")
        else {
          System.err.println("Bind attempt failed")
          channelFuture.cause.printStackTrace()
        }
      }
    })
  }
} 
Example 102
Source File: BootstrapClientWithOptionsAndAttrs.scala    From netty-in-action-scala   with Apache License 2.0 5 votes vote down vote up
package nia.chapter8

import io.netty.bootstrap.Bootstrap
import io.netty.buffer.ByteBuf
import io.netty.channel.ChannelHandlerContext
import io.netty.channel.ChannelOption
import io.netty.channel.SimpleChannelInboundHandler
import io.netty.channel.nio.NioEventLoopGroup
import io.netty.channel.socket.nio.NioSocketChannel
import io.netty.util.AttributeKey
import java.net.InetSocketAddress
import java.lang.{ Boolean ⇒ JBoolean }


  def bootstrap(): Unit = { // Create an AttributeKey to identify the attribute
    val id: AttributeKey[Integer] = AttributeKey.newInstance("ID")
    // Create a Bootstrap instance to create client Channels and connect them
    val bootstrap = new Bootstrap
    // Set the EventLoopGroup that provides EventLoops for processing Channel events
    bootstrap.group(new NioEventLoopGroup)
      .channel(classOf[NioSocketChannel])
      .handler(new SimpleChannelInboundHandler[ByteBuf]() {
        @throws[Exception]
        override def channelRegistered(ctx: ChannelHandlerContext): Unit = { // Retrieve the attribute and its value using the AttributeKey
          val idValue = ctx.channel.attr(id).get
          // do something with the idValue
        }

        @throws[Exception]
        override protected def channelRead0(channelHandlerContext: ChannelHandlerContext, byteBuf: ByteBuf): Unit = {
          System.out.println("Received data")
        }
      })

    // Set the ChannelOptions that will be applied to the created Channel when connect() or bind() is called
    bootstrap
      .option[JBoolean](ChannelOption.SO_KEEPALIVE, true)
      .option[Integer](ChannelOption.CONNECT_TIMEOUT_MILLIS, 5000)

    // Store the id attribute
    bootstrap.attr[Integer](id, 123456)

    // Connect to the remote host with the configured Bootstrap instance
    val future = bootstrap.connect(new InetSocketAddress("www.manning.com", 80))
    future.syncUninterruptibly
  }
} 
Example 103
Source File: BootstrapWithInitializer.scala    From netty-in-action-scala   with Apache License 2.0 5 votes vote down vote up
package nia.chapter8

import io.netty.bootstrap.ServerBootstrap
import io.netty.channel._
import io.netty.channel.nio.NioEventLoopGroup
import io.netty.channel.socket.nio.NioServerSocketChannel
import io.netty.handler.codec.http.HttpClientCodec
import io.netty.handler.codec.http.HttpObjectAggregator
import java.net.InetSocketAddress


  @throws[InterruptedException]
  def bootstrap(): Unit = {
    // Create the ServerBootstrap to create and bind new Channels
    val bootstrap = new ServerBootstrap

    // Set the EventLoopGroups that provide EventLoops for processing Channel events
    bootstrap.group(new NioEventLoopGroup, new NioEventLoopGroup)
      .channel(classOf[NioServerSocketChannel]) // Specify the Channel implementation
      // Register a ChannelInitializerImpl instance to set up the ChannelPipeline
      .childHandler(new ChannelInitializerImpl)

    // Bind to the address
    val future = bootstrap.bind(new InetSocketAddress(8080))
    future.sync
  }

  // Custom ChannelInitializerImpl implementation that sets up the ChannelPipeline
  final private[chapter8] class ChannelInitializerImpl extends ChannelInitializer[Channel] {
    // Add the required ChannelHandlers to the ChannelPipeline
    @throws[Exception]
    override protected def initChannel(ch: Channel): Unit = {
      val pipeline = ch.pipeline
      pipeline.addLast(new HttpClientCodec)
      pipeline.addLast(new HttpObjectAggregator(Integer.MAX_VALUE))
    }
  }
} 
Example 104
Source File: MongoClientUri.scala    From tepkin   with Apache License 2.0 5 votes vote down vote up
package net.fehmicansaglam.tepkin

import java.net.InetSocketAddress

import net.fehmicansaglam.tepkin.MongoClientUriParser._

case class MongoCredentials(username: String, password: Option[String] = None)


case class MongoClientUri(credentials: Option[MongoCredentials] = None,
                          hosts: Set[InetSocketAddress],
                          database: Option[String] = None,
                          options: Map[String, String] = Map.empty) {
  def option(key: String): Option[String] = options.get(key)
}

object MongoClientUri {
  def apply(input: String): MongoClientUri = MongoClientUriParser.parseAll(uri, input) match {
    case Success(mongoUri, _) => mongoUri
    case failure: NoSuccess => throw new IllegalArgumentException(failure.msg)
  }
} 
Example 105
Source File: MongoClientUriParser.scala    From tepkin   with Apache License 2.0 5 votes vote down vote up
package net.fehmicansaglam.tepkin

import java.net.InetSocketAddress

import scala.util.parsing.combinator.RegexParsers

object MongoClientUriParser extends RegexParsers {
  def credential: Parser[String] = """[^:@?]+""".r ^^ {
    _.toString
  }

  def hostName: Parser[String] = """[^:,?/]+""".r ^^ {
    _.toString
  }

  def port: Parser[Int] = """[0-9]+""".r ^^ {
    _.toInt
  }

  def database: Parser[String] = """[^?]+""".r ^^ {
    _.toString
  }

  def option: Parser[(String, String)] = """[^=]+""".r ~ "=" ~ """[^&]+""".r ^^ {
    case key ~ _ ~ value => key -> value
  }

  def options: Parser[Map[String, String]] = option ~ rep("&" ~ option) ^^ {
    case head ~ tail => (head +: tail.map(_._2)).toMap
  }

  def credentials: Parser[MongoCredentials] = credential ~ opt(":" ~ credential) ^^ {
    case username ~ None =>
      MongoCredentials(username = username)
    case username ~ Some(":" ~ password) =>
      MongoCredentials(username = username, password = Some(password))
  }

  def host: Parser[InetSocketAddress] = hostName ~ opt(":" ~ port) ^^ {
    case hostName ~ None => new InetSocketAddress(hostName, 27017)
    case hostName ~ Some(":" ~ port) => new InetSocketAddress(hostName, port)
  }

  def uri: Parser[MongoClientUri] = {
    "mongodb://" ~ opt(credentials ~ "@") ~ host ~ rep("," ~ host) ~ opt("/" ~ opt(database) ~ opt("?" ~ options)) ^^ {
      case _ ~ credentials ~ host ~ hosts ~ None =>
        MongoClientUri(
          credentials = credentials.map(_._1),
          hosts = hosts.map(_._2).toSet + host
        )

      case _ ~ credentials ~ host ~ hosts ~ Some(_ ~ database ~ options) =>
        MongoClientUri(
          credentials = credentials.map(_._1),
          hosts = hosts.map(_._2).toSet + host,
          database = database,
          options = options.map(_._2).getOrElse(Map.empty)
        )
    }
  }
} 
Example 106
Source File: MongoClientUriSpec.scala    From tepkin   with Apache License 2.0 5 votes vote down vote up
package net.fehmicansaglam.tepkin

import java.net.InetSocketAddress

import org.scalatest.{FlatSpec, Matchers}

class MongoClientUriSpec extends FlatSpec with Matchers {

  "A MongoClientUri" should "parse database server running locally" in {
    val expected = MongoClientUri(hosts = Set(new InetSocketAddress("localhost", 27017)))
    val actual = MongoClientUri("mongodb://localhost")

    actual shouldBe expected
  }

  it should "parse admin database" in {
    val expected = MongoClientUri(
      credentials = Some(MongoCredentials(username = "sysop", password = Some("moon"))),
      hosts = Set(new InetSocketAddress("localhost", 27017))
    )
    val actual = MongoClientUri("mongodb://sysop:moon@localhost")

    actual shouldBe expected
  }

  it should "parse replica set with members on different machines" in {
    val expected = MongoClientUri(
      hosts = Set(
        new InetSocketAddress("db1.example.net", 27017),
        new InetSocketAddress("db2.example.com", 27017)
      ))
    val actual = MongoClientUri("mongodb://db1.example.net,db2.example.com")

    actual shouldBe expected
  }

  it should "parse replica set with members on localhost" in {
    val expected = MongoClientUri(
      hosts = Set(
        new InetSocketAddress("localhost", 27017),
        new InetSocketAddress("localhost", 27018),
        new InetSocketAddress("localhost", 27019)
      ))
    val actual = MongoClientUri("mongodb://localhost,localhost:27018,localhost:27019")

    actual shouldBe expected
  }

  it should "parse replica set with read distribution" in {
    val expected = MongoClientUri(
      hosts = Set(
        new InetSocketAddress("example1.com", 27017),
        new InetSocketAddress("example2.com", 27017),
        new InetSocketAddress("example3.com", 27017)
      ),
      options = Map("readPreference" -> "secondary")
    )
    val actual = MongoClientUri("mongodb://example1.com,example2.com,example3.com/?readPreference=secondary")

    actual shouldBe expected
  }

  it should "parse replica set with a high level of write concern" in {
    val expected = MongoClientUri(
      hosts = Set(
        new InetSocketAddress("example1.com", 27017),
        new InetSocketAddress("example2.com", 27017),
        new InetSocketAddress("example3.com", 27017)
      ),
      options = Map("w" -> "2", "wtimeoutMS" -> "2000")
    )
    val actual = MongoClientUri("mongodb://example1.com,example2.com,example3.com/?w=2&wtimeoutMS=2000")

    actual shouldBe expected
  }

  it should "fail on invalid connection string" in {
    intercept[IllegalArgumentException] {
      MongoClientUri("mongodb://example1.com,example2.com,example3.com?w=2&wtimeoutMS=2000")
    }
    ()
  }
} 
Example 107
Source File: CalculatorServer.scala    From sbt-docker-compose   with BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
package example

import java.net.InetSocketAddress

import com.sun.net.httpserver.{HttpExchange, HttpHandler, HttpServer}

object CalculatorServer extends App {
  def start(port: Int) = {
    val server = HttpServer.create(new InetSocketAddress(port), 0)
    server.createContext("/", Handler)
    server.start()
    server
  }


  object Handler extends HttpHandler {

    val AddInput = """/add/([-0-9]+)/([-0-9]+)/?""".r
    val SubtractInput = """/subtract/([-0-9]+)/([-0-9]+)/?""".r

    override def handle(ex: HttpExchange): Unit = {
      val path = ex.getRequestURI.toString

      val resultOpt = path match {
        case AddInput(a, b) => Option(Calculator.add(a.toInt, b.toInt))
        case SubtractInput(a, b) =>
          Option(Calculator.subtract(a.toInt, b.toInt))
        case _ => None
      }

      val replyString = resultOpt match {
        case Some(x) =>
          val response = x.toString
          ex.sendResponseHeaders(200, response.length)
          response
        case None =>
          val response = s"Unknown path: $path"
          ex.sendResponseHeaders(404, response.length)
          response
      }

      ex.getResponseBody.write(replyString.getBytes)
    }
  }


  val port = args.headOption.map(_.toInt).getOrElse(8080)
  println(s"Starting calculator server on port $port w/ user args ${args.mkString(": [", ",", "]")}")
  start(port)
} 
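A hedged smoke test for the server above; the port and the scala.io.Source call are illustrative only:

val server = CalculatorServer.start(8080)
try {
  // GET /add/2/3 is matched by the AddInput route and should answer "5"
  println(scala.io.Source.fromURL("http://localhost:8080/add/2/3").mkString)
} finally {
  server.stop(0) // com.sun.net.httpserver.HttpServer.stop takes a delay in seconds
}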
Example 108
Source File: Authority.scala    From fintrospect   with Apache License 2.0 5 votes vote down vote up
package io.fintrospect.configuration

import java.net.InetSocketAddress

import scala.util.Try

case class Authority(host: Host, port: Port) {
  override def toString: String = s"${host.value}:${port.value}"

  def socketAddress: InetSocketAddress = new InetSocketAddress(host.value, port.value)
}

object Authority {
  def unapply(str: String): Option[Authority] = {
    val parts = str.split(":")
    parts.length match {
      case 1 => Try(Host(parts(0)).toAuthority(Port(80))).toOption
      case 2 => Try(Host(parts(0)).toAuthority(Port(parts(1)))).toOption
      case _ => None
    }
  }
} 
Example 109
Source File: AuthorityTest.scala    From fintrospect   with Apache License 2.0 5 votes vote down vote up
package io.fintrospect.configuration

import java.net.InetSocketAddress

import org.scalatest.{FunSpec, Matchers}

class AuthorityTest extends FunSpec with Matchers {

  describe("Authority") {
    it("renders ok") {
      Authority(Host.localhost, Port(9999)).toString shouldBe "localhost:9999"
    }
    it("defaults no port to port 80") {
      Authority.unapply("localhost") shouldBe Some(Authority(Host.localhost, Port(80)))
    }
    it("defaults valid host and port") {
      Authority.unapply("localhost:123") shouldBe Some(Authority(Host.localhost, Port(123)))
    }
    it("invalid port number") {
      Authority.unapply("localhost:asd") shouldBe None
    }
    it("too many parts") {
      Authority.unapply("localhost:123:123") shouldBe None
    }
    it("socket address") {
      Authority(Host.localhost, Port(9999)).socketAddress shouldBe new InetSocketAddress("localhost", 9999)
    }
  }
} 
Example 110
Source File: Controller.scala    From eclair   with Apache License 2.0 5 votes vote down vote up
package fr.acinq.eclair.tor

import java.net.InetSocketAddress

import akka.actor.{Actor, ActorLogging, OneForOneStrategy, Props, SupervisorStrategy, Terminated}
import akka.io.{IO, Tcp}
import akka.util.ByteString

import scala.concurrent.ExecutionContext


class Controller(address: InetSocketAddress, protocolHandlerProps: Props)
                (implicit ec: ExecutionContext = ExecutionContext.global) extends Actor with ActorLogging {

  import Controller._
  import Tcp._
  import context.system

  IO(Tcp) ! Connect(address)

  def receive = {
    case e@CommandFailed(_: Connect) =>
      e.cause match {
        case Some(ex) => log.error(ex, "Cannot connect")
        case _ => log.error("Cannot connect")
      }
      context stop self
    case c: Connected =>
      val protocolHandler = context actorOf protocolHandlerProps
      protocolHandler ! c
      val connection = sender()
      connection ! Register(self)
      context watch connection
      context become {
        case data: ByteString =>
          connection ! Write(data)
        case CommandFailed(w: Write) =>
          // O/S buffer was full
          protocolHandler ! SendFailed
          log.error("Tor command failed")
        case Received(data) =>
          protocolHandler ! data
        case _: ConnectionClosed =>
          context stop self
        case Terminated(actor) if actor == connection =>
          context stop self
      }
  }

  // we should not restart a failing tor session
  override val supervisorStrategy = OneForOneStrategy(loggingEnabled = true) { case _ => SupervisorStrategy.Escalate }

}

object Controller {
  def props(address: InetSocketAddress, protocolHandlerProps: Props)(implicit ec: ExecutionContext = ExecutionContext.global) =
    Props(new Controller(address, protocolHandlerProps))

  case object SendFailed

} 
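A hedged usage sketch: the controller is typically pointed at Tor's control port (9051 is the conventional default; in eclair the address comes from configuration), and Props.empty stands in here for a real protocol handler:

import java.net.InetSocketAddress
import akka.actor.{ActorSystem, Props}

val system = ActorSystem("tor-demo")
val controller = system.actorOf(
  Controller.props(new InetSocketAddress("127.0.0.1", 9051), Props.empty),
  name = "tor-controller")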
Example 111
Source File: Server.scala    From eclair   with Apache License 2.0 5 votes vote down vote up
package fr.acinq.eclair.io

import java.net.InetSocketAddress

import akka.Done
import akka.actor.{Actor, ActorRef, DiagnosticActorLogging, Props}
import akka.event.Logging.MDC
import akka.io.Tcp.SO.KeepAlive
import akka.io.{IO, Tcp}
import fr.acinq.eclair.Logs.LogCategory
import fr.acinq.eclair.{Logs, NodeParams}

import scala.concurrent.Promise


class Server(nodeParams: NodeParams, switchboard: ActorRef, router: ActorRef, address: InetSocketAddress, bound: Option[Promise[Done]] = None) extends Actor with DiagnosticActorLogging {

  import Tcp._
  import context.system

  IO(Tcp) ! Bind(self, address, options = KeepAlive(true) :: Nil, pullMode = true)

  def receive = {
    case Bound(localAddress) =>
      bound.map(_.success(Done))
      log.info(s"bound on $localAddress")
      // Accept connections one by one
      sender() ! ResumeAccepting(batchSize = 1)
      context.become(listening(sender()))

    case CommandFailed(_: Bind) =>
      bound.map(_.failure(new RuntimeException("TCP bind failed")))
      context stop self
  }

  def listening(listener: ActorRef): Receive = {
    case Connected(remote, _) =>
      log.info(s"connected to $remote")
      val connection = sender()
      val peerConnection = context.actorOf(PeerConnection.props(
        nodeParams = nodeParams,
        switchboard = switchboard,
        router = router
      ))
      peerConnection ! PeerConnection.PendingAuth(connection, remoteNodeId_opt = None, address = remote, origin_opt = None)
      listener ! ResumeAccepting(batchSize = 1)
  }

  override def mdc(currentMessage: Any): MDC = Logs.mdc(Some(LogCategory.CONNECTION))
}

object Server {

  def props(nodeParams: NodeParams, switchboard: ActorRef, router: ActorRef, address: InetSocketAddress, bound: Option[Promise[Done]] = None): Props = Props(new Server(nodeParams, switchboard, router, address, bound))

} 
Example 112
Source File: SparkEsTransportClientConfSpec.scala    From Spark2Elasticsearch   with Apache License 2.0 5 votes vote down vote up
package com.github.jparkie.spark.elasticsearch.conf

import java.net.InetSocketAddress

import org.apache.spark.SparkConf
import org.scalatest.{ MustMatchers, WordSpec }

class SparkEsTransportClientConfSpec extends WordSpec with MustMatchers {
  "SparkEsTransportClientConf" must {
    "be extracted from SparkConf successfully" in {
      val inputSparkConf = new SparkConf()
        .set("es.nodes", "127.0.0.1:9000,127.0.0.1:9001,127.0.0.1:9002")
        .set("es.port", "1337")

      val expectedSparkEsTransportClientConf = SparkEsTransportClientConf(
        transportAddresses = Seq("127.0.0.1:9000", "127.0.0.1:9001", "127.0.0.1:9002"),
        transportPort = 1337,
        transportSettings = Map.empty[String, String]
      )

      val outputSparkEsTransportClientConf = SparkEsTransportClientConf.fromSparkConf(inputSparkConf)

      outputSparkEsTransportClientConf mustEqual expectedSparkEsTransportClientConf
    }

    "be extracted from SparkConf unsuccessfully" in {
      val inputSparkConf = new SparkConf()

      val outputException = intercept[IllegalArgumentException] {
        SparkEsTransportClientConf.fromSparkConf(inputSparkConf)
      }

      outputException.getMessage must include("No nodes defined in property es.nodes is in SparkConf.")
    }

    "extract transportSettings successfully" in {
      val inputSparkConf = new SparkConf()
        .set("es.nodes", "127.0.0.1:9000,127.0.0.1:9001,127.0.0.1:9002")
        .set("es.port", "1337")
        .set("es.cluster.name", "TEST_VALUE_1")
        .set("es.client.transport.sniff", "TEST_VALUE_2")
        .set("es.client.transport.ignore_cluster_name", "TEST_VALUE_3")
        .set("es.client.transport.ping_timeout", "TEST_VALUE_4")
        .set("es.client.transport.nodes_sampler_interval", "TEST_VALUE_5")

      val expectedSparkEsTransportClientConf = SparkEsTransportClientConf(
        transportAddresses = Seq("127.0.0.1:9000", "127.0.0.1:9001", "127.0.0.1:9002"),
        transportPort = 1337,
        transportSettings = Map(
          "cluster.name" -> "TEST_VALUE_1",
          "client.transport.sniff" -> "TEST_VALUE_2",
          "client.transport.ignore_cluster_name" -> "TEST_VALUE_3",
          "client.transport.ping_timeout" -> "TEST_VALUE_4",
          "client.transport.nodes_sampler_interval" -> "TEST_VALUE_5"
        )
      )

      val outputSparkEsTransportClientConf = SparkEsTransportClientConf.fromSparkConf(inputSparkConf)

      outputSparkEsTransportClientConf mustEqual expectedSparkEsTransportClientConf
    }

    "extract transportAddresses as Seq[InetSocketAddress] successfully with port secondly" in {
      val inputAddresses = Seq("127.0.0.1:9000", "127.0.0.1:9001", "127.0.0.1:9002")
      val inputPort = 1337

      val expectedTransportAddresses = Seq(
        new InetSocketAddress("127.0.0.1", 9000),
        new InetSocketAddress("127.0.0.1", 9001),
        new InetSocketAddress("127.0.0.1", 9002)
      )

      val outputTransportAddresses = SparkEsTransportClientConf.getTransportAddresses(inputAddresses, inputPort)

      outputTransportAddresses mustEqual expectedTransportAddresses
    }
  }
} 
Example 113
Source File: NodeAddress.scala    From scala-commons   with MIT License 5 votes vote down vote up
package com.avsystem.commons
package redis

import java.net.InetSocketAddress

object NodeAddress {
  final val DefaultIP = "127.0.0.1"
  final val DefaultPort = 6379
  final val DefaultSentinelPort = 26379

  final val Default = NodeAddress()
  final val DefaultSentinel = NodeAddress(port = DefaultSentinelPort)

  def parse(str: String): NodeAddress = {
    val Array(ip, port) = str.split(':')
    NodeAddress(ip, port.toInt)
  }
}
final case class NodeAddress(ip: String = NodeAddress.DefaultIP, port: Int = NodeAddress.DefaultPort) {
  def socketAddress = new InetSocketAddress(ip, port)
  override def toString = s"$ip:$port"
} 
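Note that parse expects exactly one host:port pair: a bare host, or an IPv6 literal with extra colons, fails the Array(ip, port) pattern with a MatchError. A minimal usage sketch:

val sentinel = NodeAddress.parse("10.0.0.1:26379")
sentinel.socketAddress  // new InetSocketAddress("10.0.0.1", 26379)
NodeAddress().toString  // "127.0.0.1:6379", i.e. the defaults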
Example 114
Source File: MicroService.scala    From finagle-microservice-sample   with Apache License 2.0 5 votes vote down vote up
import java.net.InetSocketAddress

import com.twitter.finagle.builder.ServerBuilder
import com.twitter.finagle.http.{Http, Request, RichHttp}
import com.twitter.finagle.stats.MetricsExporter
import reports.FakeReportProcessor

object MicroService extends App {

  val loginService = new AlwaysValidLoginService()
  val reportProcessor = new FakeReportProcessor()
  val authenticateUser = new AuthenticationFilter(loginService)
  val processReport = new ProcessReportHandler(reportProcessor)
  val metricsExporter = new MetricsExporter()

  //setup service chain
  val serviceChain = authenticateUser andThen processReport

  //HTTP endpoint
  val socketAddress = new InetSocketAddress(8080)
  val server = ServerBuilder()
    .codec(new RichHttp[Request](Http()))
    .bindTo(socketAddress)
    .name("HTTP endpoint")
    .build(serviceChain)

  println("microservice started")
} 
Example 115
Source File: WebappTestSupports.scala    From pizza-auth-3   with MIT License 5 votes vote down vote up
package moe.pizza.auth.webapp

import java.net.{Socket, InetSocketAddress, ServerSocket}

import com.fasterxml.jackson.databind.ObjectMapper
import com.fasterxml.jackson.dataformat.yaml.YAMLFactory
import com.fasterxml.jackson.module.scala.DefaultScalaModule
import moe.pizza.auth.config.ConfigFile.ConfigFile

import scala.concurrent.{Future, Await}
import scala.io.Source
import scala.util.Try
import scala.concurrent.duration._
import scala.concurrent.ExecutionContext.Implicits.global


object WebappTestSupports {
  val OM = new ObjectMapper(new YAMLFactory())
  OM.registerModule(DefaultScalaModule)

  def readTestConfig(): ConfigFile = {
    val config = Source
      .fromURL(getClass.getResource("/config.yml"))
      .getLines()
      .mkString("\n")
    val conf = OM.readValue[ConfigFile](config, classOf[ConfigFile])
    conf
  }

} 
Example 116
Source File: SystemBuilder.scala    From nexus   with Apache License 2.0 5 votes vote down vote up
package ch.epfl.bluebrain.nexus.sourcing.projections

import java.net.InetSocketAddress
import java.nio.channels.ServerSocketChannel

import akka.actor.ActorSystem
import akka.persistence.cassandra.testkit.CassandraLauncher
import com.typesafe.config.ConfigFactory


// the enclosing object and its port helper were elided in the original listing;
// they are reconstructed here (assumption) so the example compiles
object SystemBuilder {

  private def freePort(): Int = {
    val serverSocket = ServerSocketChannel.open()
    try {
      serverSocket.socket.bind(new InetSocketAddress("127.0.0.1", 0))
      serverSocket.socket.getLocalPort
    } finally serverSocket.close()
  }

  final def cluster(name: String): ActorSystem = {
    val cassandra = CassandraLauncher.randomPort
    val remote    = freePort()
    val config    = ConfigFactory
      .parseString(s"""
         |test.cassandra-port = $cassandra
         |test.remote-port = $remote
       """.stripMargin)
      .withFallback(ConfigFactory.parseResources("cluster.conf"))
      .withFallback(ConfigFactory.parseResources("cassandra.conf"))
      .withFallback(ConfigFactory.load())
      .resolve()
    ActorSystem(name, config)
  }
} 
Example 117
Source File: RandomPortSupport.scala    From scala-ddd-base   with MIT License 5 votes vote down vote up
package com.github.j5ik2o.dddbase.example.repository.util

import java.net.InetSocketAddress
import java.nio.channels.ServerSocketChannel


trait RandomPortSupport {

  def temporaryServerAddress(interface: String = "127.0.0.1"): InetSocketAddress = {
    val serverSocket = ServerSocketChannel.open()
    try {
      serverSocket.socket.bind(new InetSocketAddress(interface, 0))
      val port = serverSocket.socket.getLocalPort
      new InetSocketAddress(interface, port)
    } finally serverSocket.close()
  }

  def temporaryServerHostnameAndPort(interface: String = "127.0.0.1"): (String, Int) = {
    val socketAddress = temporaryServerAddress(interface)
    socketAddress.getHostName -> socketAddress.getPort
  }

  def temporaryServerPort(interface: String = "127.0.0.1"): Int =
    temporaryServerHostnameAndPort(interface)._2
} 
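This trait (repeated nearly verbatim in several of the examples below) probes for a free port by binding port 0 and closing the socket again, so a small race remains: another process can grab the port before the test server binds it. A usage sketch with a hypothetical spec:

class MyServerSpec extends RandomPortSupport {
  private val port = temporaryServerPort() // free at probe time; bind it promptly
  // start the system under test on `port` here
}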
Example 118
Source File: Utils.scala    From scala-ddd-base   with MIT License 5 votes vote down vote up
import java.net.InetSocketAddress
import java.nio.channels.ServerSocketChannel

import sbt._

object Utils {

  implicit class SbtLoggerOps(val self: sbt.Logger) extends AnyVal {

    def toScalaProcessLogger: scala.sys.process.ProcessLogger = new scala.sys.process.ProcessLogger {
      private val _log                     = new FullLogger(self)
      override def out(s: => String): Unit = _log.info(s)

      override def err(s: => String): Unit = _log.err(s)

      override def buffer[T](f: => T): T = _log.buffer(f)
    }
  }

  object RandomPortSupport {

    def temporaryServerAddress(interface: String = "127.0.0.1"): InetSocketAddress = {
      val serverSocket = ServerSocketChannel.open()
      try {
        serverSocket.socket.bind(new InetSocketAddress(interface, 0))
        val port = serverSocket.socket.getLocalPort
        new InetSocketAddress(interface, port)
      } finally serverSocket.close()
    }

    def temporaryServerHostnameAndPort(interface: String = "127.0.0.1"): (String, Int) = {
      val socketAddress = temporaryServerAddress(interface)
      socketAddress.getHostName -> socketAddress.getPort
    }

    def temporaryServerPort(interface: String = "127.0.0.1"): Int =
      temporaryServerHostnameAndPort(interface)._2
  }

} 
Example 119
Source File: CassandraLauncherSpec.scala    From akka-persistence-cassandra   with Apache License 2.0 5 votes vote down vote up
package akka.persistence.cassandra.testkit

import java.io.File
import java.net.InetSocketAddress

import akka.actor.ActorSystem
import akka.testkit.TestKit
import com.datastax.oss.driver.api.core.CqlSession
import org.scalatest.wordspec.AnyWordSpecLike
import org.scalatest.matchers.should.Matchers

import scala.concurrent.duration._
import org.scalatest.BeforeAndAfterAll

class CassandraLauncherSpec
    extends TestKit(ActorSystem("CassandraLauncherSpec"))
    with Matchers
    with AnyWordSpecLike
    with BeforeAndAfterAll {

  override protected def afterAll(): Unit = {
    shutdown(system, verifySystemShutdown = true)
    CassandraLauncher.stop()
    super.afterAll()
  }

  private def testCassandra(): Unit = {
    val session =
      CqlSession
        .builder()
        .withLocalDatacenter("datacenter1")
        .addContactPoint(new InetSocketAddress("localhost", CassandraLauncher.randomPort))
        .build()
    try session.execute("SELECT now() from system.local;").one()
    finally {
      session.close()
    }
  }

  "The CassandraLauncher" must {
    "support forking" in {
      val cassandraDirectory = new File("target/" + system.name)
      CassandraLauncher.start(
        cassandraDirectory,
        configResource = CassandraLauncher.DefaultTestConfigResource,
        clean = true,
        port = 0,
        CassandraLauncher.classpathForResources("logback-test.xml"))

      awaitAssert({
        testCassandra()
      }, 45.seconds)

      CassandraLauncher.stop()

      an[Exception] shouldBe thrownBy(testCassandra())
    }
  }

} 
Example 120
Source File: AkkaUtils.scala    From CM-Well   with Apache License 2.0 5 votes vote down vote up
package cmwell.tools.data.utils.akka

import java.net.InetSocketAddress

import akka.http.scaladsl.ClientTransport
import akka.http.scaladsl.coding.{Deflate, Gzip, NoCoding}
import akka.http.scaladsl.model.HttpResponse
import akka.http.scaladsl.model.headers.{HttpEncoding, HttpEncodings}
import akka.http.scaladsl.settings.{ClientConnectionSettings, ConnectionPoolSettings}
import com.typesafe.config.ConfigFactory

object AkkaUtils {

  // `config` was elided in the original listing; a plain ConfigFactory.load() is assumed here
  private val config = ConfigFactory.load()

  def generateClientConnectionSettings(userAgent: String) = {
    val settings = ClientConnectionSettings(
        ConfigFactory
          .parseString(s"akka.http.host-connection-pool.client.user-agent-header=$userAgent")
          .withFallback(config)
      )

    val proxyHost = config.getString("akka.http.client.proxy.https.host")
    val proxyPort = config.getInt("akka.http.client.proxy.https.port")

    if (proxyHost!="")
    {
      val httpsProxyTransport = ClientTransport.httpsProxy(InetSocketAddress.createUnresolved(proxyHost, proxyPort))
      settings.withTransport(httpsProxyTransport)
    }
    else
      settings
  }

  def generateConnectionPoolSettings(userAgent: Option[String] = None) = {
    val settings = userAgent.fold{
      ConnectionPoolSettings(config)
      .withConnectionSettings(ClientConnectionSettings(config))
    }{userAgentV =>
      ConnectionPoolSettings(
        ConfigFactory
          .parseString(s"akka.http.host-connection-pool.client.user-agent-header=$userAgentV")
          .withFallback(config)
      )
    }

    val proxyHost = config.getString("akka.http.client.proxy.https.host")
    val proxyPort = config.getInt("akka.http.client.proxy.https.port")

    if (proxyHost!="")
    {
      val httpsProxyTransport = ClientTransport.httpsProxy(InetSocketAddress.createUnresolved(proxyHost, proxyPort))
      settings.withTransport(httpsProxyTransport)
    }
    else
      settings
  }

  def decodeResponse(response: HttpResponse) = {
    val decoder = response.encoding match {
      case HttpEncodings.gzip ⇒
        Gzip
      case HttpEncodings.deflate ⇒
        Deflate
      case HttpEncodings.identity ⇒
        NoCoding
      case HttpEncoding(_) ⇒ ???
    }
    decoder.decodeMessage(response)
  }

} 
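decodeResponse follows the decoding recipe from the akka-http documentation. A hedged usage sketch (the URL is illustrative; an implicit ActorSystem, plus a Materializer on older akka-http versions, is assumed):

import akka.actor.ActorSystem
import akka.http.scaladsl.Http
import akka.http.scaladsl.model.HttpRequest

implicit val system: ActorSystem = ActorSystem("client")
import system.dispatcher

Http()
  .singleRequest(HttpRequest(uri = "http://example.com/data"))
  .map(AkkaUtils.decodeResponse) // transparently gunzips or inflates the entity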
Example 121
Source File: RandomPortSupport.scala    From reactive-aws-clients   with MIT License 5 votes vote down vote up
package com.github.j5ik2o.reactive.aws.kinesis.test

import java.net.InetSocketAddress
import java.nio.channels.ServerSocketChannel


trait RandomPortSupport {

  def temporaryServerAddress(interface: String = "127.0.0.1"): InetSocketAddress = {
    val serverSocket = ServerSocketChannel.open()
    try {
      serverSocket.socket.bind(new InetSocketAddress(interface, 0))
      val port = serverSocket.socket.getLocalPort
      new InetSocketAddress(interface, port)
    } finally serverSocket.close()
  }

  def temporaryServerHostnameAndPort(interface: String = "127.0.0.1"): (String, Int) = {
    val socketAddress = temporaryServerAddress(interface)
    socketAddress.getHostName -> socketAddress.getPort
  }

  def temporaryServerPort(interface: String = "127.0.0.1"): Int =
    temporaryServerHostnameAndPort(interface)._2
}

object RandomPortSupport extends RandomPortSupport 
Example 122
Source File: RandomPortSupport.scala    From reactive-aws-clients   with MIT License 5 votes vote down vote up
package com.github.j5ik2o.reactive.aws.test

import java.net.InetSocketAddress
import java.nio.channels.ServerSocketChannel


trait RandomPortSupport {

  def temporaryServerAddress(interface: String = "127.0.0.1"): InetSocketAddress = {
    val serverSocket = ServerSocketChannel.open()
    try {
      serverSocket.socket.bind(new InetSocketAddress(interface, 0))
      val port = serverSocket.socket.getLocalPort
      new InetSocketAddress(interface, port)
    } finally serverSocket.close()
  }

  def temporaryServerHostnameAndPort(interface: String = "127.0.0.1"): (String, Int) = {
    val socketAddress = temporaryServerAddress(interface)
    socketAddress.getHostName -> socketAddress.getPort
  }

  def temporaryServerPort(interface: String = "127.0.0.1"): Int =
    temporaryServerHostnameAndPort(interface)._2
} 
Example 123
Source File: OffsetGraphiteReporter.scala    From kafka-offset-monitor-graphite   with Apache License 2.0 5 votes vote down vote up
package pl.allegro.tech.kafka.offset.monitor.graphite

import java.net.InetSocketAddress
import java.util.concurrent.TimeUnit

import com.codahale.metrics.{MetricRegistry, MetricFilter}
import com.codahale.metrics.graphite.{GraphiteReporter, Graphite}
import com.google.common.cache._
import com.quantifind.kafka.OffsetGetter.OffsetInfo
import com.codahale.metrics.Gauge

class OffsetGraphiteReporter (pluginsArgs: String) extends com.quantifind.kafka.offsetapp.OffsetInfoReporter {

  GraphiteReporterArguments.parseArguments(pluginsArgs)

  val metrics : MetricRegistry = new MetricRegistry()

  val graphite : Graphite = new Graphite(new InetSocketAddress(GraphiteReporterArguments.graphiteHost, GraphiteReporterArguments.graphitePort))
  val reporter : GraphiteReporter = GraphiteReporter.forRegistry(metrics)
    .prefixedWith(GraphiteReporterArguments.graphitePrefix)
    .convertRatesTo(TimeUnit.SECONDS)
    .convertDurationsTo(TimeUnit.MILLISECONDS)
    .filter(MetricFilter.ALL)
    .build(graphite)

  reporter.start(GraphiteReporterArguments.graphiteReportPeriod, TimeUnit.SECONDS)

  val removalListener : RemovalListener[String, GaugesValues] = new RemovalListener[String, GaugesValues] {
    override def onRemoval(removalNotification: RemovalNotification[String, GaugesValues]) = {
      metrics.remove(removalNotification.getKey() + ".offset")
      metrics.remove(removalNotification.getKey() + ".logSize")
      metrics.remove(removalNotification.getKey() + ".lag")
    }
  }

  val gauges : LoadingCache[String, GaugesValues] = CacheBuilder.newBuilder()
    .expireAfterAccess(GraphiteReporterArguments.metricsCacheExpireSeconds, TimeUnit.SECONDS)
    .removalListener(removalListener)
    .build(
      new CacheLoader[String, GaugesValues]() {
        def load(key: String): GaugesValues = {
          val values: GaugesValues = new GaugesValues()

          val offsetGauge: Gauge[Long] = new Gauge[Long] {
            override def getValue: Long = {
              values.offset
            }
          }

          val lagGauge: Gauge[Long] = new Gauge[Long] {
            override def getValue: Long = {
              values.lag
            }
          }

          val logSizeGauge: Gauge[Long] = new Gauge[Long] {
            override def getValue: Long = {
              values.logSize
            }
          }

          metrics.register(key + ".offset", offsetGauge)
          metrics.register(key + ".logSize", logSizeGauge)
          metrics.register(key + ".lag", lagGauge)

          values
        }
      }
   )

  override def report(info: scala.IndexedSeq[OffsetInfo]) =  {
    info.foreach(i => {
      val values: GaugesValues = gauges.get(getMetricName(i))
      values.logSize = i.logSize
      values.offset = i.offset
      values.lag = i.lag
    })
  }

  def getMetricName(offsetInfo: OffsetInfo): String = {
    offsetInfo.topic.replace(".", "_") + "." + offsetInfo.group.replace(".", "_") + "." + offsetInfo.partition
  }
} 
Example 124
Source File: NetworkSender.scala    From Waves   with MIT License 5 votes vote down vote up
package com.wavesplatform.network.client

import java.io.IOException
import java.net.InetSocketAddress
import java.nio.channels.ClosedChannelException

import com.wavesplatform.network.TrafficLogger
import com.wavesplatform.utils.ScorexLogging
import io.netty.channel.Channel
import io.netty.channel.group.DefaultChannelGroup
import io.netty.util.concurrent.GlobalEventExecutor

import scala.concurrent.{ExecutionContext, Future, Promise}

class NetworkSender(trafficLoggerSettings: TrafficLogger.Settings, chainId: Char, name: String, nonce: Long)(implicit ec: ExecutionContext)
    extends ScorexLogging {
  private[this] val MessagesBatchSize = 100

  private[this] val allChannels = new DefaultChannelGroup(GlobalEventExecutor.INSTANCE)
  private[this] val client      = new NetworkClient(trafficLoggerSettings, chainId, name, nonce, allChannels)

  def connect(address: InetSocketAddress): Future[Channel] =
    client.connect(address)

  def send(channel: Channel, messages: Any*): Future[Unit] = {
    def doWrite(messages: Seq[Any]): Future[Unit] =
      if (messages.isEmpty)
        Future.successful(())
      else if (!channel.isWritable)
        Future.failed(new ClosedChannelException)
      else {
        val (send, keep) = messages.splitAt(MessagesBatchSize)
        val futures = send.toVector.map { msg =>
          val result = Promise[Unit]()
          channel.write(msg).addListener { (f: io.netty.util.concurrent.Future[Void]) =>
            if (!f.isSuccess) {
              val cause = Option(f.cause()).getOrElse(new IOException("Can't send a message to the channel"))
              log.error(s"Can't send a message to the channel: $msg", cause)
              result.failure(cause)
            } else {
              result.success(())
            }
          }
          result.future
        }

        channel.flush()
        Future.sequence(futures).flatMap(_ => doWrite(keep))
      }

    doWrite(messages)
  }

  def close(): Unit = client.shutdown()
} 
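A usage sketch (the settings value, address and chain id are placeholders; GetPeers is the message type from Example 126 below):

import java.net.InetSocketAddress
import com.wavesplatform.network.{GetPeers, TrafficLogger}
import scala.concurrent.ExecutionContext.Implicits.global

val tlSettings: TrafficLogger.Settings = ??? // taken from the node settings in practice
val networkSender = new NetworkSender(tlSettings, chainId = 'W', name = "demo", nonce = 1L)
networkSender
  .connect(new InetSocketAddress("127.0.0.1", 6868))
  .flatMap(channel => networkSender.send(channel, GetPeers)) // written in batches of up to 100
  .andThen { case _ => networkSender.close() }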
Example 125
Source File: NetworkClient.scala    From Waves   with MIT License 5 votes vote down vote up
package com.wavesplatform.network.client

import java.io.IOException
import java.net.InetSocketAddress

import com.wavesplatform.Version
import com.wavesplatform.network.{Handshake, TrafficLogger}
import com.wavesplatform.settings._
import com.wavesplatform.utils.ScorexLogging
import io.netty.bootstrap.Bootstrap
import io.netty.channel._
import io.netty.channel.group.ChannelGroup
import io.netty.channel.nio.NioEventLoopGroup
import io.netty.channel.socket.nio.NioSocketChannel

import scala.concurrent.{Future, Promise}

class NetworkClient(trafficLoggerSettings: TrafficLogger.Settings, chainId: Char, nodeName: String, nonce: Long, allChannels: ChannelGroup) extends ScorexLogging {

  private val workerGroup = new NioEventLoopGroup()
  private val handshake   = Handshake(Constants.ApplicationName + chainId, Version.VersionTuple, nodeName, nonce, None)

  def connect(remoteAddress: InetSocketAddress): Future[Channel] = {
    val p = Promise[Channel]

    val bootstrap = new Bootstrap()
      .group(workerGroup)
      .channel(classOf[NioSocketChannel])
      .handler(new LegacyChannelInitializer(trafficLoggerSettings, handshake, p))

    log.debug(s"Connecting to $remoteAddress")
    val channelFuture = bootstrap.connect(remoteAddress)
    channelFuture.addListener((_: io.netty.util.concurrent.Future[Void]) => {
      log.debug(s"Connected to $remoteAddress")
      channelFuture.channel().write(p)
    })

    val channel = channelFuture.channel()
    allChannels.add(channel)
    channel.closeFuture().addListener { (chf: ChannelFuture) =>
      if (!p.isCompleted) {
        val cause = Option(chf.cause()).getOrElse(new IllegalStateException("The connection is closed before handshake"))
        p.failure(new IOException(cause))
      }
      log.debug(s"Connection to $remoteAddress closed")
      allChannels.remove(chf.channel())
    }

    p.future
  }

  def shutdown(): Unit =
    try {
      allChannels.close().await()
      log.debug("Closed all channels")
    } finally {
      workerGroup.shutdownGracefully()
    }
} 
Example 126
Source File: messages.scala    From Waves   with MIT License 5 votes vote down vote up
package com.wavesplatform.network

import java.net.InetSocketAddress
import java.util

import com.wavesplatform.account.{KeyPair, PublicKey}
import com.wavesplatform.block.Block.BlockId
import com.wavesplatform.block.{Block, MicroBlock}
import com.wavesplatform.common.state.ByteStr
import com.wavesplatform.crypto
import com.wavesplatform.transaction.{LegacyPBSwitch, ProtobufOnly, Signed, Transaction}
import monix.eval.Coeval

sealed trait Message

case object GetPeers extends Message

case class KnownPeers(peers: Seq[InetSocketAddress]) extends Message

case class GetSignatures(signatures: Seq[ByteStr]) extends Message {
  override def toString: String = s"GetSignatures(${formatSignatures(signatures)})"
}

case class Signatures(signatures: Seq[ByteStr]) extends Message {
  override def toString: String = s"Signatures(${formatSignatures(signatures)})"
}

case class GetBlock(signature: ByteStr) extends Message

case class LocalScoreChanged(newLocalScore: BigInt) extends Message

case class RawBytes(code: Byte, data: Array[Byte]) extends Message {
  override def toString: String = s"RawBytes($code, ${data.length} bytes)"

  override def equals(obj: Any): Boolean = obj match {
    case o: RawBytes => o.code == code && util.Arrays.equals(o.data, data)
    case _ => false
  }
}

object RawBytes {
  def fromTransaction(tx: Transaction): RawBytes = tx match {
    case p: LegacyPBSwitch if p.isProtobufVersion => RawBytes(PBTransactionSpec.messageCode, PBTransactionSpec.serializeData(tx))
    case _: ProtobufOnly                          => RawBytes(PBTransactionSpec.messageCode, PBTransactionSpec.serializeData(tx))
    case tx                                       => RawBytes(TransactionSpec.messageCode, TransactionSpec.serializeData(tx))
  }

  def fromBlock(b: Block): RawBytes =
    if (b.header.version < Block.ProtoBlockVersion) RawBytes(BlockSpec.messageCode, BlockSpec.serializeData(b))
    else RawBytes(PBBlockSpec.messageCode, PBBlockSpec.serializeData(b))

  def fromMicroBlock(mb: MicroBlockResponse): RawBytes =
    if (mb.microblock.version < Block.ProtoBlockVersion)
      RawBytes(LegacyMicroBlockResponseSpec.messageCode, LegacyMicroBlockResponseSpec.serializeData(mb))
    else RawBytes(PBMicroBlockSpec.messageCode, PBMicroBlockSpec.serializeData(mb))
}

case class BlockForged(block: Block) extends Message

case class MicroBlockRequest(totalBlockSig: ByteStr) extends Message

case class MicroBlockResponse(microblock: MicroBlock, totalBlockId: BlockId)

object MicroBlockResponse {
  def apply(mb: MicroBlock): MicroBlockResponse = {
    require(mb.version < Block.ProtoBlockVersion)
    MicroBlockResponse(mb, mb.totalResBlockSig)
  }
}

case class MicroBlockInv(sender: PublicKey, totalBlockId: ByteStr, reference: ByteStr, signature: ByteStr) extends Message with Signed {
  override val signatureValid: Coeval[Boolean] =
    Coeval.evalOnce(crypto.verify(signature, sender.toAddress.bytes ++ totalBlockId.arr ++ reference.arr, sender))

  override def toString: String = s"MicroBlockInv(${totalBlockId.trim} ~> ${reference.trim})"
}

object MicroBlockInv {

  def apply(sender: KeyPair, totalBlockRef: ByteStr, prevBlockRef: ByteStr): MicroBlockInv = {
    val signature = crypto.sign(sender.privateKey, sender.toAddress.bytes ++ totalBlockRef.arr ++ prevBlockRef.arr)
    new MicroBlockInv(sender.publicKey, totalBlockRef, prevBlockRef, signature)
  }
} 
Example 127
Source File: Handshake.scala    From Waves   with MIT License 5 votes vote down vote up
package com.wavesplatform.network

import java.net.{InetAddress, InetSocketAddress}

import com.google.common.base.Charsets
import io.netty.buffer.ByteBuf
import com.wavesplatform.utils._

case class Handshake(applicationName: String,
                     applicationVersion: (Int, Int, Int),
                     nodeName: String,
                     nodeNonce: Long,
                     declaredAddress: Option[InetSocketAddress]) {
  def encode(out: ByteBuf): out.type = {
    val applicationNameBytes = applicationName.utf8Bytes
    require(applicationNameBytes.length <= Byte.MaxValue, "The application name is too long!")
    out.writeByte(applicationNameBytes.length)
    out.writeBytes(applicationNameBytes)

    out.writeInt(applicationVersion._1)
    out.writeInt(applicationVersion._2)
    out.writeInt(applicationVersion._3)

    val nodeNameBytes = nodeName.utf8Bytes
    require(nodeNameBytes.length <= Byte.MaxValue, "A node name is too long!")
    out.writeByte(nodeNameBytes.length)
    out.writeBytes(nodeNameBytes)

    out.writeLong(nodeNonce)

    val peer = for {
      inetAddress <- declaredAddress
      address     <- Option(inetAddress.getAddress)
    } yield (address.getAddress, inetAddress.getPort)

    peer match {
      case None => out.writeInt(0)
      case Some((addressBytes, peerPort)) =>
        out.writeInt(addressBytes.length + Integer.BYTES)
        out.writeBytes(addressBytes)
        out.writeInt(peerPort)
    }

    out.writeLong(System.currentTimeMillis() / 1000)
    out
  }
}

object Handshake {
  class InvalidHandshakeException(msg: String) extends IllegalArgumentException(msg)

  def decode(in: ByteBuf): Handshake = {
    val appNameSize = in.readByte()

    if (appNameSize < 0 || appNameSize > Byte.MaxValue) {
      throw new InvalidHandshakeException(s"An invalid application name's size: $appNameSize")
    }
    val appName    = in.readSlice(appNameSize).toString(Charsets.UTF_8)
    val appVersion = (in.readInt(), in.readInt(), in.readInt())

    val nodeNameSize = in.readByte()
    if (nodeNameSize < 0 || nodeNameSize > Byte.MaxValue) {
      throw new InvalidHandshakeException(s"An invalid node name's size: $nodeNameSize")
    }
    val nodeName = in.readSlice(nodeNameSize).toString(Charsets.UTF_8)

    val nonce = in.readLong()

    val declaredAddressLength = in.readInt()
    // 0 for no declared address, 8 for ipv4 address + port, 20 for ipv6 address + port
    if (declaredAddressLength != 0 && declaredAddressLength != 8 && declaredAddressLength != 20) {
      throw new InvalidHandshakeException(s"An invalid declared address length: $declaredAddressLength")
    }
    val isa =
      if (declaredAddressLength == 0) None
      else {
        val addressBytes = new Array[Byte](declaredAddressLength - Integer.BYTES)
        in.readBytes(addressBytes)
        val address = InetAddress.getByAddress(addressBytes)
        val port    = in.readInt()
        Some(new InetSocketAddress(address, port))
      }
    in.readLong() // time is ignored

    Handshake(appName, appVersion, nodeName, nonce, isa)
  }
} 
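The encode/decode pair round-trips through a Netty buffer; a minimal sketch (all values are illustrative):

import java.net.InetSocketAddress
import io.netty.buffer.Unpooled

val original = Handshake("wavesW", (1, 2, 3), "demo-node", nodeNonce = 42L,
  declaredAddress = Some(new InetSocketAddress("10.0.0.1", 6868)))
val decoded = Handshake.decode(original.encode(Unpooled.buffer()))
// decoded == original; the trailing timestamp written by encode is read and discarded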
Example 128
Source File: Node.scala    From Waves   with MIT License 5 votes vote down vote up
package com.wavesplatform.it

import java.net.{InetSocketAddress, URL}
import java.util.concurrent.TimeUnit

import com.typesafe.config.Config
import com.wavesplatform.account.{KeyPair, PublicKey}
import com.wavesplatform.common.utils.EitherExt2
import com.wavesplatform.it.util.GlobalTimer
import com.wavesplatform.settings.WavesSettings
import com.wavesplatform.state.diffs.FeeValidation
import com.wavesplatform.utils.LoggerFacade
import io.grpc.{ManagedChannel, ManagedChannelBuilder}
import org.asynchttpclient.Dsl.{config => clientConfig, _}
import org.asynchttpclient._
import org.slf4j.LoggerFactory

import scala.concurrent.duration.FiniteDuration

abstract class Node(val config: Config) extends AutoCloseable {
  lazy val log: LoggerFacade =
    LoggerFacade(LoggerFactory.getLogger(s"${getClass.getCanonicalName}.${this.name}"))

  val settings: WavesSettings = WavesSettings.fromRootConfig(config)
  val client: AsyncHttpClient = asyncHttpClient(
    clientConfig()
      .setKeepAlive(false)
      .setNettyTimer(GlobalTimer.instance))

  lazy val grpcChannel: ManagedChannel = ManagedChannelBuilder.forAddress(networkAddress.getHostString, nodeExternalPort(6870))
    .usePlaintext()
    .keepAliveWithoutCalls(true)
    .keepAliveTime(30, TimeUnit.SECONDS)
    .build()

  val keyPair: KeyPair  = KeyPair.fromSeed(config.getString("account-seed")).explicitGet()
  val publicKey: PublicKey = PublicKey.fromBase58String(config.getString("public-key")).explicitGet()
  val address: String      = config.getString("address")

  def nodeExternalPort(internalPort: Int): Int
  def nodeApiEndpoint: URL
  def apiKey: String

  
  def networkAddress: InetSocketAddress

  override def close(): Unit = client.close()
}

object Node {
  implicit class NodeExt(val n: Node) extends AnyVal {
    def name: String               = n.settings.networkSettings.nodeName
    def publicKeyStr: String       = n.publicKey.toString
    def fee(txTypeId: Byte): Long  = FeeValidation.FeeConstants(txTypeId) * FeeValidation.FeeUnit
    def blockDelay: FiniteDuration = n.settings.blockchainSettings.genesisSettings.averageBlockDelay
  }
} 
Example 129
Source File: GeneratorSettings.scala    From Waves   with MIT License 5 votes vote down vote up
package com.wavesplatform.generator

import java.net.{InetSocketAddress, URL}
import java.nio.charset.StandardCharsets

import cats.Show
import cats.implicits.showInterpolator
import com.google.common.primitives.{Bytes, Ints}
import com.wavesplatform.account.KeyPair
import com.wavesplatform.generator.GeneratorSettings.NodeAddress

case class GeneratorSettings(
    chainId: String,
    accounts: Seq[String],
    sendTo: Seq[NodeAddress],
    worker: Worker.Settings,
    mode: Mode.Value,
    narrow: NarrowTransactionGenerator.Settings,
    wide: WideTransactionGenerator.Settings,
    dynWide: DynamicWideTransactionGenerator.Settings,
    multisig: MultisigTransactionGenerator.Settings,
    oracle: OracleTransactionGenerator.Settings,
    swarm: SmartGenerator.Settings
) {
  val addressScheme: Char              = chainId.head
  val privateKeyAccounts: Seq[KeyPair] = accounts.map(s => GeneratorSettings.toKeyPair(s))
}

object GeneratorSettings {
  case class NodeAddress(networkAddress: InetSocketAddress, apiAddress: URL)

  implicit val toPrintable: Show[GeneratorSettings] = { x =>
    import x._

    val modeSettings: String = (mode match {
      case Mode.NARROW   => show"$narrow"
      case Mode.WIDE     => show"$wide"
      case Mode.DYN_WIDE => show"$dynWide"
      case Mode.MULTISIG => show"$multisig"
      case Mode.ORACLE   => show"$oracle"
      case Mode.SWARM    => show"$swarm"
    }).toString

    s"""network byte: $chainId
       |rich accounts:
       |  ${accounts.mkString("\n  ")}
       |recipient nodes:
       |  ${sendTo.mkString("\n  ")}
       |worker:
       |  ${show"$worker".split('\n').mkString("\n  ")}
       |mode: $mode
       |$mode settings:
       |  ${modeSettings.split('\n').mkString("\n  ")}""".stripMargin
  }

  def toKeyPair(seedText: String): KeyPair = {
    KeyPair(com.wavesplatform.crypto.secureHash(Bytes.concat(Ints.toByteArray(0), seedText.getBytes(StandardCharsets.UTF_8))))
  }
} 
Example 130
Source File: GRPCServerExtension.scala    From Waves   with MIT License 5 votes vote down vote up
package com.wavesplatform.api.grpc

import java.net.InetSocketAddress

import com.wavesplatform.extensions.{Extension, Context => ExtensionContext}
import com.wavesplatform.settings.GRPCSettings
import com.wavesplatform.utils.ScorexLogging
import io.grpc.Server
import io.grpc.netty.NettyServerBuilder
import monix.execution.Scheduler
import net.ceedubs.ficus.Ficus._
import net.ceedubs.ficus.readers.ArbitraryTypeReader._

import scala.concurrent.Future

class GRPCServerExtension(context: ExtensionContext) extends Extension with ScorexLogging {
  @volatile
  var server: Server = _

  override def start(): Unit = {
    val settings = context.settings.config.as[GRPCSettings]("waves.grpc")
    this.server = startServer(settings)
  }

  override def shutdown(): Future[Unit] = {
    log.debug("Shutting down gRPC server")
    if (server != null) {
      server.shutdown()
      Future(server.awaitTermination())(context.actorSystem.dispatcher)
    } else {
      Future.successful(())
    }
  }

  private[this] def startServer(settings: GRPCSettings): Server = {
    implicit val apiScheduler: Scheduler = Scheduler(context.actorSystem.dispatcher)

    val bindAddress = new InetSocketAddress(settings.host, settings.port)
    val server: Server = NettyServerBuilder
      .forAddress(bindAddress)
      .addService(TransactionsApiGrpc.bindService(new TransactionsApiGrpcImpl(context.transactionsApi), apiScheduler))
      .addService(BlocksApiGrpc.bindService(new BlocksApiGrpcImpl(context.blocksApi), apiScheduler))
      .addService(AccountsApiGrpc.bindService(new AccountsApiGrpcImpl(context.accountsApi), apiScheduler))
      .addService(AssetsApiGrpc.bindService(new AssetsApiGrpcImpl(context.assetsApi, context.accountsApi), apiScheduler))
      .addService(BlockchainApiGrpc.bindService(new BlockchainApiGrpcImpl(context.blockchain, context.settings.featuresSettings), apiScheduler))
      .build()
      .start()

    log.info(s"gRPC API was bound to $bindAddress")
    server
  }
} 
Example 131
Source File: StatsDClient.scala    From datadog4s   with MIT License 5 votes vote down vote up
package com.avast.datadog4s.statsd

import java.net.InetSocketAddress

import cats.effect.{ Resource, Sync }
import com.timgroup.statsd.{ NonBlockingStatsDClient, NonBlockingStatsDClientBuilder }

object StatsDClient {
  def make[F[_]: Sync](statsDServer: InetSocketAddress, queueSize: Int): Resource[F, NonBlockingStatsDClient] = {
    val builder = new NonBlockingStatsDClientBuilder()
      .hostname(statsDServer.getHostName)
      .port(statsDServer.getPort)
      .queueSize(queueSize)
      .prefix("")
    fromBuilder(builder)
  }

  def fromBuilder[F[_]: Sync](builder: NonBlockingStatsDClientBuilder): Resource[F, NonBlockingStatsDClient] =
    Resource.fromAutoCloseable(Sync[F].delay(builder.build()))
} 
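A usage sketch with cats-effect (8125 is the conventional StatsD port; the counter name is illustrative):

import java.net.InetSocketAddress
import cats.effect.IO

val program: IO[Unit] =
  StatsDClient.make[IO](new InetSocketAddress("localhost", 8125), queueSize = 1024).use { client =>
    IO.delay(client.incrementCounter("requests"))
  }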
Example 132
Source File: MetricsReporter.scala    From core   with Apache License 2.0 5 votes vote down vote up
package com.smartbackpackerapp.http.metrics

import java.net.InetSocketAddress
import java.util.concurrent.TimeUnit

import cats.effect.Sync
import cats.syntax.functor._
import com.codahale.metrics._
import com.codahale.metrics.graphite.{Graphite, GraphiteReporter}

class MetricsReporter[F[_]](registry: MetricRegistry)(implicit F: Sync[F]) {

  private val graphiteHost = sys.env.getOrElse("GRAPHITE_HOST", "localhost")
  private val graphitePort = sys.env.getOrElse("GRAPHITE_PORT", "2003").toInt

  private lazy val graphite = new Graphite(new InetSocketAddress(graphiteHost, graphitePort))

  private lazy val reporter = F.delay {
    GraphiteReporter
      .forRegistry(registry)
      .convertRatesTo(TimeUnit.SECONDS)
      .convertDurationsTo(TimeUnit.MILLISECONDS)
      .build(graphite)
  }

  val start: F[Unit] =
    reporter.map(_.start(15, TimeUnit.SECONDS))

} 
Example 133
Source File: DTLSConnectionFn.scala    From stream-reactor   with Apache License 2.0 5 votes vote down vote up
package com.datamountaineer.streamreactor.connect.coap.connection

import java.io.FileInputStream
import java.net.{ConnectException, InetAddress, InetSocketAddress, URI}
import java.security.cert.Certificate
import java.security.{KeyStore, PrivateKey}

import com.datamountaineer.streamreactor.connect.coap.configs.{CoapConstants, CoapSetting}
import com.typesafe.scalalogging.StrictLogging
import org.eclipse.californium.core.CoapClient
import org.eclipse.californium.core.coap.CoAP
import org.eclipse.californium.core.network.CoapEndpoint
import org.eclipse.californium.core.network.config.NetworkConfig
import org.eclipse.californium.scandium.DTLSConnector
import org.eclipse.californium.scandium.config.DtlsConnectorConfig
import org.eclipse.californium.scandium.dtls.cipher.CipherSuite
import org.eclipse.californium.scandium.dtls.pskstore.InMemoryPskStore


// the enclosing object was elided in the original listing; it is reconstructed here
// (assumption) so that `logger` from StrictLogging resolves
object DTLSConnectionFn extends StrictLogging {

  def discoverServer(address: String, uri: URI): URI = {
    val client = new CoapClient(s"${uri.getScheme}://$address:${uri.getPort.toString}/.well-known/core")
    client.useNONs()
    val response = client.get()

    if (response != null) {
      logger.info(s"Discovered Server ${response.advanced().getSource.toString}.")
      new URI(uri.getScheme,
        uri.getUserInfo,
        response.advanced().getSource.getHostName,
        response.advanced().getSourcePort,
        uri.getPath,
        uri.getQuery,
        uri.getFragment)
    } else {
      logger.error(s"Unable to find any servers on local network with multicast address $address.")
      throw new ConnectException(s"Unable to find any servers on local network with multicast address $address.")
    }
  }
} 
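A hedged usage sketch: 224.0.1.187 is the IPv4 "All CoAP Nodes" multicast address, and the URI below merely supplies the scheme, port and path pieces that discoverServer copies into the result:

import java.net.URI

val serverUri = DTLSConnectionFn.discoverServer("224.0.1.187", new URI("coap://0.0.0.0:5683/sensors"))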
Example 134
Source File: InboundConnectionFilter.scala    From Waves   with MIT License 5 votes vote down vote up
package com.wavesplatform.network

import java.net.{InetAddress, InetSocketAddress}
import java.util.concurrent.ConcurrentHashMap
import java.util.concurrent.atomic.AtomicInteger

import com.wavesplatform.utils.ScorexLogging
import io.netty.channel.ChannelHandler.Sharable
import io.netty.channel.{ChannelFuture, ChannelHandlerContext}
import io.netty.handler.ipfilter.AbstractRemoteAddressFilter

@Sharable
class InboundConnectionFilter(peerDatabase: PeerDatabase, maxInboundConnections: Int, maxConnectionsPerHost: Int)
    extends AbstractRemoteAddressFilter[InetSocketAddress]
    with ScorexLogging {
  private val inboundConnectionCount = new AtomicInteger(0)
  private val perHostConnectionCount = new ConcurrentHashMap[InetAddress, Int]
  private val emptyChannelFuture     = null.asInstanceOf[ChannelFuture]

  private def dec(remoteAddress: InetAddress) = {
    inboundConnectionCount.decrementAndGet()
    log.trace(s"Number of inbound connections: ${inboundConnectionCount.get()}")
    perHostConnectionCount.compute(remoteAddress, (_, cnt) => cnt - 1)
    emptyChannelFuture
  }

  override def accept(ctx: ChannelHandlerContext, remoteAddress: InetSocketAddress): Boolean = Option(remoteAddress.getAddress) match {
    case None =>
      log.debug(s"Can't obtain an address from $remoteAddress")
      false

    case Some(address) =>
      val newTotal        = inboundConnectionCount.incrementAndGet()
      val newCountPerHost = perHostConnectionCount.compute(address, (_, cnt) => Option(cnt).fold(1)(_ + 1))
      val isBlacklisted   = peerDatabase.blacklistedHosts.contains(address)

      val accepted = newTotal <= maxInboundConnections &&
        newCountPerHost <= maxConnectionsPerHost &&
        !isBlacklisted

      log.trace(
        s"Check inbound connection from $remoteAddress: new inbound total = $newTotal, " +
          s"connections with this host = $newCountPerHost, address ${if (isBlacklisted) "IS" else "is not"} blacklisted, " +
          s"${if (accepted) "is" else "is not"} accepted"
      )

      accepted
  }

  override def channelAccepted(ctx: ChannelHandlerContext, remoteAddress: InetSocketAddress): Unit =
    ctx.channel().closeFuture().addListener((_: ChannelFuture) => Option(remoteAddress.getAddress).foreach(dec))

  override def channelRejected(ctx: ChannelHandlerContext, remoteAddress: InetSocketAddress): ChannelFuture =
    Option(remoteAddress.getAddress).fold(emptyChannelFuture)(dec)
} 
Example 135
Source File: TestUtils.scala    From service-container   with Apache License 2.0 5 votes vote down vote up
package com.github.vonnagy.service.container


import java.net.InetSocketAddress
import java.nio.channels.ServerSocketChannel

object TestUtils {
  def temporaryServerAddress(interface: String = "127.0.0.1"): InetSocketAddress = {
    val serverSocket = ServerSocketChannel.open()
    try {
      serverSocket.socket.bind(new InetSocketAddress(interface, 0))
      val port = serverSocket.socket.getLocalPort
      new InetSocketAddress(interface, port)
    } finally serverSocket.close()
  }

  def temporaryServerHostnameAndPort(interface: String = "127.0.0.1"): (InetSocketAddress, String, Int) = {
    val socketAddress = temporaryServerAddress(interface)
    (socketAddress, socketAddress.getHostName, socketAddress.getPort)
  }
} 
Example 136
Source File: RandomPortSupport.scala    From akka-ddd-cqrs-es-example   with MIT License 5 votes vote down vote up
package com.github.j5ik2o.bank.adaptor.util

import java.net.InetSocketAddress
import java.nio.channels.ServerSocketChannel


trait RandomPortSupport {

  def temporaryServerAddress(interface: String = "127.0.0.1"): InetSocketAddress = {
    val serverSocket = ServerSocketChannel.open()
    try {
      serverSocket.socket.bind(new InetSocketAddress(interface, 0))
      val port = serverSocket.socket.getLocalPort
      new InetSocketAddress(interface, port)
    } finally serverSocket.close()
  }

  def temporaryServerHostnameAndPort(interface: String = "127.0.0.1"): (String, Int) = {
    val socketAddress = temporaryServerAddress(interface)
    socketAddress.getHostName -> socketAddress.getPort
  }

  def temporaryServerPort(interface: String = "127.0.0.1"): Int =
    temporaryServerHostnameAndPort(interface)._2
}

object RandomPortSupport extends RandomPortSupport 
Example 137
Source File: Utils.scala    From akka-ddd-cqrs-es-example   with MIT License 5 votes vote down vote up
object Utils {
  import java.net.InetSocketAddress
  import java.nio.channels.ServerSocketChannel

  
  trait RandomPortSupport {

    def temporaryServerAddress(interface: String = "127.0.0.1"): InetSocketAddress = {
      val serverSocket = ServerSocketChannel.open()
      try {
        serverSocket.socket.bind(new InetSocketAddress(interface, 0))
        val port = serverSocket.socket.getLocalPort
        new InetSocketAddress(interface, port)
      } finally serverSocket.close()
    }

    def temporaryServerHostnameAndPort(interface: String = "127.0.0.1"): (String, Int) = {
      val socketAddress = temporaryServerAddress(interface)
      socketAddress.getHostName -> socketAddress.getPort
    }

    def temporaryServerPort(interface: String = "127.0.0.1"): Int =
      temporaryServerHostnameAndPort(interface)._2
  }

  object RandomPortSupport extends RandomPortSupport

} 
Example 138
Source File: Netty.scala    From scalaz-netty   with Apache License 2.0 5 votes vote down vote up
package scalaz
package netty

import java.net.InetSocketAddress
import java.util.concurrent.ExecutorService

import _root_.io.netty.channel._
import scodec.bits.ByteVector

import scalaz.concurrent._
import scalaz.stream._

object Netty {

  def server(bind: InetSocketAddress, config: ServerConfig = ServerConfig.Default)(implicit pool: ExecutorService = Strategy.DefaultExecutorService, S: Strategy): Process[Task, Process[Task, Exchange[ByteVector, ByteVector]]] = {
    Process.bracket(Server(bind, config))(s => Process.eval(s.shutdown).drain) { server: Server =>
      server.listen
    }
  }

  def connect(to: InetSocketAddress, config: ClientConfig = ClientConfig.Default)(implicit pool: ExecutorService = Strategy.DefaultExecutorService, S: Strategy): Process[Task, Exchange[ByteVector, ByteVector]] = {
    Process.bracket(Client(to, config))(_.shutdown) { client: Client =>
      Process(Exchange(client.read, client.write))
    }
  }

  private[netty] def toTask(f: ChannelFuture)(implicit pool: ExecutorService): Task[Unit] = fork {
    Task async { (cb: (Throwable \/ Unit) => Unit) =>
      f.addListener(new ChannelFutureListener {
        def operationComplete(f: ChannelFuture): Unit = {
          if (f.isSuccess)
            cb(\/-(()))
          else
            cb(-\/(f.cause))
        }
      })
    }
  }

  private def fork[A](t: Task[A])(implicit pool: ExecutorService = Strategy.DefaultExecutorService): Task[A] = {
    Task async { cb =>
      t unsafePerformAsync { either =>
        pool.submit(new Runnable {
          def run(): Unit = cb(either)
        })

        ()
      }
    }
  }
} 
Example 139
Source File: Webpack.scala    From recogito2   with Apache License 2.0 5 votes vote down vote up
import java.net.InetSocketAddress
import play.sbt.PlayRunHook
import sbt._
import scala.sys.process.Process

object Webpack {
  def apply(base: File): PlayRunHook = {
    object WebpackHook extends PlayRunHook {
      var process: Option[Process] = None

      override def beforeStarted() = {
        process = Option(
          Process("webpack", base).run()
        )
      }

      override def afterStarted(addr: InetSocketAddress) = {
        process = Option(
          Process("webpack --watch --watch-poll", base).run()
        )
      }

      override def afterStopped() = {
        process.foreach(_.destroy())
        process = None
      }
    }

    WebpackHook
  }
} 
Example 140
Source File: TestHelper.scala    From diffy   with GNU Affero General Public License v3.0 5 votes vote down vote up
package ai.diffy

import java.net.InetSocketAddress

import ai.diffy.analysis._
import ai.diffy.compare.Difference
import ai.diffy.proxy._
import ai.diffy.util.ServiceInstance
import com.twitter.util.{Duration, StorageUnit}
import org.scalatest.mock.MockitoSugar

object TestHelper extends MockitoSugar {
  lazy val testSettings = Settings(
    datacenter = "test",
    servicePort = new InetSocketAddress(9999),
    candidate = "candidate",
    primary = "primary",
    secondary = "secondary",
    protocol = "test",
    clientId = "test",
    pathToThriftJar = "test",
    serviceClass = "test",
    serviceName = "test",
    apiRoot = "test",
    enableThriftMux = false,
    relativeThreshold = 0.0,
    absoluteThreshold = 0.0,
    teamEmail = "test",
    emailDelay = Duration.fromSeconds(0),
    rootUrl = "test",
    allowHttpSideEffects = true,
    excludeHttpHeadersComparison = true,
    skipEmailsWhenNoErrors = false,
    httpsPort = "443",
    useFramedThriftTransport = false,
    responseMode = ServiceInstance.Primary,
    maxHeaderSize = StorageUnit.fromKilobytes(32),
    maxResponseSize = StorageUnit.fromMegabytes(5)
  )

  def makeEmptyJoinedDifferences = {
    val rawCounter = RawDifferenceCounter(new InMemoryDifferenceCounter())
    val noiseCounter = NoiseDifferenceCounter(new InMemoryDifferenceCounter())
    JoinedDifferences(rawCounter, noiseCounter)
  }

  def makePopulatedJoinedDifferences(endpoint : String, diffs : Map[String, Difference]) = {
    val rawCounter = RawDifferenceCounter(new InMemoryDifferenceCounter())
    val noiseCounter = NoiseDifferenceCounter(new InMemoryDifferenceCounter())
    val data = new InMemoryEndpointMetadata()
    data.add(diffs)
    rawCounter.counter.asInstanceOf[InMemoryDifferenceCounter].endpointsMap += (endpoint -> data)

    JoinedDifferences(rawCounter, noiseCounter)
  }
} 
Example 141
Source File: ExampleServers.scala    From diffy   with GNU Affero General Public License v3.0 5 votes vote down vote up
package ai.diffy.examples.thrift

import java.net.InetSocketAddress

import ai.diffy.thriftscala._
import com.twitter.finagle.ThriftMux
import com.twitter.util.Future

object ExampleServers {
  def main(args: Array[String]): Unit = {
    val primary = args(0).toInt
    val secondary = args(1).toInt
    val candidate = args(2).toInt

    val baseline = new AdderExample({case (a:Int,b:Int) => a + b})
    val sut = new AdderExample({case (a:Int,b:Int) => a * b})

    ThriftMux.server.serveIface(new InetSocketAddress(primary), baseline)
    ThriftMux.server.serveIface(new InetSocketAddress(secondary), baseline)
    ThriftMux.server.serveIface(new InetSocketAddress(candidate), sut)
  }
}

class AdderExample(f: (Int,Int) => Int) extends Adder.MethodPerEndpoint {
  override def add(a:Int,b:Int)= Future.value(f(a, b))
} 
Example 142
Source File: Settings.scala    From diffy   with GNU Affero General Public License v3.0 5 votes vote down vote up
package ai.diffy.proxy

import java.net.InetSocketAddress

import ai.diffy.util.ResourceMatcher
import ai.diffy.util.ServiceInstance
import com.twitter.util.{Duration, StorageUnit, Try}

case class Settings(
    datacenter: String,
    servicePort: InetSocketAddress,
    candidate: String,
    primary: String,
    secondary: String,
    protocol: String,
    clientId: String,
    pathToThriftJar: String,
    serviceClass: String,
    serviceName: String,
    apiRoot: String,
    enableThriftMux: Boolean,
    relativeThreshold: Double,
    absoluteThreshold: Double,
    teamEmail: String,
    emailDelay: Duration,
    rootUrl: String,
    allowHttpSideEffects: Boolean,
    excludeHttpHeadersComparison: Boolean,
    skipEmailsWhenNoErrors: Boolean,
    httpsPort: String,
    useFramedThriftTransport: Boolean,
    hostname: String = Try(java.lang.management.ManagementFactory.getRuntimeMXBean.getName.split("@")(1)).getOrElse("unknown"),
    user: String = Try(sys.env("USER")).getOrElse("unknown"),
    resourceMatcher: Option[ResourceMatcher] = None,
    responseMode: ServiceInstance = ServiceInstance.Primary,
    maxResponseSize: StorageUnit,
    maxHeaderSize: StorageUnit) 
Example 143
Source File: DeploySparkSubmit.scala    From polynote   with Apache License 2.0 5 votes vote down vote up
package polynote.kernel.remote

import java.io.File
import java.net.InetSocketAddress

import polynote.buildinfo.BuildInfo
import polynote.config.{PolynoteConfig, SparkConfig}
import polynote.kernel.{Kernel, LocalSparkKernelFactory, ScalaCompiler, remote}
import polynote.kernel.environment.{Config, CurrentNotebook}
import polynote.kernel.remote.SocketTransport.DeploySubprocess.DeployCommand
import polynote.kernel.util.pathOf
import polynote.messages.NotebookConfig
import polynote.runtime.KernelRuntime
import polynote.runtime.spark.reprs.SparkReprsOf
import zio.RIO

object DeploySparkSubmit extends DeployCommand {
  def parseQuotedArgs(str: String): List[String] = str.split('"').toList.sliding(2, 2).toList.flatMap {
    case nonQuoted :: quoted :: Nil => nonQuoted.split("\\s+").toList ::: quoted :: Nil
    case nonQuoted :: Nil => nonQuoted.split("\\s+").toList
    case _ => sys.error("impossible sliding state")
  }.map(_.trim).filterNot(_.isEmpty)

  def build(
    config: PolynoteConfig,
    nbConfig: NotebookConfig,
    notebookPath: String,
    mainClass: String = classOf[RemoteKernelClient].getName,
    jarLocation: String = getClass.getProtectionDomain.getCodeSource.getLocation.getPath,
    serverArgs: List[String] = Nil
  ): Seq[String] = {

    val sparkConfig = config.spark.map(_.properties).getOrElse(Map.empty) ++
      nbConfig.sparkTemplate.map(_.properties).getOrElse(Map.empty) ++
      nbConfig.sparkConfig.getOrElse(Map.empty)

    val sparkArgs = (sparkConfig - "sparkSubmitArgs" - "spark.driver.extraJavaOptions" - "spark.submit.deployMode" - "spark.driver.memory")
      .flatMap(kv => Seq("--conf", s"${kv._1}=${kv._2}"))

    val sparkSubmitArgs =
      nbConfig.sparkTemplate.flatMap(_.sparkSubmitArgs).toList.flatMap(parseQuotedArgs) ++
      sparkConfig.get("sparkSubmitArgs").toList.flatMap(parseQuotedArgs)

    val isRemote = sparkConfig.get("spark.submit.deployMode") contains "cluster"
    val libraryPath = List(sys.props.get("java.library.path"), sys.env.get("LD_LIBRARY_PATH"))
      .flatten
      .map(_.trim().stripPrefix(File.pathSeparator).stripSuffix(File.pathSeparator))
      .mkString(File.pathSeparator)

    val javaOptions = Map(
      "log4j.configuration" -> "log4j.properties",
      "java.library.path"   -> libraryPath
    )

    val allDriverOptions =
      sparkConfig.get("spark.driver.extraJavaOptions").toList ++
      javaOptions.toList.map {
        case (name, value) => s"-D$name=$value"
      } mkString " "

    val additionalJars = pathOf(classOf[SparkReprsOf[_]]) :: pathOf(classOf[KernelRuntime]) :: Nil

    val appName = sparkConfig.getOrElse("spark.app.name", s"Polynote ${BuildInfo.version}: $notebookPath")

    Seq("spark-submit", "--class", mainClass, "--name", appName) ++
      Seq("--driver-java-options", allDriverOptions) ++
      sparkConfig.get("spark.driver.memory").toList.flatMap(mem => List("--driver-memory", mem)) ++
      (if (isRemote) Seq("--deploy-mode", "cluster") else Nil) ++
      sparkSubmitArgs ++ Seq("--jars", additionalJars.mkString(",")) ++
      sparkArgs ++ Seq(jarLocation) ++ serverArgs
  }

  override def apply(serverAddress: InetSocketAddress): RIO[Config with CurrentNotebook, Seq[String]] = for {
    config   <- Config.access
    nbConfig <- CurrentNotebook.config
    path     <- CurrentNotebook.path
  } yield build(
    config,
    nbConfig,
    path,
    serverArgs =
      "--address" :: serverAddress.getAddress.getHostAddress ::
      "--port" :: serverAddress.getPort.toString ::
      "--kernelFactory" :: classOf[LocalSparkKernelFactory].getName ::
      Nil
  )
} 
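`parseQuotedArgs` above alternates between unquoted and quoted segments: splitting on `"` first, then on whitespace, keeps quoted arguments as single tokens. A quick illustration of the expected behaviour (the input string is illustrative):

// --conf "spark.app.name=My App" --verbose
// splits into List("--conf", "spark.app.name=My App", "--verbose"):
// whitespace inside quotes is preserved, whitespace outside separates tokens.
val args = DeploySparkSubmit.parseQuotedArgs("""--conf "spark.app.name=My App" --verbose""")
assert(args == List("--conf", "spark.app.name=My App", "--verbose"))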
Example 144
Source File: RedisChannel.scala    From laserdisc   with MIT License 5 votes vote down vote up
package laserdisc
package fs2

import java.net.InetSocketAddress

import _root_.fs2._
import _root_.fs2.io.tcp.{Socket, SocketGroup}
import cats.MonadError
import cats.effect.{Blocker, Concurrent, ContextShift, Resource}
import cats.syntax.flatMap._
import laserdisc.protocol._
import log.effect.LogWriter
import scodec.Codec
import scodec.stream.{StreamDecoder, StreamEncoder}

import scala.concurrent.duration.FiniteDuration

object RedisChannel {
  private[this] final val streamDecoder = StreamDecoder.many(Codec[RESP])
  private[this] final val streamEncoder = StreamEncoder.many(Codec[RESP])

  private[fs2] final def apply[F[_]: ContextShift: LogWriter: Concurrent](
      address: InetSocketAddress,
      writeTimeout: Option[FiniteDuration],
      readMaxBytes: Int
  )(blocker: Blocker): Pipe[F, RESP, RESP] = {
    def connectedSocket: Resource[F, Socket[F]] =
      SocketGroup(blocker, nonBlockingThreadCount = 4) >>= (_.client(address, noDelay = true))

    stream =>
      Stream.resource(connectedSocket) >>= { socket =>
        val send    = stream.through(impl.send(socket.writes(writeTimeout)))
        val receive = socket.reads(readMaxBytes).through(impl.receiveResp)

        send.drain
          .covaryOutput[RESP]
          .mergeHaltBoth(receive)
          .onFinalizeWeak(socket.endOfOutput)
      }
  }

  private[this] final object impl {
    def send[F[_]: MonadError[*[_], Throwable]](socketChannel: Pipe[F, Byte, Unit])(
        implicit log: LogWriter[F]
    ): Pipe[F, RESP, Unit] =
      _.evalTap(resp => log.trace(s"sending $resp"))
        .through(streamEncoder.encode[F])
        .flatMap(bits => Stream.chunk(Chunk.bytes(bits.toByteArray)))
        .through(socketChannel)

    def receiveResp[F[_]: MonadError[*[_], Throwable]](implicit log: LogWriter[F]): Pipe[F, Byte, RESP] = {
      def framing: Pipe[F, Byte, CompleteFrame] = {
        def loopScan(bytesIn: Stream[F, Byte], previous: RESPFrame): Pull[F, CompleteFrame, Unit] =
          bytesIn.pull.uncons.flatMap {
            case Some((chunk, rest)) =>
              previous.append(chunk.toByteBuffer) match {
                case Left(ex)                    => Pull.raiseError(ex)
                case Right(frame: CompleteFrame) => Pull.output1(frame) >> loopScan(rest, EmptyFrame)
                case Right(frame: MoreThanOneFrame) =>
                  Pull.output(Chunk.vector(frame.complete)) >> {
                    if (frame.remainder.isEmpty) loopScan(rest, EmptyFrame)
                    else loopScan(rest, IncompleteFrame(frame.remainder, 0L))
                  }
                case Right(frame: IncompleteFrame) => loopScan(rest, frame)
              }

            case _ => Pull.done
          }

        bytesIn => loopScan(bytesIn, EmptyFrame).stream
      }

      pipeIn =>
        streamDecoder
          .decode(pipeIn.through(framing) map (_.bits))
          .evalTap(resp => log.trace(s"receiving $resp"))
    }
  }
} 
Example 145
Source File: Api.scala    From whirlwind-tour-akka-typed   with Apache License 2.0 5 votes vote down vote up
package de.heikoseeberger.wtat

import akka.actor.{ ActorSystem, Scheduler }
import akka.http.scaladsl.Http
import akka.http.scaladsl.Http.ServerBinding
import akka.http.scaladsl.model.StatusCodes.{ Conflict, Created, NoContent, NotFound }
import akka.http.scaladsl.server.{ Directives, Route }
import akka.stream.Materializer
import akka.actor.typed.scaladsl.Actor
import akka.actor.typed.scaladsl.AskPattern.Askable
import akka.actor.typed.{ ActorRef, Behavior }
import akka.util.Timeout
import de.heikoseeberger.akkahttpcirce.ErrorAccumulatingCirceSupport
import java.net.InetSocketAddress
import org.apache.logging.log4j.scala.Logging
import scala.concurrent.duration.FiniteDuration
import scala.util.{ Failure, Success }

object Api extends Logging {

  sealed trait Command
  private final case object HandleBindFailure                      extends Command
  private final case class HandleBound(address: InetSocketAddress) extends Command

  final val Name = "api"

  def apply(address: String,
            port: Int,
            userRepository: ActorRef[UserRepository.Command],
            userView: ActorRef[UserView.Command],
            askTimeout: FiniteDuration)(implicit mat: Materializer): Behavior[Command] =
    Actor.deferred { context =>
      import akka.actor.typed.scaladsl.adapter._
      import context.executionContext
      implicit val s: ActorSystem = context.system.toUntyped

      val self = context.self
      Http()
        .bindAndHandle(route(userRepository, userView)(askTimeout, context.system.scheduler),
                       address,
                       port)
        .onComplete {
          case Failure(_)                      => self ! HandleBindFailure
          case Success(ServerBinding(address)) => self ! HandleBound(address)
        }

      Actor.immutable {
        case (_, HandleBindFailure) =>
          logger.error(s"Stopping, because cannot bind to $address:$port!")
          Actor.stopped

        case (_, HandleBound(address)) =>
          logger.info(s"Bound to $address")
          Actor.ignore
      }
    }

  def route(
      userRepository: ActorRef[UserRepository.Command],
      userView: ActorRef[UserView.Command]
  )(implicit askTimeout: Timeout, scheduler: Scheduler): Route = {
    import Directives._
    import ErrorAccumulatingCirceSupport._
    import io.circe.generic.auto._
    import io.circe.refined._

    pathEndOrSingleSlash {
      get {
        complete {
          import UserView._
          (userView ? GetUsers).mapTo[Users]
        }
      } ~
      post {
        entity(as[User]) { user =>
          import UserRepository._
          onSuccess(userRepository ? addUser(user)) {
            case UsernameTaken(_) => complete(Conflict)
            case UserAdded(_)     => complete(Created)
          }
        }
      }
    } ~
    path(Segment) { username =>
      delete {
        import UserRepository._
        onSuccess(userRepository ? removeUser(username)) {
          case UsernameUnknown(_) => complete(NotFound)
          case UserRemoved(_)     => complete(NoContent)
        }
      }
    }
  }
} 
Example 146
Source File: CassandraClient.scala    From izanami   with Apache License 2.0 5 votes vote down vote up
package store.cassandra

import java.net.InetSocketAddress

import com.datastax.driver.core.{Cluster, Session}
import env.CassandraConfig
import libs.logs.{IzanamiLogger, ZLogger}
import zio.{Task, UIO, ZManaged}

object CassandraClient {

  def cassandraClient(mayBeConfig: Option[CassandraConfig]): ZManaged[ZLogger, Throwable, Option[(Cluster, Session)]] =
    mayBeConfig
      .map { config =>
        ZManaged
          .make(
            ZLogger.info(s"Initializing Cassandra cluster for ${config}") *> Task {
              val adds = config.addresses.map { add =>
                val Array(host, port) = add.split(":")
                new InetSocketAddress(host, port.toInt)
              }
              val builder: Cluster.Builder = Cluster.builder
                .withoutJMXReporting()
                .addContactPointsWithPorts(adds: _*)

              val b: Cluster.Builder = config.clusterName.map(builder.withClusterName).getOrElse(builder)

              val cluster: Cluster = (for {
                username <- config.username
                password <- config.password
              } yield {
                b.withCredentials(username, password)
              }).getOrElse(b).build()

              cluster.init()

              val session = cluster.connect()

              // Reuse the session created above; calling cluster.connect() again
              // would open a second session that is never closed.
              session.execute(s"""
                                     |CREATE KEYSPACE IF NOT EXISTS ${config.keyspace} WITH REPLICATION = {
                                     | 'class' : 'SimpleStrategy', 'replication_factor' : ${config.replicationFactor}
                                     |}""".stripMargin)

              (cluster, session)
            }
          )(t => UIO(t._1.close))
          .map(Some.apply)

      }
      .getOrElse(ZManaged.effectTotal(None))
} 
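The address parsing above assumes every configured entry has the form `host:port` and will fail on raw IPv6 literals, since `split(":")` then yields more than two segments. A more defensive variant (a sketch, not the izanami implementation) splits on the last colon only:

import java.net.InetSocketAddress

// Split on the last ':' so IPv6 literals like "[::1]:9042" keep their inner colons.
def parseAddress(s: String): InetSocketAddress = {
  val idx = s.lastIndexOf(':')
  require(idx > 0, s"expected host:port, got '$s'")
  val host = s.substring(0, idx).stripPrefix("[").stripSuffix("]")
  new InetSocketAddress(host, s.substring(idx + 1).toInt)
}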
Example 147
Source File: CorrelationIdMiddlewareTest.scala    From scala-server-toolkit   with MIT License 5 votes vote down vote up
package com.avast.sst.http4s.server.middleware

import java.net.InetSocketAddress

import cats.effect.{ContextShift, IO, Resource, Timer}
import com.avast.sst.http4s.server.Http4sRouting
import org.http4s.client.blaze.BlazeClientBuilder
import org.http4s.dsl.Http4sDsl
import org.http4s.server.blaze.BlazeServerBuilder
import org.http4s.util.CaseInsensitiveString
import org.http4s.{Header, HttpRoutes, Request, Uri}
import org.scalatest.funsuite.AsyncFunSuite

import scala.concurrent.ExecutionContext

@SuppressWarnings(Array("scalafix:Disable.get", "scalafix:Disable.toString", "scalafix:Disable.createUnresolved"))
class CorrelationIdMiddlewareTest extends AsyncFunSuite with Http4sDsl[IO] {

  implicit private val cs: ContextShift[IO] = IO.contextShift(ExecutionContext.global)
  implicit private val timer: Timer[IO] = IO.timer(ExecutionContext.global)

  test("CorrelationIdMiddleware fills Request attributes and HTTP response header") {
    val test = for {
      middleware <- Resource.liftF(CorrelationIdMiddleware.default[IO])
      routes = Http4sRouting.make {
        middleware.wrap {
          HttpRoutes.of[IO] {
            case req @ GET -> Root / "test" =>
              val id = middleware.retrieveCorrelationId(req)
              Ok("test").map(_.withHeaders(Header("Attribute-Value", id.toString)))
          }
        }
      }
      server <- BlazeServerBuilder[IO](ExecutionContext.global)
        .bindSocketAddress(InetSocketAddress.createUnresolved("127.0.0.1", 0))
        .withHttpApp(routes)
        .resource
      client <- BlazeClientBuilder[IO](ExecutionContext.global).resource
    } yield (server, client)

    test
      .use {
        case (server, client) =>
          client
            .run(
              Request[IO](uri = Uri.unsafeFromString(s"http://${server.address.getHostString}:${server.address.getPort}/test"))
                .withHeaders(Header("Correlation-Id", "test-value"))
            )
            .use { response =>
              IO.delay {
                assert(response.headers.get(CaseInsensitiveString("Correlation-Id")).get.value === "test-value")
                assert(response.headers.get(CaseInsensitiveString("Attribute-Value")).get.value === "Some(CorrelationId(test-value))")
              }
            }
      }
      .unsafeToFuture()
  }

} 
Example 148
Source File: Http4sBlazeServerModule.scala    From scala-server-toolkit   with MIT License 5 votes vote down vote up
package com.avast.sst.http4s.server

import java.net.{InetSocketAddress, StandardSocketOptions}

import cats.effect.{ConcurrentEffect, Resource, Timer}
import org.http4s.HttpApp
import org.http4s.server.Server
import org.http4s.server.blaze.BlazeServerBuilder

import scala.concurrent.ExecutionContext
import scala.concurrent.duration.Duration

object Http4sBlazeServerModule {

  
  def make[F[_]: ConcurrentEffect: Timer](
      config: Http4sBlazeServerConfig,
      httpApp: HttpApp[F],
      executionContext: ExecutionContext
  ): Resource[F, Server[F]] = {
    for {
      inetSocketAddress <- Resource.liftF(
        ConcurrentEffect[F].delay(
          InetSocketAddress.createUnresolved(config.listenAddress, config.listenPort)
        )
      )
      server <-
        BlazeServerBuilder[F](executionContext)
          .bindSocketAddress(inetSocketAddress)
          .withHttpApp(httpApp)
          .withoutBanner
          .withNio2(config.nio2Enabled)
          .withWebSockets(config.webSocketsEnabled)
          .enableHttp2(config.http2Enabled)
          .withResponseHeaderTimeout(Duration.fromNanos(config.responseHeaderTimeout.toNanos))
          .withIdleTimeout(Duration.fromNanos(config.idleTimeout.toNanos))
          .withBufferSize(config.bufferSize)
          .withMaxRequestLineLength(config.maxRequestLineLength)
          .withMaxHeadersLength(config.maxHeadersLength)
          .withChunkBufferMaxSize(config.chunkBufferMaxSize)
          .withConnectorPoolSize(config.connectorPoolSize)
          .withChannelOption[java.lang.Boolean](StandardSocketOptions.TCP_NODELAY, config.socketOptions.tcpNoDelay)
          .resource
    } yield server
  }
} 
Example 149
Source File: VertxServerRequest.scala    From tapir   with Apache License 2.0 5 votes vote down vote up
package sttp.tapir.server.vertx.decoders

import java.net.{InetSocketAddress, URI}

import io.vertx.scala.core.net.SocketAddress
import io.vertx.scala.ext.web.RoutingContext
import sttp.model.Method
import sttp.tapir.model.{ConnectionInfo, ServerRequest}

private[vertx] class VertxServerRequest(rc: RoutingContext) extends ServerRequest {
  private lazy val req = rc.request
  private lazy val _headers = req.headers
  lazy val connectionInfo: ConnectionInfo = {
    val conn = req.connection
    ConnectionInfo(
      Option(conn.localAddress).map(asInetSocketAddress),
      Option(conn.remoteAddress).map(asInetSocketAddress),
      Option(conn.isSsl)
    )
  }
  override def method: Method = Method.apply(req.rawMethod)
  override def protocol: String = req.scheme.get
  override def uri: URI = new URI(req.uri)
  override def headers: Seq[(String, String)] = _headers.names.map { key => (key, _headers.get(key).get) }.toSeq
  override def header(name: String): Option[String] = _headers.get(name)

  private def asInetSocketAddress(address: SocketAddress): InetSocketAddress =
    InetSocketAddress.createUnresolved(address.host, address.port)
} 
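`InetSocketAddress.createUnresolved`, used here and in several examples above, skips DNS resolution entirely, whereas the `new InetSocketAddress(host, port)` constructor resolves eagerly. The difference in a nutshell:

import java.net.InetSocketAddress

val unresolved = InetSocketAddress.createUnresolved("example.com", 80)
assert(unresolved.isUnresolved)        // no DNS lookup was performed
assert(unresolved.getAddress == null)  // so there is no InetAddress yet

val resolved = new InetSocketAddress("example.com", 80) // triggers a DNS lookup
// resolved.getAddress is non-null if the name could be resolved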
Example 150
Source File: package.scala    From scio   with Apache License 2.0 5 votes vote down vote up
package com.spotify.scio

import java.net.InetSocketAddress

import com.spotify.scio.io.ClosedTap
import com.spotify.scio.values.SCollection
import com.spotify.scio.coders.Coder
import com.spotify.scio.elasticsearch.ElasticsearchIO.{RetryConfig, WriteParam}
import org.apache.beam.sdk.io.elasticsearch.ElasticsearchIO.Write.BulkExecutionException
import org.elasticsearch.action.DocWriteRequest
import org.joda.time.Duration

package object elasticsearch {
  // Enclosing scopes restored so the excerpt compiles (the two closing braces
  // below end the implicit class and the package object); the wrapper name is illustrative.
  implicit class ElasticsearchSCollection[T](private val self: SCollection[T]) {
    def saveAsElasticsearch(
      esOptions: ElasticsearchOptions,
      flushInterval: Duration = WriteParam.DefaultFlushInterval,
      numOfShards: Long = WriteParam.DefaultNumShards,
      maxBulkRequestSize: Int = WriteParam.DefaultMaxBulkRequestSize,
      errorFn: BulkExecutionException => Unit = WriteParam.DefaultErrorFn,
      retry: RetryConfig = WriteParam.DefaultRetryConfig
    )(f: T => Iterable[DocWriteRequest[_]])(implicit coder: Coder[T]): ClosedTap[Nothing] = {
      val param = WriteParam(f, errorFn, flushInterval, numOfShards, maxBulkRequestSize, retry)
      self.write(ElasticsearchIO[T](esOptions))(param)
    }
  }
} 
Example 151
Source File: IndexAdmin.scala    From scio   with Apache License 2.0 5 votes vote down vote up
package com.spotify.scio.elasticsearch

import java.net.InetSocketAddress

import org.elasticsearch.action.admin.indices.create.CreateIndexResponse
import org.elasticsearch.client.AdminClient
import org.elasticsearch.common.settings.Settings
import org.elasticsearch.common.transport.TransportAddress
import org.elasticsearch.common.xcontent.XContentType
import org.elasticsearch.transport.client.PreBuiltTransportClient

import scala.util.Try

object IndexAdmin {
  private def adminClient[A](esOptions: ElasticsearchOptions)(f: AdminClient => A): Try[A] = {
    val settings: Settings =
      Settings.builder.put("cluster.name", esOptions.clusterName).build

    val transportAddresses: Seq[TransportAddress] = esOptions.servers
      .map(addr => new TransportAddress(addr))

    val client = new PreBuiltTransportClient(settings)
      .addTransportAddresses(transportAddresses: _*)

    val result = Try(f(client.admin()))
    client.close()
    result
  }

  
  private def ensureIndex(
    index: String,
    mappingSource: String,
    client: AdminClient
  ): CreateIndexResponse =
    client
      .indices()
      .prepareCreate(index)
      .setSource(mappingSource, XContentType.JSON)
      .get()
} 
Example 153
Source File: IndexAdmin.scala    From scio   with Apache License 2.0 5 votes vote down vote up
package com.spotify.scio.elasticsearch

import java.net.InetSocketAddress

import org.elasticsearch.action.admin.indices.create.CreateIndexResponse
import org.elasticsearch.client.AdminClient
import org.elasticsearch.common.settings.Settings
import org.elasticsearch.common.transport.InetSocketTransportAddress
import org.elasticsearch.common.xcontent.XContentType
import org.elasticsearch.transport.client.PreBuiltTransportClient

import scala.util.Try

object IndexAdmin {
  private def adminClient[A](esOptions: ElasticsearchOptions)(f: AdminClient => A): Try[A] = {
    val settings: Settings =
      Settings.builder.put("cluster.name", esOptions.clusterName).build

    val transportAddresses: Seq[InetSocketTransportAddress] = esOptions.servers
      .map(addr => new InetSocketTransportAddress(addr))

    val client = new PreBuiltTransportClient(settings)
      .addTransportAddresses(transportAddresses: _*)

    val result = Try(f(client.admin()))
    client.close()
    result
  }

  
  private def ensureIndex(
    index: String,
    mappingSource: String,
    client: AdminClient
  ): CreateIndexResponse =
    client
      .indices()
      .prepareCreate(index)
      .setSource(mappingSource, XContentType.JSON)
      .get()
} 
Example 154
Source File: ServerHostTest.scala    From polynote   with Apache License 2.0 5 votes vote down vote up
package polynote.server

import java.net.{HttpURLConnection, InetAddress, InetSocketAddress, URL}

import org.scalamock.scalatest.MockFactory
import org.scalatest.{FreeSpec, Matchers}
import polynote.app.{App, Args, Environment, MainArgs}
import polynote.config._
import polynote.kernel.{BaseEnv, Kernel}
import polynote.kernel.environment.Config
import polynote.kernel.environment.Env.LayerOps
import polynote.kernel.interpreter.Interpreter
import polynote.kernel.logging.Logging
import polynote.server.auth.IdentityProvider
import polynote.server.repository.NotebookRepository
import polynote.server.repository.fs.FileSystems
import polynote.testing.{ConfiguredZIOSpec, ZIOSpec}
import zio.{RIO, Task, ZIO, ZLayer}
import zio.blocking.effectBlocking

class ServerHostTest extends FreeSpec with Matchers with ConfiguredZIOSpec with MockFactory {
  override val config: PolynoteConfig = PolynoteConfig(
    listen = Listen(host = "0.0.0.0", port = 0)
  )

  val configLayer: ZLayer[BaseEnv, Nothing, Config] = ZLayer.succeed(config)

  private def request(uri: String) = effectBlocking {
    val conn = new URL(uri).openConnection().asInstanceOf[HttpURLConnection]
    conn.setConnectTimeout(500)
    conn.connect()
    val responseCode = conn.getResponseCode
    responseCode shouldEqual 200
  }

  "Server" - {

    "listens on all interfaces when given listen=0.0.0.0" ignore {
      val kernel        = mock[Kernel]
      val kernelFactory = Kernel.Factory.const(kernel)
      val server        = new Server

      val serverEnv: ZLayer[BaseEnv, Throwable, server.MainEnv with MainArgs] =
        (configLayer andThen IdentityProvider.layer) ++
          Interpreter.Factories.load ++ ZLayer.succeed(kernelFactory) ++ ZLayer.succeed(Args(watchUI = true)) ++
          (configLayer ++ FileSystems.live >>> NotebookRepository.live)

      val run = server.server("TESTKEY").provideSomeLayer[BaseEnv](serverEnv).use {
        server =>
          for {
            localAddress <- effectBlocking(InetAddress.getLocalHost.getCanonicalHostName)
            _            <- server.awaitUp
            port         <- server.localAddress.map(_.asInstanceOf[InetSocketAddress].getPort)
            _            <- request(s"http://$localAddress:$port/")
            _            <- request(s"http://127.0.0.1:$port/")
            _            <- server.shutdown()
          } yield ()
      }

      run.runIO()
    }

  }

} 
Example 155
Source File: RedisAddress.scala    From laserdisc   with MIT License 5 votes vote down vote up
package laserdisc
package fs2

import java.net.InetSocketAddress

import cats.{ApplicativeError, Eq}
import cats.syntax.eq._

final case class RedisAddress(host: Host, port: Port) {
  def toInetSocketAddress[F[_]](implicit F: ApplicativeError[F, Throwable]): F[InetSocketAddress] =
    F.catchNonFatal {
      new InetSocketAddress(host.value, port.value)
    }
  override def toString: String = host.value + ":" + port.value
}

object RedisAddress {
  implicit final val redisAddressEq: Eq[RedisAddress] = Eq.instance { (a1, a2) =>
    a1.host.value === a2.host.value && a1.port.value === a2.port.value
  }
} 
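Since `toInetSocketAddress` is polymorphic in the error channel, the caller picks the effect type. A usage sketch with cats-effect `IO` (the `connectTo` helper is illustrative):

import cats.effect.IO

// Host and Port are laserdisc's refined newtypes; given some RedisAddress:
def connectTo(address: RedisAddress): IO[Unit] =
  address.toInetSocketAddress[IO].flatMap { isa =>
    IO(println(s"connecting to ${isa.getHostString}:${isa.getPort}"))
  }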
Example 156
Source File: ExampleApp.scala    From caliban   with Apache License 2.0 5 votes vote down vote up
package caliban.uzhttp

import java.net.InetSocketAddress

import _root_.uzhttp.server._
import caliban.ExampleData._
import caliban._
import zio.{ App, ExitCode, ZEnv, ZIO }

object ExampleApp extends App {

  override def run(args: List[String]): ZIO[ZEnv, Nothing, ExitCode] =
    (for {
      interpreter <- ExampleApi.api.interpreter
      address     = new InetSocketAddress(8088)
      route       = UzHttpAdapter.makeHttpService("/api/graphql", interpreter)
      wsRoute     = UzHttpAdapter.makeWebSocketService("/ws/graphql", interpreter)
      server      = Server.builder(address).handleSome(route orElse wsRoute)
      _           <- server.serve.useForever.provideCustomLayer(ExampleService.make(sampleCharacters))
    } yield ()).exitCode

} 
Example 157
Source File: VoiceUDPFlow.scala    From AckCord   with MIT License 5 votes vote down vote up
package ackcord.voice

import java.net.InetSocketAddress
import java.nio.ByteOrder

import scala.concurrent.{Future, Promise}

import ackcord.data.{RawSnowflake, UserId}
import ackcord.util.UdpConnectedFlow
import akka.NotUsed
import akka.actor.typed.ActorSystem
import akka.stream.scaladsl.{BidiFlow, Concat, Flow, GraphDSL, Keep, Source}
import akka.stream.{BidiShape, OverflowStrategy}
import akka.util.ByteString

object VoiceUDPFlow {

  val silence = ByteString(0xF8, 0xFF, 0xFE)

  val SampleRate = 48000
  val FrameSize  = 960
  val FrameTime  = 20

  def flow[Mat](
      remoteAddress: InetSocketAddress,
      ssrc: Int,
      serverId: RawSnowflake,
      userId: UserId,
      secretKeys: Source[Option[ByteString], Mat]
  )(implicit system: ActorSystem[Nothing]): Flow[ByteString, AudioAPIMessage.ReceivedData, (Mat, Future[FoundIP])] =
    NaclBidiFlow
      .bidiFlow(ssrc, serverId, userId, secretKeys)
      .atopMat(voiceBidi(ssrc).reversed)(Keep.both)
      .async
      .join(Flow[ByteString].buffer(32, OverflowStrategy.backpressure).via(UdpConnectedFlow.flow(remoteAddress)))

  def voiceBidi(ssrc: Int): BidiFlow[ByteString, ByteString, ByteString, ByteString, Future[FoundIP]] = {
    implicit val byteOrder: ByteOrder = ByteOrder.BIG_ENDIAN
    val ipDiscoveryPacket = {
      val byteBuilder = ByteString.createBuilder
      byteBuilder.sizeHint(74)
      byteBuilder.putShort(0x1).putShort(70).putInt(ssrc)

      byteBuilder.putBytes(new Array[Byte](66))

      byteBuilder.result()
    }

    val valvePromise = Promise[Unit]
    val valve        = Source.future(valvePromise.future).drop(1).asInstanceOf[Source[ByteString, NotUsed]]

    val ipDiscoveryFlow = Flow[ByteString]
      .viaMat(new IPDiscoveryFlow(() => valvePromise.success(())))(Keep.right)

    BidiFlow
      .fromGraph(GraphDSL.create(ipDiscoveryFlow) { implicit b => ipDiscovery =>
        import GraphDSL.Implicits._

        val voiceIn = b.add(Flow[ByteString])

        val ipDiscoverySource           = b.add(Source.single(ipDiscoveryPacket) ++ valve)
        val ipDiscoveryAndThenVoiceData = b.add(Concat[ByteString]())

        ipDiscoverySource ~> ipDiscoveryAndThenVoiceData
        voiceIn ~> ipDiscoveryAndThenVoiceData

        BidiShape(
          ipDiscovery.in,
          ipDiscovery.out,
          voiceIn.in,
          ipDiscoveryAndThenVoiceData.out
        )
      })
  }

  
  case class FoundIP(address: String, port: Int)
} 
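The IP discovery packet built in `voiceBidi` above is a fixed 74-byte datagram: a 2-byte type field (0x1), a 2-byte length field (70), the 4-byte SSRC, and 66 zero bytes of padding that the server fills in with the discovered address and port. The same layout as a commented sketch:

import java.nio.ByteOrder
import akka.util.ByteString

def ipDiscoveryRequest(ssrc: Int): ByteString = {
  implicit val order: ByteOrder = ByteOrder.BIG_ENDIAN
  ByteString.createBuilder
    .putShort(0x1)                 // message type: request
    .putShort(70)                  // payload length
    .putInt(ssrc)                  // our SSRC
    .putBytes(new Array[Byte](66)) // zero padding; the reply carries address + port here
    .result()                      // 2 + 2 + 4 + 66 = 74 bytes
}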
Example 158
Source File: VoiceUDPHandler.scala    From AckCord   with MIT License 5 votes vote down vote up
package ackcord.voice

import java.net.InetSocketAddress

import scala.concurrent.duration._
import scala.util.{Failure, Success}

import ackcord.data.{RawSnowflake, UserId}
import akka.NotUsed
import akka.actor.typed._
import akka.actor.typed.scaladsl._
import akka.stream.OverflowStrategy
import akka.stream.scaladsl.{Keep, Sink, Source, SourceQueueWithComplete}
import akka.util.ByteString
import org.slf4j.Logger

object VoiceUDPHandler {

  def apply(
      address: String,
      port: Int,
      ssrc: Int,
      serverId: RawSnowflake,
      userId: UserId,
      soundProducer: Source[ByteString, NotUsed],
      soundConsumer: Sink[AudioAPIMessage, NotUsed],
      parent: ActorRef[VoiceHandler.Command]
  ): Behavior[Command] =
    Behaviors
      .supervise(
        Behaviors.setup[Command] { ctx =>
          implicit val system: ActorSystem[Nothing] = ctx.system

          val ((queue, futIp), watchDone) = soundProducer
            .viaMat(
              VoiceUDPFlow
                .flow(
                  new InetSocketAddress(address, port),
                  ssrc,
                  serverId,
                  userId,
                  Source.queue[Option[ByteString]](0, OverflowStrategy.dropBuffer)
                )
                .watchTermination()(Keep.both)
            )(Keep.right)
            .to(soundConsumer)
            .run()

          ctx.pipeToSelf(futIp) {
            case Success(value) => IPDiscoveryResult(value)
            case Failure(e)     => SendException(e)
          }
          ctx.pipeToSelf(watchDone)(_ => ConnectionDied)

          handle(ctx, ctx.log, ssrc, queue, parent)
        }
      )
      .onFailure(
        SupervisorStrategy
          .restartWithBackoff(100.millis, 5.seconds, 1D)
          .withResetBackoffAfter(10.seconds)
          .withMaxRestarts(5)
      )

  def handle(
      ctx: ActorContext[Command],
      log: Logger,
      ssrc: Int,
      queue: SourceQueueWithComplete[Option[ByteString]],
      parent: ActorRef[VoiceHandler.Command]
  ): Behavior[Command] = Behaviors.receiveMessage {
    case SendException(e) => throw e
    case ConnectionDied   => Behaviors.stopped
    case Shutdown =>
      queue.complete()
      Behaviors.same
    case IPDiscoveryResult(VoiceUDPFlow.FoundIP(localAddress, localPort)) =>
      parent ! VoiceHandler.GotLocalIP(localAddress, localPort)
      Behaviors.same
    case SetSecretKey(key) =>
      queue.offer(key)
      Behaviors.same
  }

  sealed trait Command

  case object Shutdown extends Command

  private case class SendException(e: Throwable)                      extends Command
  private case object ConnectionDied                                  extends Command
  private case class IPDiscoveryResult(foundIP: VoiceUDPFlow.FoundIP) extends Command
  private[voice] case class SetSecretKey(key: Option[ByteString])     extends Command
} 
Example 159
Source File: SocketSpec.scala    From asyncdb   with Apache License 2.0 5 votes vote down vote up
package io.asyncdb
package netty
package mysql

import cats.effect._
import io.netty.bootstrap._
import io.netty.channel.nio.NioEventLoopGroup
import io.netty.channel.socket.nio._
import java.net.InetSocketAddress
import scala.concurrent.{Future, ExecutionContext}
import scala.language.implicitConversions

abstract class SocketSpec extends Spec {

  implicit val contextShift                = IO.contextShift(ExecutionContext.global)

  implicit def effectAsFuture[A](f: IO[A]) = f.unsafeToFuture

  protected val config = {
    val host: String = "127.0.0.1"
    val port: Int    = 3306
    val address      = new InetSocketAddress(host, port)
    val b = (new Bootstrap)
      .remoteAddress(address)
      .group(new NioEventLoopGroup())
      .channel(classOf[NioSocketChannel])
    MySQLSocketConfig(
      bootstrap = b,
      charset = CharsetMap.Utf8_general_ci,
      database = Some("asyncdb"),
      username = "asyncdb",
      password = Some("asyncdb"),
      authMethod = None
    )
  }

  protected def withSocket[A](cfg: MySQLSocketConfig)(f: MySQLSocket[IO] => IO[A]): IO[A] = {
    Resource.make(MySQLSocket[IO](cfg).flatMap(_.connect))(_.disconnect).use(f)
  }

  protected def withSocket[A](f: MySQLSocket[IO] => IO[A]): IO[A] = withSocket[A](config)(f)

} 
Example 160
Source File: TransportServer.scala    From aloha   with Apache License 2.0 5 votes vote down vote up
package me.jrwang.aloha.transport.server

import java.io.Closeable
import java.net.InetSocketAddress
import java.util.concurrent.TimeUnit

import io.netty.bootstrap.ServerBootstrap
import io.netty.channel.{ChannelFuture, ChannelInitializer, ChannelOption}
import io.netty.channel.socket.SocketChannel
import me.jrwang.aloha.common.Logging
import me.jrwang.aloha.common.util.Utils
import me.jrwang.aloha.transport.TransportContext
import me.jrwang.aloha.transport.util.{IOMode, NettyUtils}


class TransportServer(
    transportContext: TransportContext,
    hostToBind: String,
    portToBind: Int,
    appRpcHandler: RpcHandler,
    bootstraps: List[TransportServerBootstrap]
  ) extends Closeable with Logging {
  private val conf  = transportContext.conf

  private var port: Int = -1
  private var bootstrap: ServerBootstrap = _
  private var channelFuture: ChannelFuture = _

  try
    init()
  catch {
    case e: RuntimeException =>
      Utils.closeQuietly(this)
      throw e
  }

  def init(): Unit = {
    val ioMode = IOMode.valueOf(conf.ioMode)
    val bossGroup = NettyUtils.createEventLoop(ioMode, conf.serverThreads, conf.module + "-server")
    val workerGroup = bossGroup
    val allocator = NettyUtils.createPooledByteBufAllocator(conf.preferDirectBufs, true, conf.serverThreads)

    bootstrap = new ServerBootstrap()
      .group(bossGroup, workerGroup)
      .channel(NettyUtils.getServerChannelClass(ioMode))
      .option(ChannelOption.ALLOCATOR, allocator)
      .childOption(ChannelOption.ALLOCATOR, allocator)

    if (conf.backLog > 0)
      bootstrap.option[java.lang.Integer](ChannelOption.SO_BACKLOG, conf.backLog)
    if (conf.receiveBuf > 0)
      bootstrap.childOption[java.lang.Integer](ChannelOption.SO_RCVBUF, conf.receiveBuf)
    if (conf.sendBuf > 0)
      bootstrap.childOption[java.lang.Integer](ChannelOption.SO_SNDBUF, conf.sendBuf)

    bootstrap.childHandler(new ChannelInitializer[SocketChannel]() {
      override protected def initChannel(ch: SocketChannel): Unit = {
        val rpcHandler = bootstraps.foldLeft[RpcHandler](appRpcHandler)((r, b) => {
          b.doBootstrap(ch, r)
        })
        transportContext.initializePipeline(ch, rpcHandler)
      }
    })

    val address = if (hostToBind == null)
      new InetSocketAddress(portToBind)
    else
      new InetSocketAddress(hostToBind, portToBind)
    channelFuture = bootstrap.bind(address)
    channelFuture.syncUninterruptibly
    port = channelFuture.channel.localAddress.asInstanceOf[InetSocketAddress].getPort
    logDebug(s"Transport server started on port: $port")
  }

  def getPort: Int = {
    if (port == -1)
      throw new IllegalStateException("Server not initialized")
    port
  }

  def awaitTermination(): Unit = {
    channelFuture.channel().closeFuture().sync()
  }

  override def close(): Unit = {
    if (channelFuture != null) {
      // close is a local operation and should finish within milliseconds; timeout just to be safe
      channelFuture.channel.close.awaitUninterruptibly(10, TimeUnit.SECONDS)
      channelFuture = null
    }
    if (bootstrap != null && bootstrap.config().group() != null)
      bootstrap.config().group().shutdownGracefully
    if (bootstrap != null && bootstrap.config().childGroup() != null)
      bootstrap.config().childGroup().shutdownGracefully
    bootstrap = null
  }
} 
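Note how `init()` distinguishes wildcard binding (`new InetSocketAddress(portToBind)`) from binding to a specific host, and how the actual port is read back from the channel's local address. That read-back matters when binding to port 0 to request an ephemeral port; the same idiom with a plain server socket (a small sketch):

import java.net.{InetSocketAddress, ServerSocket}

val socket = new ServerSocket()
socket.bind(new InetSocketAddress(0)) // port 0: let the OS pick a free port
val actualPort = socket.getLocalSocketAddress.asInstanceOf[InetSocketAddress].getPort
println(s"bound to ephemeral port $actualPort")
socket.close()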
Example 161
Source File: NetworkSettingsSpecification.scala    From Waves   with MIT License 5 votes vote down vote up
package com.wavesplatform.settings

import java.net.InetSocketAddress

import com.typesafe.config.ConfigFactory
import net.ceedubs.ficus.Ficus._
import org.scalatest.{FlatSpec, Matchers}

import scala.concurrent.duration._

class NetworkSettingsSpecification extends FlatSpec with Matchers {

  "NetworkSpecification" should "read values from config" in {
    val config          = loadConfig(ConfigFactory.parseString("""waves.network {
        |  bind-address: "127.0.0.1"
        |  port: 6868
        |  node-name: "default-node-name"
        |  declared-address: "127.0.0.1:6868"
        |  nonce: 0
        |  known-peers = ["8.8.8.8:6868", "4.4.8.8:6868"]
        |  local-only: no
        |  peers-data-residence-time: 1d
        |  black-list-residence-time: 10m
        |  break-idle-connections-timeout: 53s
        |  max-inbound-connections: 30
        |  max-outbound-connections = 20
        |  max-single-host-connections = 2
        |  connection-timeout: 30s
        |  max-unverified-peers: 0
        |  peers-broadcast-interval: 2m
        |  black-list-threshold: 50
        |  unrequested-packets-threshold: 100
        |  upnp {
        |    enable: yes
        |    gateway-timeout: 10s
        |    discover-timeout: 10s
        |  }
        |  traffic-logger {
        |    ignore-tx-messages = [28]
        |    ignore-rx-messages = [23]
        |  }
        |}""".stripMargin))
    val networkSettings = config.as[NetworkSettings]("waves.network")

    networkSettings.bindAddress should be(new InetSocketAddress("127.0.0.1", 6868))
    networkSettings.nodeName should be("default-node-name")
    networkSettings.declaredAddress should be(Some(new InetSocketAddress("127.0.0.1", 6868)))
    networkSettings.nonce should be(0)
    networkSettings.knownPeers should be(List("8.8.8.8:6868", "4.4.8.8:6868"))
    networkSettings.peersDataResidenceTime should be(1.day)
    networkSettings.blackListResidenceTime should be(10.minutes)
    networkSettings.breakIdleConnectionsTimeout should be(53.seconds)
    networkSettings.maxInboundConnections should be(30)
    networkSettings.maxOutboundConnections should be(20)
    networkSettings.maxConnectionsPerHost should be(2)
    networkSettings.connectionTimeout should be(30.seconds)
    networkSettings.maxUnverifiedPeers should be(0)
    networkSettings.peersBroadcastInterval should be(2.minutes)
    networkSettings.uPnPSettings.enable should be(true)
    networkSettings.uPnPSettings.gatewayTimeout should be(10.seconds)
    networkSettings.uPnPSettings.discoverTimeout should be(10.seconds)
    networkSettings.trafficLogger.ignoreTxMessages should be(Set(28))
    networkSettings.trafficLogger.ignoreRxMessages should be(Set(23))
  }

  it should "generate random nonce" in {
    val config          = loadConfig(ConfigFactory.empty())
    val networkSettings = config.as[NetworkSettings]("waves.network")

    networkSettings.nonce should not be 0
  }

  it should "build node name using nonce" in {
    val config          = loadConfig(ConfigFactory.parseString("waves.network.nonce = 12345"))
    val networkSettings = config.as[NetworkSettings]("waves.network")

    networkSettings.nonce should be(12345)
    networkSettings.nodeName should be("Node-12345")
  }

  it should "build node name using random nonce" in {
    val config          = loadConfig(ConfigFactory.empty())
    val networkSettings = config.as[NetworkSettings]("waves.network")

    networkSettings.nonce should not be 0
    networkSettings.nodeName should be(s"Node-${networkSettings.nonce}")
  }

  it should "fail with IllegalArgumentException on too long node name" in {
    val config = loadConfig(ConfigFactory.parseString(
      "waves.network.node-name = очень-длинное-название-в-многобайтной-кодировке-отличной-от-однобайтной-кодировки-американского-института-стандартов"))
    intercept[IllegalArgumentException] {
      config.as[NetworkSettings]("waves.network")
    }
  }
} 
Example 162
Source File: BlacklistSpecification.scala    From Waves   with MIT License 5 votes vote down vote up
package com.wavesplatform.network

import java.net.{InetAddress, InetSocketAddress}

import com.typesafe.config.ConfigFactory
import com.wavesplatform.settings.NetworkSettings
import net.ceedubs.ficus.Ficus._
import org.scalatest.{FeatureSpec, GivenWhenThen}

class BlacklistSpecification extends FeatureSpec with GivenWhenThen {
  private val config = ConfigFactory.parseString("""waves.network {
      |  known-peers = []
      |  file = null
      |  black-list-residence-time: 1s
      |}""".stripMargin).withFallback(ConfigFactory.load()).resolve()

  private val networkSettings = config.as[NetworkSettings]("waves.network")

  info("As a Peer")
  info("I want to blacklist other peers for certain time")
  info("So I can give them another chance after")

  feature("Blacklist") {
    scenario("Peer blacklist another peer") {

      Given("Peer database is empty")
      val peerDatabase = new PeerDatabaseImpl(networkSettings)

      def isBlacklisted(address: InetSocketAddress) = peerDatabase.blacklistedHosts.contains(address.getAddress)

      assert(peerDatabase.knownPeers.isEmpty)
      assert(peerDatabase.blacklistedHosts.isEmpty)

      When("Peer adds another peer to knownPeers")
      val address = new InetSocketAddress(InetAddress.getByName("localhost"), 1234)
      peerDatabase.touch(address)
      assert(peerDatabase.knownPeers.contains(address))
      assert(!isBlacklisted(address))

      And("Peer blacklists another peer")
      peerDatabase.blacklist(address.getAddress, "")
      assert(isBlacklisted(address))
      assert(!peerDatabase.knownPeers.contains(address))

      And("Peer waits for some time")
      Thread.sleep(networkSettings.blackListResidenceTime.toMillis + 500)

      Then("Another peer disappear from blacklist")
      assert(!isBlacklisted(address))

      And("Another peer became known")
      assert(peerDatabase.knownPeers.contains(address))
    }
  }
} 
Example 163
Source File: PeerSynchronizer.scala    From Waves   with MIT License 5 votes vote down vote up
package com.wavesplatform.network

import java.net.InetSocketAddress

import com.wavesplatform.utils.ScorexLogging
import io.netty.channel.ChannelHandler.Sharable
import io.netty.channel.{ChannelHandlerContext, ChannelInboundHandlerAdapter}

import scala.concurrent.duration.FiniteDuration

class PeerSynchronizer(peerDatabase: PeerDatabase, peerRequestInterval: FiniteDuration) extends ChannelInboundHandlerAdapter with ScorexLogging {

  private var peersRequested  = false
  private var declaredAddress = Option.empty[InetSocketAddress]

  def requestPeers(ctx: ChannelHandlerContext): Unit = if (ctx.channel().isActive) {
    peersRequested = true
    ctx.writeAndFlush(GetPeers)

    ctx.executor().schedule(peerRequestInterval) {
      requestPeers(ctx)
    }
  }

  override def channelRead(ctx: ChannelHandlerContext, msg: AnyRef): Unit = {
    declaredAddress.foreach(peerDatabase.touch)
    msg match {
      case hs: Handshake =>
        val rda = for {
          rda        <- hs.declaredAddress
          rdaAddress <- Option(rda.getAddress)
          ctxAddress <- ctx.remoteAddress.map(_.getAddress)
          if rdaAddress == ctxAddress
        } yield rda

        rda match {
          case None => log.debug(s"${id(ctx)} Declared address ${hs.declaredAddress} is absent or does not match actual remote address ${ctx.remoteAddress.map(_.getAddress)}")
          case Some(x) =>
            log.trace(s"${id(ctx)} Touching declared address")
            peerDatabase.touch(x)
            declaredAddress = Some(x)
        }

        requestPeers(ctx)
        super.channelRead(ctx, msg)
      case GetPeers =>
        ctx.writeAndFlush(KnownPeers(peerDatabase.knownPeers.keys.toSeq))
      case KnownPeers(peers) if peersRequested =>
        peersRequested = false
        val (added, notAdded) = peers.partition(peerDatabase.addCandidate)
        log.trace(s"${id(ctx)} Added peers: ${format(added)}, not added peers: ${format(notAdded)}")
      case KnownPeers(peers) =>
        log.trace(s"${id(ctx)} Got unexpected list of known peers containing ${peers.size} entries")
      case _ =>
        super.channelRead(ctx, msg)
    }
  }

  private def format[T](xs: Iterable[T]): String = xs.mkString("[", ", ", "]")
}

object PeerSynchronizer {

  @Sharable
  class NoopPeerSynchronizer extends ChannelInboundHandlerAdapter {

    override def channelRead(ctx: ChannelHandlerContext, msg: AnyRef): Unit = {
      msg match {
        case GetPeers      =>
        case KnownPeers(_) =>
        case _ =>
          super.channelRead(ctx, msg)
      }
    }
  }

  val Disabled = new NoopPeerSynchronizer()

} 
Example 164
Source File: FlumeTestUtils.scala    From spark1.52   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.streaming.flume

import java.net.{InetSocketAddress, ServerSocket}
import java.nio.ByteBuffer
import java.util.{List => JList}

import scala.collection.JavaConversions._

import com.google.common.base.Charsets.UTF_8
import org.apache.avro.ipc.NettyTransceiver
import org.apache.avro.ipc.specific.SpecificRequestor
import org.apache.commons.lang3.RandomUtils
import org.apache.flume.source.avro
import org.apache.flume.source.avro.{AvroSourceProtocol, AvroFlumeEvent}
import org.jboss.netty.channel.ChannelPipeline
import org.jboss.netty.channel.socket.SocketChannel
import org.jboss.netty.channel.socket.nio.NioClientSocketChannelFactory
import org.jboss.netty.handler.codec.compression.{ZlibDecoder, ZlibEncoder}

import org.apache.spark.util.Utils
import org.apache.spark.SparkConf


  private class CompressionChannelFactory(compressionLevel: Int)
    extends NioClientSocketChannelFactory {

    override def newChannel(pipeline: ChannelPipeline): SocketChannel = {
      val encoder = new ZlibEncoder(compressionLevel)
      pipeline.addFirst("deflater", encoder)
      pipeline.addFirst("inflater", new ZlibDecoder())
      super.newChannel(pipeline)
    }
  }

} 
Example 165
Source File: FlumeTestUtils.scala    From Spark-2.3.1   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.streaming.flume

import java.net.{InetSocketAddress, ServerSocket}
import java.nio.ByteBuffer
import java.nio.charset.StandardCharsets
import java.util.{List => JList}
import java.util.Collections

import scala.collection.JavaConverters._

import org.apache.avro.ipc.NettyTransceiver
import org.apache.avro.ipc.specific.SpecificRequestor
import org.apache.commons.lang3.RandomUtils
import org.apache.flume.source.avro
import org.apache.flume.source.avro.{AvroFlumeEvent, AvroSourceProtocol}
import org.jboss.netty.channel.ChannelPipeline
import org.jboss.netty.channel.socket.SocketChannel
import org.jboss.netty.channel.socket.nio.NioClientSocketChannelFactory
import org.jboss.netty.handler.codec.compression.{ZlibDecoder, ZlibEncoder}

import org.apache.spark.SparkConf
import org.apache.spark.util.Utils


  private class CompressionChannelFactory(compressionLevel: Int)
    extends NioClientSocketChannelFactory {

    override def newChannel(pipeline: ChannelPipeline): SocketChannel = {
      val encoder = new ZlibEncoder(compressionLevel)
      pipeline.addFirst("deflater", encoder)
      pipeline.addFirst("inflater", new ZlibDecoder())
      super.newChannel(pipeline)
    }
  }

} 
Example 166
Source File: MutableServiceDiscovery.scala    From akka-grpc   with Apache License 2.0 5 votes vote down vote up
package akka.grpc.scaladsl.tools

import java.net.InetSocketAddress

import akka.discovery.Lookup
import akka.discovery.ServiceDiscovery
import akka.discovery.ServiceDiscovery.Resolved
import akka.discovery.ServiceDiscovery.ResolvedTarget
import akka.http.scaladsl.Http

import scala.concurrent.Future
import scala.concurrent.duration.FiniteDuration


final class MutableServiceDiscovery(targets: List[InetSocketAddress]) extends ServiceDiscovery {
  var services: Future[Resolved] = _

  setServices(targets)

  def setServices(targets: List[InetSocketAddress]): Unit =
    services = Future.successful(
      Resolved(
        "greeter",
        targets.map(target => ResolvedTarget(target.getHostString, Some(target.getPort), Some(target.getAddress)))))

  override def lookup(query: Lookup, resolveTimeout: FiniteDuration): Future[Resolved] = {
    require(query.serviceName == "greeter")
    services
  }
}

object MutableServiceDiscovery {
  def apply(targets: List[Http.ServerBinding]) = new MutableServiceDiscovery(targets.map(_.localAddress))
} 
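A usage sketch: seed the discovery with one address, resolve it, then swap in a new target set at runtime (addresses and ports here are illustrative):

import java.net.InetSocketAddress
import scala.concurrent.duration._
import akka.discovery.Lookup

val discovery = new MutableServiceDiscovery(List(new InetSocketAddress("127.0.0.1", 8080)))

// Resolution is answered from the current target list...
val resolved = discovery.lookup(Lookup("greeter"), resolveTimeout = 1.second)

// ...and can be redirected later without re-creating the client.
discovery.setServices(List(new InetSocketAddress("127.0.0.1", 8081)))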
Example 167
Source File: AkkaDiscoveryNameResolverProviderSpec.scala    From akka-grpc   with Apache License 2.0 5 votes vote down vote up
package akka.grpc.internal

import java.net.URI
import java.net.InetSocketAddress
import java.util.{ List => JList }

import scala.concurrent.ExecutionContext.Implicits._
import scala.concurrent.Future
import scala.concurrent.Promise
import scala.concurrent.duration._
import scala.collection.immutable

import io.grpc.Attributes
import io.grpc.NameResolver.Listener
import io.grpc.EquivalentAddressGroup

import akka.actor.ActorSystem
import akka.discovery.Lookup
import akka.discovery.ServiceDiscovery
import akka.discovery.ServiceDiscovery.Resolved
import akka.discovery.ServiceDiscovery.ResolvedTarget
import akka.testkit.TestKit

import org.scalatest.concurrent.ScalaFutures
import org.scalatest.matchers.should.Matchers
import org.scalatest.time.{ Millis, Seconds, Span }
import org.scalatest.wordspec.AnyWordSpecLike

class AkkaDiscoveryNameResolverProviderSpec
    extends TestKit(ActorSystem())
    with AnyWordSpecLike
    with Matchers
    with ScalaFutures {

  implicit override val patienceConfig =
    PatienceConfig(timeout = scaled(Span(2, Seconds)), interval = scaled(Span(5, Millis)))

  "AkkaDiscoveryNameResolverProviderSpec" should {
    "provide a NameResolver that uses the supplied serviceName" in {
      val serviceName = "testServiceName"
      val discovery = new ServiceDiscovery() {
        override def lookup(lookup: Lookup, resolveTimeout: FiniteDuration): Future[Resolved] = {
          lookup.serviceName should be(serviceName)
          Future.successful(Resolved(serviceName, immutable.Seq(ResolvedTarget("10.0.0.3", Some(4312), None))))
        }
      }
      val provider = new AkkaDiscoveryNameResolverProvider(
        discovery,
        443,
        portName = None,
        protocol = None,
        resolveTimeout = 3.seconds)

      val resolver = provider.newNameResolver(new URI("//" + serviceName), null)

      val addressGroupsPromise = Promise[List[EquivalentAddressGroup]]
      val listener = new Listener() {
        override def onAddresses(addresses: JList[EquivalentAddressGroup], attributes: Attributes): Unit = {
          import scala.collection.JavaConverters._
          addressGroupsPromise.success(addresses.asScala.toList)
        }
        override def onError(error: io.grpc.Status): Unit = ???
      }
      resolver.start(listener)
      val addressGroups = addressGroupsPromise.future.futureValue
      addressGroups.size should be(1)
      val addresses = addressGroups(0).getAddresses()
      addresses.size should be(1)
      val address = addresses.get(0).asInstanceOf[InetSocketAddress]
      address.getHostString() should be("10.0.0.3")
      address.getPort() should be(4312)
    }
  }

} 
Example 168
Source File: AkkaDiscoveryNameResolverSpec.scala    From akka-grpc   with Apache License 2.0 5 votes vote down vote up
package akka.grpc.internal

import java.net.InetSocketAddress

import akka.actor.ActorSystem
import akka.grpc.{ GrpcClientSettings, GrpcServiceException }
import akka.testkit.TestKit
import io.grpc.Status
import org.scalatest.concurrent.ScalaFutures
import org.scalatest.matchers.should.Matchers
import org.scalatest.time.{ Millis, Seconds, Span }
import org.scalatest.wordspec.AnyWordSpecLike

import scala.collection.JavaConverters._

class AkkaDiscoveryNameResolverSpec
    extends TestKit(ActorSystem())
    with AnyWordSpecLike
    with Matchers
    with ScalaFutures {
  implicit val ex = system.dispatcher
  implicit override val patienceConfig =
    PatienceConfig(timeout = scaled(Span(2, Seconds)), interval = scaled(Span(5, Millis)))

  "The AkkaDiscovery-backed NameResolver" should {
    "correctly report an error for an unknown hostname" in {
      val host = "example.invalid"
      val resolver = AkkaDiscoveryNameResolver(GrpcClientSettings.connectToServiceAt(host, 80))
      val probe = new NameResolverListenerProbe()

      resolver.start(probe)

      val exception = probe.future.failed.futureValue.asInstanceOf[GrpcServiceException]
      exception shouldBe an[GrpcServiceException]
      exception.status.getCode should be(Status.UNKNOWN.getCode)
      // FIXME: This description is not portable - it arises from native function response, which differs by OS
      // exception.status.getDescription should equal(host + ": Name or service not known")
    }

    "support serving a static host/port" in {
      // Unfortunately it needs to be an actually resolvable address...
      val host = "akka.io"
      val port = 4040
      val resolver = AkkaDiscoveryNameResolver(GrpcClientSettings.connectToServiceAt(host, port))
      val probe = new NameResolverListenerProbe()

      resolver.start(probe)

      val addresses = probe.future.futureValue match {
        case Seq(addressGroup) => addressGroup.getAddresses
        case _                 => fail("Expected a single address group")
      }
      addresses.asScala.toSeq match {
        case Seq(address: InetSocketAddress) =>
          address.getPort should be(port)
          address.getAddress.getHostName should be(host)
        case other =>
          fail(s"Expected a single InetSocketAddress, got $other")
      }
    }
  }
} 
Example 169
Source File: AkkaDiscoveryNameResolver.scala    From akka-grpc   with Apache License 2.0 5 votes vote down vote up
package akka.grpc.internal

import java.net.{ InetAddress, InetSocketAddress, UnknownHostException }

import akka.discovery.ServiceDiscovery.ResolvedTarget
import akka.discovery.{ Lookup, ServiceDiscovery }
import akka.grpc.GrpcClientSettings
import io.grpc.{ Attributes, EquivalentAddressGroup, NameResolver, Status }
import io.grpc.NameResolver.Listener

import scala.concurrent.duration.FiniteDuration
import scala.concurrent.{ ExecutionContext, Promise }
import scala.util.{ Failure, Success }

class AkkaDiscoveryNameResolver(
    discovery: ServiceDiscovery,
    defaultPort: Int,
    serviceName: String,
    portName: Option[String],
    protocol: Option[String],
    resolveTimeout: FiniteDuration)(implicit val ec: ExecutionContext)
    extends NameResolver {
  override def getServiceAuthority: String = serviceName

  val listener: Promise[Listener] = Promise()

  override def start(l: Listener): Unit = {
    listener.trySuccess(l)
    lookup(l)
  }

  override def refresh(): Unit =
    listener.future.onComplete {
      case Success(l) => lookup(l)
      case Failure(_) => // We never fail this promise
    }

  def lookup(listener: Listener): Unit = {
    discovery.lookup(Lookup(serviceName, portName, protocol), resolveTimeout).onComplete {
      case Success(result) =>
        try {
          listener.onAddresses(addresses(result.addresses), Attributes.EMPTY)
        } catch {
          case e: UnknownHostException =>
            // TODO at least log
            listener.onError(Status.UNKNOWN.withDescription(e.getMessage))
        }
      case Failure(e) =>
        // TODO at least log
        listener.onError(Status.UNKNOWN.withDescription(e.getMessage))
    }
  }

  @throws[UnknownHostException]
  private def addresses(addresses: Seq[ResolvedTarget]) = {
    import scala.collection.JavaConverters._
    addresses
      .map(target => {
        val port = target.port.getOrElse(defaultPort)
        val address = target.address.getOrElse(InetAddress.getByName(target.host))
        new EquivalentAddressGroup(new InetSocketAddress(address, port))
      })
      .asJava
  }

  override def shutdown(): Unit = ()
}

object AkkaDiscoveryNameResolver {
  def apply(settings: GrpcClientSettings)(implicit ec: ExecutionContext): AkkaDiscoveryNameResolver =
    new AkkaDiscoveryNameResolver(
      settings.serviceDiscovery,
      settings.defaultPort,
      settings.serviceName,
      settings.servicePortName,
      settings.serviceProtocol,
      settings.resolveTimeout)
} 
Example 170
Source File: WsServer.scala    From seed   with Apache License 2.0 5 votes vote down vote up
package seed.cli.util

import java.net.InetSocketAddress
import java.nio.ByteBuffer

import org.java_websocket.WebSocket
import org.java_websocket.handshake.ClientHandshake
import org.java_websocket.server.WebSocketServer

import io.circe.parser.decode

import seed.Log
import seed.cli.WsCommand

class WsServer(
  address: InetSocketAddress,
  onDisconnect: WebSocket => Unit,
  evalCommand: (WsServer, WebSocket, WsCommand) => Unit,
  log: Log
) extends WebSocketServer(address) {
  setReuseAddr(true)

  private def clientIp(conn: WebSocket): String =
    if (conn.getRemoteSocketAddress == null) "<unknown>"
    else conn.getRemoteSocketAddress.getAddress.getHostAddress

  override def onOpen(conn: WebSocket, handshake: ClientHandshake): Unit =
    log.debug(s"Client ${Ansi.italic(clientIp(conn))} connected")
  override def onClose(
    conn: WebSocket,
    code: Int,
    reason: String,
    remote: Boolean
  ): Unit = {
    log.debug(s"Client ${Ansi.italic(clientIp(conn))} disconnected")
    onDisconnect(conn)
  }
  override def onError(conn: WebSocket, ex: Exception): Unit =
    ex.printStackTrace()
  override def onStart(): Unit = {
    setConnectionLostTimeout(100)
    log.info(s"WebSocket server started on ${Ansi
      .italic(s"${address.getHostName}:${address.getPort}")}")
  }
  override def onMessage(conn: WebSocket, message: String): Unit =
    try {
      decode[WsCommand](message) match {
        case Left(e) =>
          log.error(
            s"Could not process message from ${Ansi.italic(clientIp(conn))}: $e"
          )
          conn.send(e.toString)
        case Right(c) =>
          log.debug(
            s"${Ansi.italic(c.description)} command received from ${Ansi.italic(clientIp(conn))}"
          )
          evalCommand(this, conn, c)
      }
    } catch {
      case t: Throwable => t.printStackTrace()
    }
  override def onMessage(conn: WebSocket, message: ByteBuffer): Unit = {}
} 
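One subtlety in `onStart` above: `address.getHostName` can trigger a reverse DNS lookup, whereas `getHostString` (used by `clientIp`-style code in other examples here) returns the literal host without resolving. Illustrated:

import java.net.InetSocketAddress

val addr = new InetSocketAddress("93.184.216.34", 80)
val literal = addr.getHostString // "93.184.216.34" -- no reverse DNS lookup
val name    = addr.getHostName   // may block on a reverse DNS query and cache the result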
Example 171
Source File: GraphiteSink.scala    From spark1.52   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.metrics.sink

import java.net.InetSocketAddress
import java.util.Properties
import java.util.concurrent.TimeUnit

import com.codahale.metrics.MetricRegistry
import com.codahale.metrics.graphite.{GraphiteUDP, Graphite, GraphiteReporter}

import org.apache.spark.SecurityManager
import org.apache.spark.metrics.MetricsSystem

private[spark] class GraphiteSink(val property: Properties, val registry: MetricRegistry,
    securityMgr: SecurityManager) extends Sink {
  val GRAPHITE_DEFAULT_PERIOD = 10
  val GRAPHITE_DEFAULT_UNIT = "SECONDS"
  val GRAPHITE_DEFAULT_PREFIX = ""

  val GRAPHITE_KEY_HOST = "host"
  val GRAPHITE_KEY_PORT = "port"
  val GRAPHITE_KEY_PERIOD = "period"
  val GRAPHITE_KEY_UNIT = "unit"
  val GRAPHITE_KEY_PREFIX = "prefix"
  val GRAPHITE_KEY_PROTOCOL = "protocol"

  def propertyToOption(prop: String): Option[String] = Option(property.getProperty(prop))

  if (!propertyToOption(GRAPHITE_KEY_HOST).isDefined) {
    throw new Exception("Graphite sink requires 'host' property.")
  }

  if (!propertyToOption(GRAPHITE_KEY_PORT).isDefined) {
    throw new Exception("Graphite sink requires 'port' property.")
  }

  val host = propertyToOption(GRAPHITE_KEY_HOST).get
  val port = propertyToOption(GRAPHITE_KEY_PORT).get.toInt

  val pollPeriod = propertyToOption(GRAPHITE_KEY_PERIOD) match {
    case Some(s) => s.toInt
    case None => GRAPHITE_DEFAULT_PERIOD
  }

  val pollUnit: TimeUnit = propertyToOption(GRAPHITE_KEY_UNIT) match {
    case Some(s) => TimeUnit.valueOf(s.toUpperCase())
    case None => TimeUnit.valueOf(GRAPHITE_DEFAULT_UNIT)
  }

  val prefix = propertyToOption(GRAPHITE_KEY_PREFIX).getOrElse(GRAPHITE_DEFAULT_PREFIX)

  MetricsSystem.checkMinimalPollingPeriod(pollUnit, pollPeriod)

  val graphite = propertyToOption(GRAPHITE_KEY_PROTOCOL).map(_.toLowerCase) match {
    case Some("udp") => new GraphiteUDP(new InetSocketAddress(host, port))
    case Some("tcp") | None => new Graphite(new InetSocketAddress(host, port))
    case Some(p) => throw new Exception(s"Invalid Graphite protocol: $p")
  }

  val reporter: GraphiteReporter = GraphiteReporter.forRegistry(registry)
      .convertDurationsTo(TimeUnit.MILLISECONDS)
      .convertRatesTo(TimeUnit.SECONDS)
      .prefixedWith(prefix)
      .build(graphite)

  override def start() {
    reporter.start(pollPeriod, pollUnit)
  }

  override def stop() {
    reporter.stop()
  }

  override def report() {
    reporter.report()
  }
} 
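The sink is driven entirely by the Properties instance passed to it. A minimal configuration sketch that exercises the parsing above (the hostname is a placeholder; 2003 is Graphite's conventional plaintext port):

import java.util.Properties

val props = new Properties()
props.setProperty("host", "graphite.internal") // mandatory
props.setProperty("port", "2003")              // mandatory
props.setProperty("protocol", "udp")           // optional; tcp is the default
// "period", "unit" and "prefix" fall back to 10, SECONDS and "" respectively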
Example 172
Source File: Message.scala    From spark1.52   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.network.nio

import java.net.InetSocketAddress
import java.nio.ByteBuffer

import scala.collection.mutable.ArrayBuffer

import com.google.common.base.Charsets.UTF_8

import org.apache.spark.util.Utils

private[nio] abstract class Message(val typ: Long, val id: Int) {
  var senderAddress: InetSocketAddress = null
  var started = false
  var startTime = -1L
  var finishTime = -1L
  var isSecurityNeg = false
  var hasError = false

  def size: Int

  def getChunkForSending(maxChunkSize: Int): Option[MessageChunk]

  def getChunkForReceiving(chunkSize: Int): Option[MessageChunk]

  def timeTaken(): String = (finishTime - startTime).toString + " ms"

  override def toString: String = {
    this.getClass.getSimpleName + "(id = " + id + ", size = " + size + ")"
  }
}


private[nio] object Message {
  val BUFFER_MESSAGE = 1111111111L

  var lastId = 1

  def getNewId(): Int = synchronized {
    lastId += 1
    if (lastId == 0) {
      lastId += 1
    }
    lastId
  }

  def createBufferMessage(dataBuffers: Seq[ByteBuffer], ackId: Int): BufferMessage = {
    if (dataBuffers == null) {
      return new BufferMessage(getNewId(), new ArrayBuffer[ByteBuffer], ackId)
    }
    if (dataBuffers.exists(_ == null)) {
      throw new Exception("Attempting to create buffer message with null buffer")
    }
    new BufferMessage(getNewId(), new ArrayBuffer[ByteBuffer] ++= dataBuffers, ackId)
  }

  def createBufferMessage(dataBuffers: Seq[ByteBuffer]): BufferMessage =
    createBufferMessage(dataBuffers, 0)

  def createBufferMessage(dataBuffer: ByteBuffer, ackId: Int): BufferMessage = {
    if (dataBuffer == null) {
      // ByteBuffer.allocate: a buffer must exist before it can be read or
      // written; the static allocate() method creates one
      createBufferMessage(Array(ByteBuffer.allocate(0)), ackId)
    } else {
      createBufferMessage(Array(dataBuffer), ackId)
    }
  }

  def createBufferMessage(dataBuffer: ByteBuffer): BufferMessage =
    createBufferMessage(dataBuffer, 0)

  def createBufferMessage(ackId: Int): BufferMessage = {
    createBufferMessage(new Array[ByteBuffer](0), ackId)
  }

  
  def createErrorMessage(exception: Exception, ackId: Int): BufferMessage = {
    val exceptionString = Utils.exceptionString(exception)
    val serializedExceptionString = ByteBuffer.wrap(exceptionString.getBytes(UTF_8))
    val errorMessage = createBufferMessage(serializedExceptionString, ackId)
    errorMessage.hasError = true
    errorMessage
  }

  def create(header: MessageChunkHeader): Message = {
    val newMessage: Message = header.typ match {
      case BUFFER_MESSAGE => new BufferMessage(header.id,
        // ByteBuffer.allocate: a buffer must exist before it can be read or
        // written; the static allocate() method creates one
        ArrayBuffer(ByteBuffer.allocate(header.totalSize)), header.other)
    }
    newMessage.hasError = header.hasError
    newMessage.senderAddress = header.address
    newMessage
  }
} 
Example 173
Source File: MessageChunkHeader.scala    From spark1.52   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.network.nio

import java.net.{InetAddress, InetSocketAddress}
import java.nio.ByteBuffer

private[nio] class MessageChunkHeader(
    val typ: Long,
    val id: Int,
    val totalSize: Int,
    val chunkSize: Int,
    val other: Int,
    val hasError: Boolean,
    val securityNeg: Int,
    val address: InetSocketAddress) {
  lazy val buffer = {
    // No need to change this; at 'use' time, we do a reverse lookup of the hostname.
    // Refer to network.Connection
    val ip = address.getAddress.getAddress()
    val port = address.getPort()
    ByteBuffer.
      allocate(MessageChunkHeader.HEADER_SIZE).
      putLong(typ).
      putInt(id).
      putInt(totalSize).
      putInt(chunkSize).
      putInt(other).
      put(if (hasError) 1.asInstanceOf[Byte] else 0.asInstanceOf[Byte]).
      putInt(securityNeg).
      putInt(ip.size).
      put(ip).
      putInt(port).
      position(MessageChunkHeader.HEADER_SIZE).
      flip.asInstanceOf[ByteBuffer]
  }

  override def toString: String = {
    "" + this.getClass.getSimpleName + ":" + id + " of type " + typ +
      " and sizes " + totalSize + " / " + chunkSize + " bytes, securityNeg: " + securityNeg
  }

}


private[nio] object MessageChunkHeader {
  val HEADER_SIZE = 45

  def create(buffer: ByteBuffer): MessageChunkHeader = {
    if (buffer.remaining != HEADER_SIZE) {
      throw new IllegalArgumentException("Cannot convert buffer data to Message")
    }
    val typ = buffer.getLong()
    val id = buffer.getInt()
    val totalSize = buffer.getInt()
    val chunkSize = buffer.getInt()
    val other = buffer.getInt()
    val hasError = buffer.get() != 0
    val securityNeg = buffer.getInt()
    val ipSize = buffer.getInt()
    val ipBytes = new Array[Byte](ipSize)
    buffer.get(ipBytes)
    val ip = InetAddress.getByAddress(ipBytes)
    val port = buffer.getInt()
    new MessageChunkHeader(typ, id, totalSize, chunkSize, other, hasError, securityNeg,
      new InetSocketAddress(ip, port))
  }
} 
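The header encodes the sender's InetSocketAddress as length-prefixed raw IP bytes followed by the port. A self-contained sketch of that round trip using only JDK calls (the port is a placeholder):

import java.net.{InetAddress, InetSocketAddress}
import java.nio.ByteBuffer

val original = new InetSocketAddress(InetAddress.getByName("127.0.0.1"), 7077)
val ipBytes = original.getAddress.getAddress // 4 bytes for IPv4, 16 for IPv6

val buf = ByteBuffer.allocate(4 + ipBytes.length + 4)
buf.putInt(ipBytes.length).put(ipBytes).putInt(original.getPort)
buf.flip()

val restored = {
  val bytes = new Array[Byte](buf.getInt())
  buf.get(bytes)
  new InetSocketAddress(InetAddress.getByAddress(bytes), buf.getInt())
}
assert(restored == original) // InetSocketAddress equality is by IP and port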
Example 174
Source File: FlumePollingStreamSuite.scala    From spark1.52   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.streaming.flume

import java.net.InetSocketAddress

import scala.collection.JavaConversions._
import scala.collection.mutable.{SynchronizedBuffer, ArrayBuffer}
import scala.concurrent.duration._
import scala.language.postfixOps

import com.google.common.base.Charsets.UTF_8
import org.scalatest.BeforeAndAfter
import org.scalatest.concurrent.Eventually._

import org.apache.spark.{Logging, SparkConf, SparkFunSuite}
import org.apache.spark.storage.StorageLevel
import org.apache.spark.streaming.dstream.ReceiverInputDStream
import org.apache.spark.streaming.{Seconds, TestOutputStream, StreamingContext}
import org.apache.spark.util.{ManualClock, Utils}

class FlumePollingStreamSuite extends SparkFunSuite with BeforeAndAfter with Logging {
  // The fixtures referenced below (conf, batchDuration, maxAttempts and the
  // PollingFlumeTestUtils instance named utils) are declared at this point
  // in the full source file.

  private def testMultipleTimes(test: () => Unit): Unit = {
    var testPassed = false
    var attempt = 0
    while (!testPassed && attempt < maxAttempts) {
      try {
        test()
        testPassed = true
      } catch {
        case e: Exception if Utils.isBindCollision(e) =>
          logWarning("Exception when running flume polling test: " + e)
          attempt += 1
      }
    }
    assert(testPassed, s"Test failed after $attempt attempts!")
  }

  private def testFlumePolling(): Unit = {
    try {
      val port = utils.startSingleSink()

      writeAndVerify(Seq(port))
      utils.assertChannelsAreEmpty()
    } finally {
      utils.close()
    }
  }

  private def testFlumePollingMultipleHost(): Unit = {
    try {
      val ports = utils.startMultipleSinks()
      writeAndVerify(ports)
      utils.assertChannelsAreEmpty()
    } finally {
      utils.close()
    }
  }

  def writeAndVerify(sinkPorts: Seq[Int]): Unit = {
    // Set up the streaming context and input streams
    val ssc = new StreamingContext(conf, batchDuration)
    val addresses = sinkPorts.map(port => new InetSocketAddress("localhost", port))
    val flumeStream: ReceiverInputDStream[SparkFlumeEvent] =
      FlumeUtils.createPollingStream(ssc, addresses, StorageLevel.MEMORY_AND_DISK,
        utils.eventsPerBatch, 5)
    val outputBuffer = new ArrayBuffer[Seq[SparkFlumeEvent]]
      with SynchronizedBuffer[Seq[SparkFlumeEvent]]
    val outputStream = new TestOutputStream(flumeStream, outputBuffer)
    outputStream.register()

    ssc.start()
    try {
      utils.sendDatAndEnsureAllDataHasBeenReceived()
      val clock = ssc.scheduler.clock.asInstanceOf[ManualClock]
      clock.advance(batchDuration.milliseconds)

      // The eventually is required to ensure that all data in the batch has been processed.
      eventually(timeout(10 seconds), interval(100 milliseconds)) {
        val flattenOutputBuffer = outputBuffer.flatten
        val headers = flattenOutputBuffer.map(_.event.getHeaders.map {
          case kv => (kv._1.toString, kv._2.toString)
        }).map(mapAsJavaMap)
        val bodies = flattenOutputBuffer.map(e => new String(e.event.getBody.array(), UTF_8))
        utils.assertOutput(headers, bodies)
      }
    } finally {
      ssc.stop()
    }
  }

} 
Example 175
Source File: GraphiteSink.scala    From Spark-2.3.1   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.metrics.sink

import java.net.InetSocketAddress
import java.util.{Locale, Properties}
import java.util.concurrent.TimeUnit

import com.codahale.metrics.MetricRegistry
import com.codahale.metrics.graphite.{Graphite, GraphiteReporter, GraphiteUDP}

import org.apache.spark.SecurityManager
import org.apache.spark.metrics.MetricsSystem

private[spark] class GraphiteSink(val property: Properties, val registry: MetricRegistry,
    securityMgr: SecurityManager) extends Sink {
  val GRAPHITE_DEFAULT_PERIOD = 10
  val GRAPHITE_DEFAULT_UNIT = "SECONDS"
  val GRAPHITE_DEFAULT_PREFIX = ""

  val GRAPHITE_KEY_HOST = "host"
  val GRAPHITE_KEY_PORT = "port"
  val GRAPHITE_KEY_PERIOD = "period"
  val GRAPHITE_KEY_UNIT = "unit"
  val GRAPHITE_KEY_PREFIX = "prefix"
  val GRAPHITE_KEY_PROTOCOL = "protocol"

  def propertyToOption(prop: String): Option[String] = Option(property.getProperty(prop))

  if (!propertyToOption(GRAPHITE_KEY_HOST).isDefined) {
    throw new Exception("Graphite sink requires 'host' property.")
  }

  if (!propertyToOption(GRAPHITE_KEY_PORT).isDefined) {
    throw new Exception("Graphite sink requires 'port' property.")
  }

  val host = propertyToOption(GRAPHITE_KEY_HOST).get
  val port = propertyToOption(GRAPHITE_KEY_PORT).get.toInt

  val pollPeriod = propertyToOption(GRAPHITE_KEY_PERIOD) match {
    case Some(s) => s.toInt
    case None => GRAPHITE_DEFAULT_PERIOD
  }

  val pollUnit: TimeUnit = propertyToOption(GRAPHITE_KEY_UNIT) match {
    case Some(s) => TimeUnit.valueOf(s.toUpperCase(Locale.ROOT))
    case None => TimeUnit.valueOf(GRAPHITE_DEFAULT_UNIT)
  }

  val prefix = propertyToOption(GRAPHITE_KEY_PREFIX).getOrElse(GRAPHITE_DEFAULT_PREFIX)

  MetricsSystem.checkMinimalPollingPeriod(pollUnit, pollPeriod)

  val graphite = propertyToOption(GRAPHITE_KEY_PROTOCOL).map(_.toLowerCase(Locale.ROOT)) match {
    case Some("udp") => new GraphiteUDP(host, port)
    case Some("tcp") | None => new Graphite(host, port)
    case Some(p) => throw new Exception(s"Invalid Graphite protocol: $p")
  }

  val reporter: GraphiteReporter = GraphiteReporter.forRegistry(registry)
      .convertDurationsTo(TimeUnit.MILLISECONDS)
      .convertRatesTo(TimeUnit.SECONDS)
      .prefixedWith(prefix)
      .build(graphite)

  override def start() {
    reporter.start(pollPeriod, pollUnit)
  }

  override def stop() {
    reporter.stop()
  }

  override def report() {
    reporter.report()
  }
} 
Example 176
Source File: EmbeddedZookeperServer.scala    From affinity   with Apache License 2.0 5 votes vote down vote up
package io.amient.affinity.kafka

import java.io.File
import java.net.InetSocketAddress

import org.apache.zookeeper.server.{NIOServerCnxnFactory, ZooKeeperServer}
import org.slf4j.LoggerFactory

trait EmbeddedZookeperServer extends EmbeddedService {

  private val logger = LoggerFactory.getLogger(classOf[EmbeddedZookeperServer])

  private val embeddedZkPath = new File(testDir, "local-zookeeper")
  // smaller testDir footprint, default zookeeper file blocks are 65535Kb
  System.getProperties().setProperty("zookeeper.preAllocSize", "64")
  private val zookeeper = new ZooKeeperServer(new File(embeddedZkPath, "snapshots"), new File(embeddedZkPath, "logs"), 3000)
  private val zkFactory = new NIOServerCnxnFactory
  zkFactory.configure(new InetSocketAddress(0), 10)
  val zkConnect = "localhost:" + zkFactory.getLocalPort
  logger.info(s"Embedded ZooKeeper $zkConnect, data directory: $testDir")
  zkFactory.startup(zookeeper)

  abstract override def close(): Unit = try zkFactory.shutdown() finally super.close

} 
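new InetSocketAddress(0), as passed to zkFactory.configure above, denotes the wildcard address with a kernel-assigned ephemeral port, which is why the code reads the actual port back via getLocalPort. Illustrated with plain JDK calls:

import java.net.InetSocketAddress

val any = new InetSocketAddress(0)
any.getAddress.isAnyLocalAddress // true: all local interfaces
any.getPort                      // 0; the real port is assigned at bind time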
Example 177
Source File: ClusterBuilderSpec.scala    From quill   with Apache License 2.0 5 votes vote down vote up
package io.getquill.context.cassandra.cluster

import java.net.InetSocketAddress

import com.datastax.driver.core.Cluster.Builder
import com.typesafe.config.ConfigFactory
import io.getquill.Spec

class ClusterBuilderSpec extends Spec {

  val hosts = List("127.0.0.1", "127.0.0.2", "127.0.0.3")
  val contactPoints = hosts.map(new InetSocketAddress(_, 9042))

  "creates Builder" - {

    "with a single host" in {
      val cfgString = s"contactPoint = ${hosts.head}"
      val clusterBuilder: Builder = ClusterBuilder(ConfigFactory.parseString(cfgString))
      clusterBuilder.getContactPoints must contain theSameElementsAs (contactPoints.take(1))
    }

    "with a single host in an array" in {
      val cfgString = s"contactPoints = [${hosts.head}]"
      val clusterBuilder: Builder = ClusterBuilder(ConfigFactory.parseString(cfgString))
      clusterBuilder.getContactPoints must contain theSameElementsAs (contactPoints.take(1))
    }

    "with multiple hosts" in {
      val cfgString = s"""contactPoints = [${hosts.mkString(",")}] """
      val clusterBuilder: Builder = ClusterBuilder(ConfigFactory.parseString(cfgString))
      clusterBuilder.getContactPoints must contain theSameElementsAs (contactPoints)
    }
  }
} 
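The theSameElementsAs assertions work because InetSocketAddress implements value equality: two resolved instances with the same IP and port compare equal regardless of identity. For example:

import java.net.InetSocketAddress

new InetSocketAddress("127.0.0.1", 9042) == new InetSocketAddress("127.0.0.1", 9042) // true
// note: a resolved and an unresolved address for the same host never compare equal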
Example 178
Source File: GraphiteSink.scala    From iolap   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.metrics.sink

import java.net.InetSocketAddress
import java.util.Properties
import java.util.concurrent.TimeUnit

import com.codahale.metrics.MetricRegistry
import com.codahale.metrics.graphite.{GraphiteUDP, Graphite, GraphiteReporter}

import org.apache.spark.SecurityManager
import org.apache.spark.metrics.MetricsSystem

private[spark] class GraphiteSink(val property: Properties, val registry: MetricRegistry,
    securityMgr: SecurityManager) extends Sink {
  val GRAPHITE_DEFAULT_PERIOD = 10
  val GRAPHITE_DEFAULT_UNIT = "SECONDS"
  val GRAPHITE_DEFAULT_PREFIX = ""

  val GRAPHITE_KEY_HOST = "host"
  val GRAPHITE_KEY_PORT = "port"
  val GRAPHITE_KEY_PERIOD = "period"
  val GRAPHITE_KEY_UNIT = "unit"
  val GRAPHITE_KEY_PREFIX = "prefix"
  val GRAPHITE_KEY_PROTOCOL = "protocol"

  def propertyToOption(prop: String): Option[String] = Option(property.getProperty(prop))

  if (!propertyToOption(GRAPHITE_KEY_HOST).isDefined) {
    throw new Exception("Graphite sink requires 'host' property.")
  }

  if (!propertyToOption(GRAPHITE_KEY_PORT).isDefined) {
    throw new Exception("Graphite sink requires 'port' property.")
  }

  val host = propertyToOption(GRAPHITE_KEY_HOST).get
  val port = propertyToOption(GRAPHITE_KEY_PORT).get.toInt

  val pollPeriod = propertyToOption(GRAPHITE_KEY_PERIOD) match {
    case Some(s) => s.toInt
    case None => GRAPHITE_DEFAULT_PERIOD
  }

  val pollUnit: TimeUnit = propertyToOption(GRAPHITE_KEY_UNIT) match {
    case Some(s) => TimeUnit.valueOf(s.toUpperCase())
    case None => TimeUnit.valueOf(GRAPHITE_DEFAULT_UNIT)
  }

  val prefix = propertyToOption(GRAPHITE_KEY_PREFIX).getOrElse(GRAPHITE_DEFAULT_PREFIX)

  MetricsSystem.checkMinimalPollingPeriod(pollUnit, pollPeriod)

  val graphite = propertyToOption(GRAPHITE_KEY_PROTOCOL).map(_.toLowerCase) match {
    case Some("udp") => new GraphiteUDP(new InetSocketAddress(host, port))
    case Some("tcp") | None => new Graphite(new InetSocketAddress(host, port))
    case Some(p) => throw new Exception(s"Invalid Graphite protocol: $p")
  }

  val reporter: GraphiteReporter = GraphiteReporter.forRegistry(registry)
      .convertDurationsTo(TimeUnit.MILLISECONDS)
      .convertRatesTo(TimeUnit.SECONDS)
      .prefixedWith(prefix)
      .build(graphite)

  override def start() {
    reporter.start(pollPeriod, pollUnit)
  }

  override def stop() {
    reporter.stop()
  }

  override def report() {
    reporter.report()
  }
} 
Example 179
Source File: Message.scala    From iolap   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.network.nio

import java.net.InetSocketAddress
import java.nio.ByteBuffer

import scala.collection.mutable.ArrayBuffer

import com.google.common.base.Charsets.UTF_8

import org.apache.spark.util.Utils

private[nio] abstract class Message(val typ: Long, val id: Int) {
  var senderAddress: InetSocketAddress = null
  var started = false
  var startTime = -1L
  var finishTime = -1L
  var isSecurityNeg = false
  var hasError = false

  def size: Int

  def getChunkForSending(maxChunkSize: Int): Option[MessageChunk]

  def getChunkForReceiving(chunkSize: Int): Option[MessageChunk]

  def timeTaken(): String = (finishTime - startTime).toString + " ms"

  override def toString: String = {
    this.getClass.getSimpleName + "(id = " + id + ", size = " + size + ")"
  }
}


private[nio] object Message {
  val BUFFER_MESSAGE = 1111111111L

  var lastId = 1

  def getNewId(): Int = synchronized {
    lastId += 1
    if (lastId == 0) {
      lastId += 1
    }
    lastId
  }

  def createBufferMessage(dataBuffers: Seq[ByteBuffer], ackId: Int): BufferMessage = {
    if (dataBuffers == null) {
      return new BufferMessage(getNewId(), new ArrayBuffer[ByteBuffer], ackId)
    }
    if (dataBuffers.exists(_ == null)) {
      throw new Exception("Attempting to create buffer message with null buffer")
    }
    new BufferMessage(getNewId(), new ArrayBuffer[ByteBuffer] ++= dataBuffers, ackId)
  }

  def createBufferMessage(dataBuffers: Seq[ByteBuffer]): BufferMessage =
    createBufferMessage(dataBuffers, 0)

  def createBufferMessage(dataBuffer: ByteBuffer, ackId: Int): BufferMessage = {
    if (dataBuffer == null) {
      createBufferMessage(Array(ByteBuffer.allocate(0)), ackId)
    } else {
      createBufferMessage(Array(dataBuffer), ackId)
    }
  }

  def createBufferMessage(dataBuffer: ByteBuffer): BufferMessage =
    createBufferMessage(dataBuffer, 0)

  def createBufferMessage(ackId: Int): BufferMessage = {
    createBufferMessage(new Array[ByteBuffer](0), ackId)
  }

  
  def createErrorMessage(exception: Exception, ackId: Int): BufferMessage = {
    val exceptionString = Utils.exceptionString(exception)
    val serializedExceptionString = ByteBuffer.wrap(exceptionString.getBytes(UTF_8))
    val errorMessage = createBufferMessage(serializedExceptionString, ackId)
    errorMessage.hasError = true
    errorMessage
  }

  def create(header: MessageChunkHeader): Message = {
    val newMessage: Message = header.typ match {
      case BUFFER_MESSAGE => new BufferMessage(header.id,
        ArrayBuffer(ByteBuffer.allocate(header.totalSize)), header.other)
    }
    newMessage.hasError = header.hasError
    newMessage.senderAddress = header.address
    newMessage
  }
} 
Example 180
Source File: MessageChunkHeader.scala    From iolap   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.network.nio

import java.net.{InetAddress, InetSocketAddress}
import java.nio.ByteBuffer

private[nio] class MessageChunkHeader(
    val typ: Long,
    val id: Int,
    val totalSize: Int,
    val chunkSize: Int,
    val other: Int,
    val hasError: Boolean,
    val securityNeg: Int,
    val address: InetSocketAddress) {
  lazy val buffer = {
    // No need to change this, at 'use' time, we do a reverse lookup of the hostname.
    // Refer to network.Connection
    val ip = address.getAddress.getAddress()
    val port = address.getPort()
    ByteBuffer.
      allocate(MessageChunkHeader.HEADER_SIZE).
      putLong(typ).
      putInt(id).
      putInt(totalSize).
      putInt(chunkSize).
      putInt(other).
      put(if (hasError) 1.asInstanceOf[Byte] else 0.asInstanceOf[Byte]).
      putInt(securityNeg).
      putInt(ip.size).
      put(ip).
      putInt(port).
      position(MessageChunkHeader.HEADER_SIZE).
      flip.asInstanceOf[ByteBuffer]
  }

  override def toString: String = {
    "" + this.getClass.getSimpleName + ":" + id + " of type " + typ +
      " and sizes " + totalSize + " / " + chunkSize + " bytes, securityNeg: " + securityNeg
  }

}


private[nio] object MessageChunkHeader {
  val HEADER_SIZE = 45

  def create(buffer: ByteBuffer): MessageChunkHeader = {
    if (buffer.remaining != HEADER_SIZE) {
      throw new IllegalArgumentException("Cannot convert buffer data to Message")
    }
    val typ = buffer.getLong()
    val id = buffer.getInt()
    val totalSize = buffer.getInt()
    val chunkSize = buffer.getInt()
    val other = buffer.getInt()
    val hasError = buffer.get() != 0
    val securityNeg = buffer.getInt()
    val ipSize = buffer.getInt()
    val ipBytes = new Array[Byte](ipSize)
    buffer.get(ipBytes)
    val ip = InetAddress.getByAddress(ipBytes)
    val port = buffer.getInt()
    new MessageChunkHeader(typ, id, totalSize, chunkSize, other, hasError, securityNeg,
      new InetSocketAddress(ip, port))
  }
} 
Example 181
Source File: FlumeStreamSuite.scala    From iolap   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.streaming.flume

import java.net.{InetSocketAddress, ServerSocket}
import java.nio.ByteBuffer

import scala.collection.JavaConversions._
import scala.collection.mutable.{ArrayBuffer, SynchronizedBuffer}
import scala.concurrent.duration._
import scala.language.postfixOps

import com.google.common.base.Charsets
import org.apache.avro.ipc.NettyTransceiver
import org.apache.avro.ipc.specific.SpecificRequestor
import org.apache.commons.lang3.RandomUtils
import org.apache.flume.source.avro
import org.apache.flume.source.avro.{AvroFlumeEvent, AvroSourceProtocol}
import org.jboss.netty.channel.ChannelPipeline
import org.jboss.netty.channel.socket.SocketChannel
import org.jboss.netty.channel.socket.nio.NioClientSocketChannelFactory
import org.jboss.netty.handler.codec.compression._
import org.scalatest.{BeforeAndAfter, Matchers}
import org.scalatest.concurrent.Eventually._

import org.apache.spark.{Logging, SparkConf, SparkFunSuite}
import org.apache.spark.storage.StorageLevel
import org.apache.spark.streaming.{Milliseconds, StreamingContext, TestOutputStream}
import org.apache.spark.util.Utils

class FlumeStreamSuite extends SparkFunSuite with BeforeAndAfter with Matchers with Logging {
  val conf = new SparkConf().setMaster("local[4]").setAppName("FlumeStreamSuite")

  var ssc: StreamingContext = null
  var transceiver: NettyTransceiver = null

  after {
    if (ssc != null) {
      ssc.stop()
    }
    if (transceiver != null) {
      transceiver.close()
    }
  }

  test("flume input stream") {
    testFlumeStream(testCompression = false)
  }

  test("flume input compressed stream") {
    testFlumeStream(testCompression = true)
  }

  
  private class CompressionChannelFactory(compressionLevel: Int)
    extends NioClientSocketChannelFactory {

    override def newChannel(pipeline: ChannelPipeline): SocketChannel = {
      val encoder = new ZlibEncoder(compressionLevel)
      pipeline.addFirst("deflater", encoder)
      pipeline.addFirst("inflater", new ZlibDecoder())
      super.newChannel(pipeline)
    }
  }
} 
Example 182
Source File: FlumePollingEventCount.scala    From iolap   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.examples.streaming

import org.apache.spark.SparkConf
import org.apache.spark.storage.StorageLevel
import org.apache.spark.streaming._
import org.apache.spark.streaming.flume._
import org.apache.spark.util.IntParam
import java.net.InetSocketAddress


object FlumePollingEventCount {
  def main(args: Array[String]) {
    if (args.length < 2) {
      System.err.println(
        "Usage: FlumePollingEventCount <host> <port>")
      System.exit(1)
    }

    StreamingExamples.setStreamingLogLevels()

    val Array(host, IntParam(port)) = args

    val batchInterval = Milliseconds(2000)

    // Create the context and set the batch size
    val sparkConf = new SparkConf().setAppName("FlumePollingEventCount")
    val ssc = new StreamingContext(sparkConf, batchInterval)

    // Create a flume stream that polls the Spark Sink running in a Flume agent
    val stream = FlumeUtils.createPollingStream(ssc, host, port)

    // Print out the count of events received from this server in each batch
    stream.count().map(cnt => "Received " + cnt + " flume events." ).print()

    ssc.start()
    ssc.awaitTermination()
  }
} 
Example 183
Source File: NettyRpcHandlerSuite.scala    From multi-tenancy-spark   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.rpc.netty

import java.net.InetSocketAddress
import java.nio.ByteBuffer

import io.netty.channel.Channel
import org.mockito.Matchers._
import org.mockito.Mockito._

import org.apache.spark.SparkFunSuite
import org.apache.spark.network.client.{TransportClient, TransportResponseHandler}
import org.apache.spark.network.server.StreamManager
import org.apache.spark.rpc._

class NettyRpcHandlerSuite extends SparkFunSuite {

  val env = mock(classOf[NettyRpcEnv])
  val sm = mock(classOf[StreamManager])
  when(env.deserialize(any(classOf[TransportClient]), any(classOf[ByteBuffer]))(any()))
    .thenReturn(RequestMessage(RpcAddress("localhost", 12345), null, null))

  test("receive") {
    val dispatcher = mock(classOf[Dispatcher])
    val nettyRpcHandler = new NettyRpcHandler(dispatcher, env, sm)

    val channel = mock(classOf[Channel])
    val client = new TransportClient(channel, mock(classOf[TransportResponseHandler]))
    when(channel.remoteAddress()).thenReturn(new InetSocketAddress("localhost", 40000))
    nettyRpcHandler.channelActive(client)

    verify(dispatcher, times(1)).postToAll(RemoteProcessConnected(RpcAddress("localhost", 40000)))
  }

  test("connectionTerminated") {
    val dispatcher = mock(classOf[Dispatcher])
    val nettyRpcHandler = new NettyRpcHandler(dispatcher, env, sm)

    val channel = mock(classOf[Channel])
    val client = new TransportClient(channel, mock(classOf[TransportResponseHandler]))
    when(channel.remoteAddress()).thenReturn(new InetSocketAddress("localhost", 40000))
    nettyRpcHandler.channelActive(client)

    when(channel.remoteAddress()).thenReturn(new InetSocketAddress("localhost", 40000))
    nettyRpcHandler.channelInactive(client)

    verify(dispatcher, times(1)).postToAll(RemoteProcessConnected(RpcAddress("localhost", 40000)))
    verify(dispatcher, times(1)).postToAll(
      RemoteProcessDisconnected(RpcAddress("localhost", 40000)))
  }

} 
Example 184
Source File: GraphiteSink.scala    From multi-tenancy-spark   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.metrics.sink

import java.net.InetSocketAddress
import java.util.Properties
import java.util.concurrent.TimeUnit

import com.codahale.metrics.MetricRegistry
import com.codahale.metrics.graphite.{Graphite, GraphiteReporter, GraphiteUDP}

import org.apache.spark.SecurityManager
import org.apache.spark.metrics.MetricsSystem

private[spark] class GraphiteSink(val property: Properties, val registry: MetricRegistry,
    securityMgr: SecurityManager) extends Sink {
  val GRAPHITE_DEFAULT_PERIOD = 10
  val GRAPHITE_DEFAULT_UNIT = "SECONDS"
  val GRAPHITE_DEFAULT_PREFIX = ""

  val GRAPHITE_KEY_HOST = "host"
  val GRAPHITE_KEY_PORT = "port"
  val GRAPHITE_KEY_PERIOD = "period"
  val GRAPHITE_KEY_UNIT = "unit"
  val GRAPHITE_KEY_PREFIX = "prefix"
  val GRAPHITE_KEY_PROTOCOL = "protocol"

  def propertyToOption(prop: String): Option[String] = Option(property.getProperty(prop))

  if (!propertyToOption(GRAPHITE_KEY_HOST).isDefined) {
    throw new Exception("Graphite sink requires 'host' property.")
  }

  if (!propertyToOption(GRAPHITE_KEY_PORT).isDefined) {
    throw new Exception("Graphite sink requires 'port' property.")
  }

  val host = propertyToOption(GRAPHITE_KEY_HOST).get
  val port = propertyToOption(GRAPHITE_KEY_PORT).get.toInt

  val pollPeriod = propertyToOption(GRAPHITE_KEY_PERIOD) match {
    case Some(s) => s.toInt
    case None => GRAPHITE_DEFAULT_PERIOD
  }

  val pollUnit: TimeUnit = propertyToOption(GRAPHITE_KEY_UNIT) match {
    case Some(s) => TimeUnit.valueOf(s.toUpperCase())
    case None => TimeUnit.valueOf(GRAPHITE_DEFAULT_UNIT)
  }

  val prefix = propertyToOption(GRAPHITE_KEY_PREFIX).getOrElse(GRAPHITE_DEFAULT_PREFIX)

  MetricsSystem.checkMinimalPollingPeriod(pollUnit, pollPeriod)

  val graphite = propertyToOption(GRAPHITE_KEY_PROTOCOL).map(_.toLowerCase) match {
    case Some("udp") => new GraphiteUDP(new InetSocketAddress(host, port))
    case Some("tcp") | None => new Graphite(new InetSocketAddress(host, port))
    case Some(p) => throw new Exception(s"Invalid Graphite protocol: $p")
  }

  val reporter: GraphiteReporter = GraphiteReporter.forRegistry(registry)
      .convertDurationsTo(TimeUnit.MILLISECONDS)
      .convertRatesTo(TimeUnit.SECONDS)
      .prefixedWith(prefix)
      .build(graphite)

  override def start() {
    reporter.start(pollPeriod, pollUnit)
  }

  override def stop() {
    reporter.stop()
  }

  override def report() {
    reporter.report()
  }
} 
Example 185
Source File: BitVectorSocket.scala    From skunk   with MIT License 5 votes vote down vote up
// Copyright (c) 2018-2020 by Rob Norris
// This software is licensed under the MIT License (MIT).
// For more information see LICENSE or https://opensource.org/licenses/MIT

package skunk.net

import cats._
import cats.effect._
import cats.implicits._
import fs2.Chunk
import fs2.io.tcp.Socket
import scala.concurrent.duration.FiniteDuration
import scodec.bits.BitVector
import java.net.InetSocketAddress
import java.nio.channels._
import java.util.concurrent.Executors
import java.util.concurrent.ThreadFactory
import fs2.io.tcp.SocketGroup

object BitVectorSocket {
  // (fromSocket, used below, is defined earlier in the full source file)


  def apply[F[_]: Concurrent: ContextShift](
    host:         String,
    port:         Int,
    readTimeout:  FiniteDuration,
    writeTimeout: FiniteDuration,
    sg:           SocketGroup,
    sslOptions:   Option[SSLNegotiation.Options[F]],
  ): Resource[F, BitVectorSocket[F]] =
    for {
      sock  <- sg.client[F](new InetSocketAddress(host, port))
      sockʹ <- sslOptions.fold(sock.pure[Resource[F, ?]])(SSLNegotiation.negotiateSSL(sock, readTimeout, writeTimeout, _))
    } yield fromSocket(sockʹ, readTimeout, writeTimeout)

} 
Example 186
Source File: InProcessDeploy.scala    From polynote   with Apache License 2.0 5 votes vote down vote up
package polynote.testing.kernel.remote

import java.net.InetSocketAddress
import java.util.concurrent.TimeUnit
import java.util.concurrent.atomic.AtomicBoolean

import polynote.kernel.{BaseEnv, GlobalEnv, Kernel}
import polynote.kernel.environment.CurrentNotebook
import polynote.kernel.logging.Logging
import polynote.kernel.remote.{RemoteKernelClient, SocketTransport}
import zio.{Fiber, RIO, Ref, ZIO}
import zio.duration.Duration

class InProcessDeploy(kernelFactory: Kernel.Factory.LocalService, clientRef: Ref[RemoteKernelClient]) extends SocketTransport.Deploy {
  def deployKernel(transport: SocketTransport, serverAddress: InetSocketAddress): RIO[BaseEnv with GlobalEnv with CurrentNotebook, SocketTransport.DeployedProcess] = {
    val connectClient = RemoteKernelClient.tapRunThrowable(
      RemoteKernelClient.Args(
        Some(serverAddress.getHostString),
        Some(serverAddress.getPort),
        Some(kernelFactory)),
      Some(clientRef))

    connectClient.forkDaemon.map(new InProcessDeploy.Process(_))
  }

}

object InProcessDeploy {
  class Process(fiber: Fiber[Throwable, Int]) extends SocketTransport.DeployedProcess {
    def exitStatus: RIO[BaseEnv, Option[Int]] = fiber.poll.flatMap {
      case Some(exit) => ZIO.fromEither(exit.toEither).map(Some(_))
      case None => ZIO.succeed(None)
    }

    def awaitExit(timeout: Long, timeUnit: TimeUnit): RIO[BaseEnv, Option[Int]] = {
      fiber.join.disconnect.timeout(Duration(timeout, timeUnit))
    }

    def kill(): RIO[BaseEnv, Unit] = fiber.interrupt.unit
  }
} 
Example 187
Source File: SocksProxyChecker.scala    From ProxyCrawler   with Apache License 2.0 5 votes vote down vote up
package org.crowdcrawler.proxycrawler.checker

import java.net
import java.net.{InetSocketAddress, Socket, URI}
import java.nio.charset.StandardCharsets
import javax.net.ssl.{HostnameVerifier, SSLContext}

import org.apache.http.annotation.ThreadSafe
import org.apache.http.client.methods.HttpGet
import org.apache.http.client.protocol.HttpClientContext
import org.apache.http.config.RegistryBuilder
import org.apache.http.conn.socket.{ConnectionSocketFactory, PlainConnectionSocketFactory}
import org.apache.http.conn.ssl.{NoopHostnameVerifier, SSLConnectionSocketFactory}
import org.apache.http.impl.client.HttpClients
import org.apache.http.impl.conn.PoolingHttpClientConnectionManager
import org.apache.http.protocol.HttpContext
import org.apache.http.util.EntityUtils


@ThreadSafe
private[checker] object SocksProxyChecker extends AbstractProxyChecker {
  private class MyHttpConnectionSocketFactory extends PlainConnectionSocketFactory {
    override def createSocket(context: HttpContext): Socket = {
      val socksAddress = context.getAttribute("socks.address").asInstanceOf[InetSocketAddress]
      val proxy = new net.Proxy(net.Proxy.Type.SOCKS, socksAddress)
      new Socket(proxy)
    }
  }

  private class MyHttpsConnectionSocketFactory(sslContext: SSLContext, verifier: HostnameVerifier)
    extends SSLConnectionSocketFactory(sslContext) {
    override def createSocket(context: HttpContext): Socket = {
      val socksAddress = context.getAttribute("socks.address").asInstanceOf[InetSocketAddress]
      val proxy = new net.Proxy(net.Proxy.Type.SOCKS, socksAddress)
      new Socket(proxy)
    }
  }

  private val CLIENT = {
    val reg = RegistryBuilder.create[ConnectionSocketFactory]()
      .register("http", new MyHttpConnectionSocketFactory())
      .register("https",
        new MyHttpsConnectionSocketFactory(HttpsProxyChecker.SSL_CONTEXT, NoopHostnameVerifier.INSTANCE))
      .build()
    val cm = new PoolingHttpClientConnectionManager(reg)
    cm.setMaxTotal(AbstractProxyChecker.MAX_CONN)
    HttpClients.custom().setConnectionManager(cm).disableRedirectHandling().build()
  }
  private val TARGET_URL = new URI("http://www.baidu.com")


  def check(host: String, port: Int): (Int, Int) = {
    val request = new HttpGet(TARGET_URL)
    AbstractProxyChecker.configureRequest(request)

    val httpContext = {
      val socksAddress = new InetSocketAddress(host, port)
      val context = HttpClientContext.create()
      context.setAttribute("socks.address", socksAddress)
      context
    }

    val response = CLIENT.execute(request, httpContext)

    val statusCode = response.getStatusLine.getStatusCode
    val html = EntityUtils.toString(response.getEntity, StandardCharsets.UTF_8)
    if (statusCode == 200 && html.contains("<title>百度一下")) (statusCode, html.getBytes.length) else (statusCode, -1)
  }
} 
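Under the hood, the socket factories above route each connection through java.net.Proxy. A minimal JDK-only sketch (proxy and target endpoints are placeholders); creating the target address unresolved lets a SOCKS5 proxy perform the DNS lookup itself:

import java.net.{InetSocketAddress, Proxy, Socket}

val proxy  = new Proxy(Proxy.Type.SOCKS, new InetSocketAddress("127.0.0.1", 1080))
val socket = new Socket(proxy) // unconnected socket that will tunnel via the proxy
socket.connect(InetSocketAddress.createUnresolved("example.com", 80), 5000)
socket.close()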
Example 188
Source File: package.scala    From infinispan-spark   with Apache License 2.0 5 votes vote down vote up
package org.infinispan

import java.net.InetSocketAddress

import org.apache.spark.TaskContext
import org.apache.spark.rdd.RDD
import org.infinispan.client.hotrod.{CacheTopologyInfo, DataFormat, RemoteCache, RemoteCacheManager}
import org.infinispan.commons.dataconversion.MediaType
import org.infinispan.spark.config.ConnectorConfiguration
import org.infinispan.spark.rdd.RemoteCacheManagerBuilder

import scala.collection.JavaConversions._

package object spark {

   def getCacheTopology(cacheTopology: CacheTopologyInfo): String = {
      val segmentsPerServer = cacheTopology.getSegmentsPerServer
      segmentsPerServer.keySet.map {
         case i: InetSocketAddress => s"${i.getHostString}:${i.getPort}"
      }.mkString(";")
   }

   def decorateWithFormat(config: ConnectorConfiguration, cache: RemoteCache[_, _]): RemoteCache[_, _] = {
      if (!config.hasCustomFormat) cache else {
         val dataFormat = DataFormat.builder()
         Option(config.getKeyMediaType).map(MediaType.fromString).foreach(dataFormat.keyType)
         Option(config.getValueMediaType).map(MediaType.fromString).foreach(dataFormat.valueType)
         Option(config.getKeyMarshaller).map(_.newInstance).foreach(dataFormat.keyMarshaller)
         Option(config.getValueMarshaller).map(_.newInstance).foreach(dataFormat.valueMarshaller)
         cache.withDataFormat(dataFormat.build())
      }
   }

   def getCache[K, V](config: ConnectorConfiguration, rcm: RemoteCacheManager): RemoteCache[K, V] = {
      val cacheName = config.getCacheName
      val remoteCache = Option(cacheName).map(name => rcm.getCache[K, V](name)).getOrElse(rcm.getCache[K, V])
      decorateWithFormat(config, remoteCache).asInstanceOf[RemoteCache[K, V]]
   }

   implicit class RDDExtensions[K, V](rdd: RDD[(K, V)]) extends Serializable {

      def writeToInfinispan(configuration: ConnectorConfiguration): Unit = {
         val processor = (ctx: TaskContext, iterator: Iterator[(K, V)]) => {
            val remoteCacheManager = RemoteCacheManagerBuilder.create(configuration)
            val cache = getCache[K, V](configuration, remoteCacheManager)
            configuration.setServerList(getCacheTopology(cache.getCacheTopologyInfo))
            ctx.addTaskCompletionListener[Unit](_ => remoteCacheManager.stop())
            new InfinispanWriteJob(configuration).runJob(iterator, ctx)
         }
         rdd.sparkContext.runJob(rdd, processor)
      }

      private class InfinispanWriteJob(val configuration: ConnectorConfiguration) extends Serializable {
         private def getCacheManager: RemoteCacheManager = RemoteCacheManagerBuilder.create(configuration)

         def runJob(iterator: Iterator[(K, V)], ctx: TaskContext): Unit = {
            val remoteCacheManager = getCacheManager
            ctx.addTaskCompletionListener[Unit](_ => remoteCacheManager.stop())
            val cache = getCache[K, V](configuration, remoteCacheManager)
            val batchSize = configuration.getWriteBatchSize
            iterator.grouped(batchSize).foreach(kv => cache.putAll(mapAsJavaMap(kv.toMap)))
         }
      }

   }


} 
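getCacheTopology uses getHostString, which returns the literal IP or the original hostname without ever triggering a reverse DNS lookup, unlike getHostName. A small illustration (11222 is the conventional Hot Rod port; the address is a placeholder):

import java.net.{InetAddress, InetSocketAddress}

val node = new InetSocketAddress(InetAddress.getByAddress(Array[Byte](10, 0, 0, 5)), 11222)
node.getHostString // "10.0.0.5": the literal IP, no reverse lookup
node.getHostName   // may block on a reverse DNS query to find a hostname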
Example 189
Source File: RemoteCacheManagerBuilder.scala    From infinispan-spark   with Apache License 2.0 5 votes vote down vote up
package org.infinispan.spark.rdd

import java.net.InetSocketAddress
import java.util.function.Supplier

import org.infinispan.client.hotrod.configuration.ConfigurationBuilder
import org.infinispan.client.hotrod.marshall.MarshallerUtil
import org.infinispan.client.hotrod.{FailoverRequestBalancingStrategy, RemoteCacheManager}
import org.infinispan.commons.marshall.{Marshaller, ProtoStreamMarshaller}
import org.infinispan.protostream.FileDescriptorSource
import org.infinispan.protostream.annotations.ProtoSchemaBuilder
import org.infinispan.query.remote.client.ProtobufMetadataManagerConstants
import org.infinispan.spark.config.ConnectorConfiguration

import scala.collection.JavaConverters._

object RemoteCacheManagerBuilder {



   def create(cfg: ConnectorConfiguration, preferredAddress: InetSocketAddress): RemoteCacheManager = create(cfg, Some(preferredAddress))

   private def create(cfg: ConnectorConfiguration, preferredAddress: Option[InetSocketAddress]) = {
      if (!cfg.usesProtobuf) new RemoteCacheManager(createBuilder(cfg, preferredAddress, None).build())
      else
         createForQuery(cfg, preferredAddress)
   }

   private def createForQuery(cfg: ConnectorConfiguration, preferredAddress: Option[InetSocketAddress]) = {
      val builder = createBuilder(cfg, preferredAddress, Some(new ProtoStreamMarshaller))
      val rcm = new RemoteCacheManager(builder.build())
      buildSerializationContext(cfg, rcm)
   }

   private def createBuilder(cfg: ConnectorConfiguration, preferredAddress: Option[InetSocketAddress], marshaller: Option[Marshaller]) = {
      val configBuilder = new ConfigurationBuilder().withProperties(cfg.getHotRodClientProperties)
      def balancingStrategyFactory(a: InetSocketAddress) = new Supplier[FailoverRequestBalancingStrategy] {
         override def get(): FailoverRequestBalancingStrategy = new PreferredServerBalancingStrategy(a)
      }
      // register the preferred-server strategy with the builder; merely
      // constructing the supplier would otherwise have no effect
      preferredAddress.foreach(a => configBuilder.balancingStrategy(balancingStrategyFactory(a)))
      marshaller.foreach(m => configBuilder.marshaller(m))
      configBuilder
   }

   private def buildSerializationContext(cfg: ConnectorConfiguration, cm: RemoteCacheManager) = {
      val metadataCache = cm.getCache[String, AnyRef](ProtobufMetadataManagerConstants.PROTOBUF_METADATA_CACHE_NAME)
      val autoRegister = cfg.getRegisterSchemas
      def buildDescriptorSource(descriptors: Map[String, String]): FileDescriptorSource = {
         val fileDescriptorSource = new FileDescriptorSource
         descriptors.foldLeft(fileDescriptorSource) {
            case (fds, (fileName, contents)) => fds.addProtoFile(fileName, contents)
         }
         fileDescriptorSource
      }
      val serCtx = MarshallerUtil.getSerializationContext(cm)

      val protoDescriptors = cfg.getProtoFiles
      val marshallers = cfg.getMarshallers
      val protoAnnotatedEntities = cfg.getProtoEntities
      val descriptorSource = buildDescriptorSource(protoDescriptors)
      if (autoRegister) {
         descriptorSource.getFileDescriptors.asScala.foreach { case (name, contents) => metadataCache.put(name, new String(contents)) }
      }
      serCtx.registerProtoFiles(descriptorSource)

      marshallers.foreach { c => serCtx.registerMarshaller(c.newInstance()) }

      if (protoDescriptors.isEmpty) {
         val protoSchemaBuilder = new ProtoSchemaBuilder
         protoAnnotatedEntities.foreach { e =>
            val fileName = s"${e.getName}.proto"
            val contents = protoSchemaBuilder.fileName(fileName).addClass(e).build(serCtx)
            if (autoRegister) {
               metadataCache.put(fileName, contents)
            }
         }
      }
      cm
   }

} 
Example 190
Source File: StatsdExporter.scala    From akka-mon   with MIT License 5 votes vote down vote up
package org.akkamon.core.exporters

import java.net.InetSocketAddress

import akka.actor.{ActorRef, Actor, Props}
import akka.io.{Udp, IO}
import akka.util.ByteString
import org.akkamon.core.{Config, InstrumentExporter}


object StatsdExporter extends InstrumentExporter {

  def formatEvent(event: TimerEvent): String = s"${event.timer}:${event.value}|ms"
  def formatEvent(event: CounterEvent): String = s"${event.key}:${event.value}|g"  // we send this as a gauge so statsd keeps track of the values
  def formatEvent(event: SampledEvent): String = s"${event.key}:${event.value}|c"

  val instrumentActor = system.actorOf(Props(classOf[SimpleSender], new InetSocketAddress(Config.StatsdHost, Config.StatsdPort)))

  class SimpleSender(remote: InetSocketAddress) extends Actor {

    IO(Udp) ! Udp.SimpleSender

    def receive = {
      case Udp.SimpleSenderReady =>
        context.become(ready(sender()))
    }

    def ready(send: ActorRef): Receive = {
      case msg: MessageEvent => // do nothing, a message can't be sent to statsd
      case timer: TimerEvent => send ! Udp.Send(ByteString(formatEvent(timer)), remote)
      case counter: CounterEvent => send ! Udp.Send(ByteString(formatEvent(counter)), remote)
      case counter: SampledEvent => send ! Udp.Send(ByteString(formatEvent(counter)), remote)
      case counterMap: CounterEventMap =>
        counterMap.counts.foreach{ case (key, value) => send ! Udp.Send(ByteString(formatEvent(CounterEvent(key, value))), remote)}
    }
  }
} 
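Each Udp.Send above ships one plain-text StatsD line as a datagram. For comparison, the same wire format with blocking JDK sockets (8125 is StatsD's conventional port; the metric name is a placeholder):

import java.net.{DatagramPacket, DatagramSocket, InetSocketAddress}
import java.nio.charset.StandardCharsets.UTF_8

val remote  = new InetSocketAddress("localhost", 8125)
val payload = "requests.latency:42|ms".getBytes(UTF_8)
val socket  = new DatagramSocket()
socket.send(new DatagramPacket(payload, payload.length, remote))
socket.close()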
Example 191
Source File: package.scala    From squbs   with Apache License 2.0 5 votes vote down vote up
package org.squbs

import java.net.InetSocketAddress
import java.nio.channels.ServerSocketChannel

import akka.actor.ActorSystem
import akka.http.scaladsl.Http
import akka.http.scaladsl.model._
import akka.stream.ActorMaterializer
import akka.util.ByteString

import scala.concurrent.Future

package object unicomplex {

  // Remove this once Akka-Http exposes this test utility.
  def temporaryServerAddress(interface: String = "127.0.0.1"): InetSocketAddress = {
    val serverSocket = ServerSocketChannel.open()
    try {
      serverSocket.socket.bind(new InetSocketAddress(interface, 0))
      val port = serverSocket.socket.getLocalPort
      new InetSocketAddress(interface, port)
    } finally serverSocket.close()
  }

  def temporaryServerHostnameAndPort(interface: String = "127.0.0.1"): (InetSocketAddress, String, Int) = {
    val socketAddress = temporaryServerAddress(interface)
    (socketAddress, socketAddress.getHostName, socketAddress.getPort)
  }

  def extractEntityAsString(response: HttpResponse)
                           (implicit am: ActorMaterializer, system: ActorSystem): Future[String] = {
    import system.dispatcher
    response.entity.dataBytes.runFold(ByteString(""))(_ ++ _) map(_.utf8String)
  }

  def entityAsString(uri: String)(implicit am: ActorMaterializer, system: ActorSystem): Future[String] = {
    import system.dispatcher
    get(uri) flatMap extractEntityAsString
  }

  def entityAsStringWithHeaders(uri: String)(implicit am: ActorMaterializer, system: ActorSystem): Future[(String, Seq[HttpHeader])] = {
    import system.dispatcher
    get(uri) flatMap( response => extractEntityAsString(response) map((_, response.headers)))
  }

  def entityAsInt(uri: String)(implicit am: ActorMaterializer, system: ActorSystem): Future[Int] = {
    import system.dispatcher
    entityAsString(uri) map (s => s.toInt)
  }

  def get(uri: String)(implicit am: ActorMaterializer, system: ActorSystem): Future[HttpResponse] = {
    Http().singleRequest(HttpRequest(uri = Uri(uri)))
  }

  def post(uri: String, e: RequestEntity)(implicit am: ActorMaterializer, system: ActorSystem): Future[HttpResponse] = {
    Http().singleRequest(HttpRequest(method = HttpMethods.POST, uri = Uri(uri), entity = e))
  }

  def put(uri: String)(implicit am: ActorMaterializer, system: ActorSystem): Future[HttpResponse] = {
    Http().singleRequest(HttpRequest(method = HttpMethods.PUT, uri = Uri(uri)))
  }
} 
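A typical use of the helpers above in a test, assuming they are in scope:

val (address, host, port) = temporaryServerHostnameAndPort()
// start the server under test on (host, port); the probing socket is already
// closed, so the port is free modulo a small race with other processes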
Example 192
Source File: PacketProxy.scala    From Neutrino   with Apache License 2.0 5 votes vote down vote up
package com.ebay.neutrino

import java.net.{InetAddress, InetSocketAddress, SocketAddress}
import com.ebay.neutrino.util.Utilities

import scala.concurrent.Future
import scala.util.{Failure, Success}

import com.typesafe.scalalogging.slf4j.StrictLogging
import io.netty.bootstrap.{Bootstrap, ServerBootstrap}
import io.netty.channel.ChannelHandler.Sharable
import io.netty.channel._
import io.netty.channel.nio.NioEventLoopGroup
import io.netty.channel.socket.nio.{NioServerSocketChannel, NioSocketChannel}
import io.netty.util.AttributeKey



  override def channelRead(ctx: ChannelHandlerContext, msg: AnyRef) {
    println("Writing packet from downstream to upstream...")
    upstream.writeAndFlush(msg)
    //ctx.fireChannelRead(msg)
  }

  override def channelInactive(ctx: ChannelHandlerContext): Unit = {
    println("Downstream closing..")
    upstream.close()
    ctx.fireChannelInactive()
  }
} 
Example 193
Source File: VirtualServer.scala    From Neutrino   with Apache License 2.0 5 votes vote down vote up
package com.ebay.neutrino.config

import java.net.InetSocketAddress

import com.typesafe.config.Config


// Representation of a Server
case class VirtualServer(
  id: String,
  host: String,
  port: Int,
  weight: Option[Int] = None,
  health: Option[HealthSettings] = None
)
  extends VirtualAddress {
  // Expose VirtualAddress as socket-address (?? Does this cache ??)
  lazy val socketAddress = new InetSocketAddress(host, port)

  // Mutable health state
  @transient var healthState: HealthState = HealthState.Unknown
}


object VirtualServer {

  import Configuration._


  
  def apply(cfg: Config): VirtualServer =
    new VirtualServer(
      cfg getOptionalString "id" getOrElse (cfg getString "host"), // fallback to 'host'
      cfg getString "host",
      cfg getInt "port",
      if (cfg hasPath "weight") Option(cfg getInt "weight") else None
    ) with
      HasConfiguration {
      override val config: Config = cfg
    }
}


// Representation of a Backend Service
//case class Service() 
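To answer the '?? Does this cache ??' question above: a Scala lazy val memoizes its result, so the InetSocketAddress (including its eager DNS resolution) is built exactly once, on first access. A usage sketch with placeholder values:

val server = VirtualServer(id = "web-1", host = "10.0.0.5", port = 8080)
server.socketAddress // resolved and cached on first access
server.healthState   // HealthState.Unknown until a health check updates it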
Example 194
Source File: TelnetClientActor.scala    From asura   with MIT License 5 votes vote down vote up
package asura.dubbo.actor

import java.net.InetSocketAddress

import akka.actor.{ActorRef, Props, Status}
import akka.io.{IO, Tcp}
import akka.util.ByteString
import asura.common.actor.BaseActor
import asura.common.util.LogUtils
import asura.dubbo.DubboConfig

class TelnetClientActor(remote: InetSocketAddress, listener: ActorRef) extends BaseActor {

  import Tcp._
  import context.system

  IO(Tcp) ! Connect(remote)

  override def receive: Receive = {
    case CommandFailed(_: Connect) =>
      listener ! ByteString(s"${TelnetClientActor.MSG_CONNECT_TO} ${remote.getAddress.getHostAddress}:${remote.getPort} ${TelnetClientActor.MSG_FAIL}\r\n")
      context stop self
    case Connected(remote, local) =>
      log.debug(s"local address: ${local}, remote address: ${remote}")
      listener ! ByteString(s"${TelnetClientActor.MSG_CONNECT_TO} ${remote.getAddress.getHostAddress}:${remote.getPort} ${TelnetClientActor.MSG_SUCCESS}\r\n")
      val remoteConnection = sender()
      remoteConnection ! Register(self)
      context.become {
        case data: ByteString =>
          remoteConnection ! Write(data)
        case CommandFailed(_: Write) =>
          listener ! ByteString("write failed\r\n")
        case Received(data) =>
          listener ! data
        case TelnetClientActor.CMD_CLOSE =>
          remoteConnection ! Close
        case _: ConnectionClosed =>
          listener ! ByteString(s"connection to ${remote.getAddress.getHostAddress}:${remote.getPort} closed\r\n")
          context stop self
      }
    case Status.Failure(t) =>
      val stackTrace = LogUtils.stackTraceToString(t)
      log.warning(stackTrace)
      listener ! t.getMessage
      context stop self
  }

  override def postStop(): Unit = log.debug(s"${self.path} stopped")
}


object TelnetClientActor {

  val CMD_CLOSE = "close"
  val MSG_CONNECT_TO = "connect to"
  val MSG_SUCCESS = "success"
  val MSG_FAIL = "fail"

  def props(remote: InetSocketAddress, replies: ActorRef) = {
    Props(new TelnetClientActor(remote, replies))
  }

  def props(address: String, port: Int, replies: ActorRef) = {
    Props(
      new TelnetClientActor(
        new InetSocketAddress(address, if (port > 0) port else DubboConfig.DEFAULT_PORT),
        replies
      )
    )
  }
} 
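A hedged usage sketch, assuming an ActorSystem named system and a listener actor that consumes the ByteString chunks the client emits (20880 is Dubbo's conventional port; the command is illustrative):

val client = system.actorOf(TelnetClientActor.props("127.0.0.1", 20880, listener))
// once the connection is reported, raw bytes are forwarded to the remote side
client ! akka.util.ByteString("ls\r\n")
client ! TelnetClientActor.CMD_CLOSE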
Example 195
Source File: FlumeTestUtils.scala    From multi-tenancy-spark   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.streaming.flume

import java.net.{InetSocketAddress, ServerSocket}
import java.nio.ByteBuffer
import java.nio.charset.StandardCharsets
import java.util.{List => JList}
import java.util.Collections

import scala.collection.JavaConverters._

import org.apache.avro.ipc.NettyTransceiver
import org.apache.avro.ipc.specific.SpecificRequestor
import org.apache.commons.lang3.RandomUtils
import org.apache.flume.source.avro
import org.apache.flume.source.avro.{AvroFlumeEvent, AvroSourceProtocol}
import org.jboss.netty.channel.ChannelPipeline
import org.jboss.netty.channel.socket.SocketChannel
import org.jboss.netty.channel.socket.nio.NioClientSocketChannelFactory
import org.jboss.netty.handler.codec.compression.{ZlibDecoder, ZlibEncoder}

import org.apache.spark.util.Utils
import org.apache.spark.SparkConf


  private class CompressionChannelFactory(compressionLevel: Int)
    extends NioClientSocketChannelFactory {

    override def newChannel(pipeline: ChannelPipeline): SocketChannel = {
      val encoder = new ZlibEncoder(compressionLevel)
      pipeline.addFirst("deflater", encoder)
      pipeline.addFirst("inflater", new ZlibDecoder())
      super.newChannel(pipeline)
    }
  }

} 
Example 196
Source File: NettyRpcHandlerSuite.scala    From BigDatalog   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.rpc.netty

import java.net.InetSocketAddress
import java.nio.ByteBuffer

import io.netty.channel.Channel
import org.mockito.Mockito._
import org.mockito.Matchers._

import org.apache.spark.SparkFunSuite
import org.apache.spark.network.client.{TransportResponseHandler, TransportClient}
import org.apache.spark.network.server.StreamManager
import org.apache.spark.rpc._

class NettyRpcHandlerSuite extends SparkFunSuite {

  val env = mock(classOf[NettyRpcEnv])
  val sm = mock(classOf[StreamManager])
  when(env.deserialize(any(classOf[TransportClient]), any(classOf[ByteBuffer]))(any()))
    .thenReturn(RequestMessage(RpcAddress("localhost", 12345), null, null))

  test("receive") {
    val dispatcher = mock(classOf[Dispatcher])
    val nettyRpcHandler = new NettyRpcHandler(dispatcher, env, sm)

    val channel = mock(classOf[Channel])
    val client = new TransportClient(channel, mock(classOf[TransportResponseHandler]))
    when(channel.remoteAddress()).thenReturn(new InetSocketAddress("localhost", 40000))
    nettyRpcHandler.receive(client, null, null)

    verify(dispatcher, times(1)).postToAll(RemoteProcessConnected(RpcAddress("localhost", 40000)))
  }

  test("connectionTerminated") {
    val dispatcher = mock(classOf[Dispatcher])
    val nettyRpcHandler = new NettyRpcHandler(dispatcher, env, sm)

    val channel = mock(classOf[Channel])
    val client = new TransportClient(channel, mock(classOf[TransportResponseHandler]))
    when(channel.remoteAddress()).thenReturn(new InetSocketAddress("localhost", 40000))
    nettyRpcHandler.receive(client, null, null)

    when(channel.remoteAddress()).thenReturn(new InetSocketAddress("localhost", 40000))
    nettyRpcHandler.connectionTerminated(client)

    verify(dispatcher, times(1)).postToAll(RemoteProcessConnected(RpcAddress("localhost", 40000)))
    verify(dispatcher, times(1)).postToAll(
      RemoteProcessDisconnected(RpcAddress("localhost", 40000)))
  }

} 
Example 197
Source File: GraphiteSink.scala    From BigDatalog   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.metrics.sink

import java.net.InetSocketAddress
import java.util.Properties
import java.util.concurrent.TimeUnit

import com.codahale.metrics.MetricRegistry
import com.codahale.metrics.graphite.{GraphiteUDP, Graphite, GraphiteReporter}

import org.apache.spark.SecurityManager
import org.apache.spark.metrics.MetricsSystem

private[spark] class GraphiteSink(val property: Properties, val registry: MetricRegistry,
    securityMgr: SecurityManager) extends Sink {
  val GRAPHITE_DEFAULT_PERIOD = 10
  val GRAPHITE_DEFAULT_UNIT = "SECONDS"
  val GRAPHITE_DEFAULT_PREFIX = ""

  val GRAPHITE_KEY_HOST = "host"
  val GRAPHITE_KEY_PORT = "port"
  val GRAPHITE_KEY_PERIOD = "period"
  val GRAPHITE_KEY_UNIT = "unit"
  val GRAPHITE_KEY_PREFIX = "prefix"
  val GRAPHITE_KEY_PROTOCOL = "protocol"

  def propertyToOption(prop: String): Option[String] = Option(property.getProperty(prop))

  if (propertyToOption(GRAPHITE_KEY_HOST).isEmpty) {
    throw new Exception("Graphite sink requires 'host' property.")
  }

  if (propertyToOption(GRAPHITE_KEY_PORT).isEmpty) {
    throw new Exception("Graphite sink requires 'port' property.")
  }

  val host = propertyToOption(GRAPHITE_KEY_HOST).get
  val port = propertyToOption(GRAPHITE_KEY_PORT).get.toInt

  val pollPeriod = propertyToOption(GRAPHITE_KEY_PERIOD) match {
    case Some(s) => s.toInt
    case None => GRAPHITE_DEFAULT_PERIOD
  }

  val pollUnit: TimeUnit = propertyToOption(GRAPHITE_KEY_UNIT) match {
    case Some(s) => TimeUnit.valueOf(s.toUpperCase())
    case None => TimeUnit.valueOf(GRAPHITE_DEFAULT_UNIT)
  }

  val prefix = propertyToOption(GRAPHITE_KEY_PREFIX).getOrElse(GRAPHITE_DEFAULT_PREFIX)

  MetricsSystem.checkMinimalPollingPeriod(pollUnit, pollPeriod)

  val graphite = propertyToOption(GRAPHITE_KEY_PROTOCOL).map(_.toLowerCase) match {
    case Some("udp") => new GraphiteUDP(new InetSocketAddress(host, port))
    case Some("tcp") | None => new Graphite(new InetSocketAddress(host, port))
    case Some(p) => throw new Exception(s"Invalid Graphite protocol: $p")
  }

  val reporter: GraphiteReporter = GraphiteReporter.forRegistry(registry)
      .convertDurationsTo(TimeUnit.MILLISECONDS)
      .convertRatesTo(TimeUnit.SECONDS)
      .prefixedWith(prefix)
      .build(graphite)

  override def start() {
    reporter.start(pollPeriod, pollUnit)
  }

  override def stop() {
    reporter.stop()
  }

  override def report() {
    reporter.report()
  }
} 
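
For context, Spark normally instantiates this sink from conf/metrics.properties entries keyed by host, port, period, unit, prefix, and protocol, matching the GRAPHITE_KEY_* constants above. Below is a minimal direct-construction sketch, not from the original project: the hostname is a placeholder, port 2003 is the conventional Graphite plaintext port, and the object sits under org.apache.spark only because GraphiteSink is private[spark].

package org.apache.spark.metrics.sink

import java.util.Properties
import com.codahale.metrics.MetricRegistry
import org.apache.spark.{SecurityManager, SparkConf}

object GraphiteSinkSketch {
  def main(args: Array[String]): Unit = {
    val props = new Properties()
    props.setProperty("host", "graphite.example.com") // placeholder hostname
    props.setProperty("port", "2003")                 // Graphite plaintext TCP port
    props.setProperty("period", "10")                 // report every 10 units
    props.setProperty("unit", "seconds")              // parsed via TimeUnit.valueOf
    val sink = new GraphiteSink(props, new MetricRegistry(), new SecurityManager(new SparkConf()))
    sink.start() // begins periodic reporting on the configured schedule
    // ... run the workload, then:
    sink.stop()
  }
}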
Example 198
Source File: FlumeTestUtils.scala    From BigDatalog   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.streaming.flume

import java.net.{InetSocketAddress, ServerSocket}
import java.nio.ByteBuffer
import java.util.{List => JList}
import java.util.Collections

import scala.collection.JavaConverters._

import com.google.common.base.Charsets.UTF_8
import org.apache.avro.ipc.NettyTransceiver
import org.apache.avro.ipc.specific.SpecificRequestor
import org.apache.commons.lang3.RandomUtils
import org.apache.flume.source.avro
import org.apache.flume.source.avro.{AvroSourceProtocol, AvroFlumeEvent}
import org.jboss.netty.channel.ChannelPipeline
import org.jboss.netty.channel.socket.SocketChannel
import org.jboss.netty.channel.socket.nio.NioClientSocketChannelFactory
import org.jboss.netty.handler.codec.compression.{ZlibDecoder, ZlibEncoder}

import org.apache.spark.util.Utils
import org.apache.spark.SparkConf

// Enclosing helper class (the remainder of its body is not shown in this listing).
private[flume] class FlumeTestUtils {


  private class CompressionChannelFactory(compressionLevel: Int)
    extends NioClientSocketChannelFactory {

    override def newChannel(pipeline: ChannelPipeline): SocketChannel = {
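      // Install zlib compression on outbound data and decompression on inbound data
      // before the channel is handed back to the Avro transceiver.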
      val encoder = new ZlibEncoder(compressionLevel)
      pipeline.addFirst("deflater", encoder)
      pipeline.addFirst("inflater", new ZlibDecoder())
      super.newChannel(pipeline)
    }
  }

} 
Example 199
Source File: FlumePollingEventCount.scala    From BigDatalog   with Apache License 2.0 5 votes vote down vote up
// scalastyle:off println
package org.apache.spark.examples.streaming

import org.apache.spark.SparkConf
import org.apache.spark.storage.StorageLevel
import org.apache.spark.streaming._
import org.apache.spark.streaming.flume._
import org.apache.spark.util.IntParam
import java.net.InetSocketAddress


object FlumePollingEventCount {
  def main(args: Array[String]) {
    if (args.length < 2) {
      System.err.println(
        "Usage: FlumePollingEventCount <host> <port>")
      System.exit(1)
    }

    StreamingExamples.setStreamingLogLevels()

    val Array(host, IntParam(port)) = args

    val batchInterval = Milliseconds(2000)

    // Create the context and set the batch size
    val sparkConf = new SparkConf().setAppName("FlumePollingEventCount")
    val ssc = new StreamingContext(sparkConf, batchInterval)

    // Create a flume stream that polls the Spark Sink running in a Flume agent
    val stream = FlumeUtils.createPollingStream(ssc, host, port)

    // Print out the count of events received from this server in each batch
    stream.count().map(cnt => "Received " + cnt + " flume events." ).print()

    ssc.start()
    ssc.awaitTermination()
  }
}
// scalastyle:on println 
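
A typical way to launch this example, assuming a Spark build that bundles the Flume integration; the host and port are placeholders for the Flume agent running Spark's sink:

bin/run-example org.apache.spark.examples.streaming.FlumePollingEventCount localhost 9999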
Example 200
Source File: SunServerImpl.scala    From c4proto   with Apache License 2.0 5 votes vote down vote up
package ee.cone.c4gate

import java.lang.Math.toIntExact
import java.net.InetSocketAddress
import java.util.concurrent.TimeUnit

import com.sun.net.httpserver.{HttpExchange, HttpHandler, HttpServer}
import ee.cone.c4actor.{Executable, Execution, FinallyClose, Observer, Trace}
import ee.cone.c4gate.HttpProtocol.N_Header
import okio.ByteString

import scala.concurrent.{Await, ExecutionContext, Future}
import scala.concurrent.duration.{Duration, SECONDS}
import scala.collection.JavaConverters.mapAsScalaMapConverter
import scala.collection.JavaConverters.iterableAsScalaIterableConverter



class SunReqHandler(handler: FHttpHandler, executionContext: ExecutionContext) extends HttpHandler {
  def handle(httpExchange: HttpExchange) =
    Trace{ FinallyClose[HttpExchange,Unit](_.close())(httpExchange) { ex =>
      val method = httpExchange.getRequestMethod
      val path = httpExchange.getRequestURI.getPath
      val reqHeaders: List[N_Header] = httpExchange.getRequestHeaders.asScala
        .flatMap{ case(k,l)=>l.asScala.map(v=>N_Header(k,v)) }.toList
      val buffer = (new okio.Buffer).readFrom(httpExchange.getRequestBody)
      val body = buffer.readByteString()
      val request = FHttpRequest(method, path, reqHeaders, body)
      val responseF = handler.handle(request)(executionContext)
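      // Block this exchange's thread until the async handler completes (600 s cap).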
      val response = Await.result(responseF,Duration(600,SECONDS))
      val headers = httpExchange.getResponseHeaders
      response.headers.foreach(header=>headers.add(header.key,header.value))
      val bytes = response.body.toByteArray
      httpExchange.sendResponseHeaders(toIntExact(response.status), bytes.length)
      if(bytes.nonEmpty) httpExchange.getResponseBody.write(bytes)
    } }
}

class SunHttpServer(port: Int, handler: FHttpHandler, execution: Execution) extends Executable {
  def run(): Unit = concurrent.blocking{
    val pool = execution.newExecutorService("http-",None) //newWorkStealingPool
    execution.onShutdown("Pool",()=>{
      val tasks = pool.shutdownNow()
      pool.awaitTermination(Long.MaxValue,TimeUnit.SECONDS)
    })
    val executionContext: ExecutionContext = ExecutionContext.fromExecutor(pool)
    val server: HttpServer = HttpServer.create(new InetSocketAddress(port),0)
    execution.onShutdown("HttpServer",()=>server.stop(Int.MaxValue))
    server.setExecutor(pool)
    server.createContext("/", new SunReqHandler(handler,executionContext))
    server.start()
  }
}

class MutableStatefulReceiver[Message](execution: Execution, inner: List[Observer[Message]]) extends StatefulReceiver[Message] {
  var state: Future[List[Observer[Message]]] = Future.successful(inner)
  def send(message: Message): Unit = execution.fatal{ implicit ec =>
    synchronized{
      state = state.map(_.flatMap(_.activate(message)))
      state
    }
  }
}

class MutableStatefulReceiverFactory(execution: Execution) extends StatefulReceiverFactory {
  def create[Message](inner: List[Observer[Message]])(implicit executionContext: ExecutionContext): Future[StatefulReceiver[Message]] =
    Future.successful(new MutableStatefulReceiver[Message](execution,inner))
}