io.grpc.ManagedChannel Scala Examples

The following examples show how to use io.grpc.ManagedChannel. They are taken from open-source Scala projects; the original project, source file, and license are noted above each example.
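Before the project examples, here is a minimal, self-contained sketch of the lifecycle they all share: build a channel, use it, and shut it down cleanly. The host, port, and five-second termination wait are placeholder assumptions, and a transport implementation such as grpc-netty is assumed to be on the classpath.

import java.util.concurrent.TimeUnit

import io.grpc.{ManagedChannel, ManagedChannelBuilder}

object ManagedChannelLifecycleSketch {
  def main(args: Array[String]): Unit = {
    // Build a plaintext channel; "localhost" and 50051 are placeholder values.
    val channel: ManagedChannel =
      ManagedChannelBuilder.forAddress("localhost", 50051).usePlaintext().build()

    try {
      // Generated stubs (ScalaPB or grpc-java) would be created against this channel here.
      println(s"channel state: ${channel.getState(false)}")
    } finally {
      // Orderly shutdown: stop accepting new calls, wait briefly, then force-terminate if needed.
      channel.shutdown()
      if (!channel.awaitTermination(5, TimeUnit.SECONDS)) {
        channel.shutdownNow()
      }
    }
  }
}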
Example 1
Source File: LedgerClientIT.scala    From daml   with Apache License 2.0
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.ledger.client

import com.daml.ledger.api.testing.utils.{AkkaBeforeAndAfterAll, SuiteResourceManagementAroundEach}
import com.daml.ledger.client.configuration.{
  CommandClientConfiguration,
  LedgerClientConfiguration,
  LedgerIdRequirement
}
import com.daml.platform.sandboxnext.SandboxNextFixture
import io.grpc.ManagedChannel
import org.scalatest.{AsyncWordSpec, Inside, Matchers}

final class LedgerClientIT
    extends AsyncWordSpec
    with Matchers
    with Inside
    with AkkaBeforeAndAfterAll
    with SuiteResourceManagementAroundEach
    with SandboxNextFixture {
  "the ledger client" should {
    "shut down the channel when closed" in {
      val clientConfig = LedgerClientConfiguration(
        applicationId = classOf[LedgerClientIT].getSimpleName,
        ledgerIdRequirement = LedgerIdRequirement.none,
        commandClient = CommandClientConfiguration.default,
        sslContext = None,
        token = None,
      )

      for {
        client <- LedgerClient(channel, clientConfig)
      } yield {
        inside(channel) {
          case channel: ManagedChannel =>
            channel.isShutdown should be(false)
            channel.isTerminated should be(false)

            client.close()

            channel.isShutdown should be(true)
            channel.isTerminated should be(true)
        }
      }
    }
  }
} 
Example 2
Source File: ParticipantSession.scala    From daml   with Apache License 2.0
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.ledger.api.testtool.infrastructure.participant

import com.daml.ledger.api.testtool.infrastructure.LedgerServices
import com.daml.ledger.api.v1.ledger_identity_service.GetLedgerIdentityRequest
import com.daml.ledger.api.v1.transaction_service.GetLedgerEndRequest
import com.daml.timer.RetryStrategy
import io.grpc.ManagedChannel
import io.netty.channel.nio.NioEventLoopGroup
import org.slf4j.LoggerFactory

import scala.concurrent.duration.{DurationInt, SECONDS}
import scala.concurrent.{ExecutionContext, Future}

private[participant] final class ParticipantSession(
    val config: ParticipantSessionConfiguration,
    channel: ManagedChannel,
    eventLoopGroup: NioEventLoopGroup,
)(implicit val executionContext: ExecutionContext) {

  private[this] val logger = LoggerFactory.getLogger(classOf[ParticipantSession])

  private[this] val services: LedgerServices = new LedgerServices(channel)

  // The ledger identifier is retrieved only once when the participant session is created
  // Changing the ledger identifier during the execution of a session can result in unexpected consequences
  // The test tool is designed to run tests in an isolated environment but changing the
  // global state of the ledger breaks this assumption, no matter what
  private[this] val ledgerIdF =
    RetryStrategy.exponentialBackoff(10, 10.millis) { (attempt, wait) =>
      logger.debug(s"Fetching ledgerId to create context (attempt #$attempt, next one in $wait)...")
      services.identity.getLedgerIdentity(new GetLedgerIdentityRequest).map(_.ledgerId)
    }

  private[testtool] def createTestContext(
      endpointId: String,
      applicationId: String,
      identifierSuffix: String,
  ): Future[ParticipantTestContext] =
    for {
      ledgerId <- ledgerIdF
      end <- services.transaction.getLedgerEnd(new GetLedgerEndRequest(ledgerId)).map(_.getOffset)
    } yield
      new ParticipantTestContext(
        ledgerId,
        endpointId,
        applicationId,
        identifierSuffix,
        end,
        services,
        config.partyAllocation,
      )

  private[testtool] def close(): Unit = {
    logger.info(s"Disconnecting from participant at ${config.host}:${config.port}...")
    channel.shutdownNow()
    if (!channel.awaitTermination(10L, SECONDS)) {
      sys.error("Channel shutdown stuck. Unable to recover. Terminating.")
    }
    logger.info(s"Connection to participant at ${config.host}:${config.port} shut down.")
    if (!eventLoopGroup
        .shutdownGracefully(0, 0, SECONDS)
        .await(10L, SECONDS)) {
      sys.error("Unable to shutdown event loop. Unable to recover. Terminating.")
    }
    logger.info(s"Connection to participant at ${config.host}:${config.port} closed.")
  }
} 
Example 3
Source File: ServerStreamingBenchmark.scala    From daml   with Apache License 2.0
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.grpc.adapter

import akka.Done
import akka.stream.scaladsl.Sink
import com.daml.grpc.adapter.client.akka.ClientAdapter
import com.daml.grpc.adapter.operation.AkkaServiceFixture
import com.daml.ledger.api.perf.util.AkkaStreamPerformanceTest
import com.daml.ledger.api.testing.utils.Resource
import com.daml.platform.hello.{HelloRequest, HelloServiceGrpc}
import io.grpc.ManagedChannel
import org.scalameter.api.Gen
import org.scalameter.picklers.noPickler._

import scala.concurrent.Future
import java.net.InetSocketAddress
import java.util.concurrent.TimeUnit

object ServerStreamingBenchmark extends AkkaStreamPerformanceTest {

  override type ResourceType = () => ManagedChannel

  @transient override protected lazy val resource: Resource[() => ManagedChannel] =
    AkkaServiceFixture.getResource(Some(new InetSocketAddress(0))).map(_._2.channel)

  private val sizes = for {
    totalElements <- Gen.range("numResponses")(50000, 100000, 50000)
    clients <- Gen.enumeration("numClients")(1, 10)
    callsPerClient <- Gen.enumeration("numCalls")(1, 10)
  } yield (totalElements, clients, callsPerClient)

  performance of "Akka-Stream server" config (daConfig: _*) in {
    measure method "server streaming" in {
      using(sizes).withLifecycleManagement() in {
        case (totalElements, clients, callsPerClient) =>
          val eventualDones = for {
            (channel, schedulerPool) <- 1
              .to(clients)
              .map(i => resource.value() -> new AkkaExecutionSequencerPool(s"client-$i")(system))
            _ <- 1.to(callsPerClient)
          } yield {
            serverStreamingCall(totalElements / clients / callsPerClient, channel)(schedulerPool)
              .map(_ => channel -> schedulerPool)
          }
          val eventualTuples = Future.sequence(eventualDones)
          await(eventualTuples).foreach {
            case (channel, pool) =>
              channel.shutdown()
              channel.awaitTermination(5, TimeUnit.SECONDS)
              pool.close()
          }

      }
    }
  }

  private def serverStreamingCall(streamedElements: Int, managedChannel: ManagedChannel)(
      implicit
      executionSequencerFactory: ExecutionSequencerFactory): Future[Done] = {
    ClientAdapter
      .serverStreaming(
        HelloRequest(streamedElements),
        HelloServiceGrpc.stub(managedChannel).serverStreaming)
      .runWith(Sink.ignore)(materializer)
  }
} 
Example 4
Source File: WavesBlockchainClientBuilder.scala    From matcher   with MIT License
package com.wavesplatform.dex.grpc.integration

import com.wavesplatform.dex.domain.utils.ScorexLogging
import com.wavesplatform.dex.grpc.integration.clients.{WavesBlockchainCachingClient, WavesBlockchainClient, WavesBlockchainGrpcAsyncClient}
import com.wavesplatform.dex.grpc.integration.settings.WavesBlockchainClientSettings
import io.grpc.ManagedChannel
import io.grpc.internal.DnsNameResolverProvider
import io.netty.channel.nio.NioEventLoopGroup
import io.netty.channel.socket.nio.NioSocketChannel
import monix.execution.Scheduler

import scala.concurrent.{ExecutionContext, Future}

object WavesBlockchainClientBuilder extends ScorexLogging {

  def async(wavesBlockchainClientSettings: WavesBlockchainClientSettings,
            monixScheduler: Scheduler,
            grpcExecutionContext: ExecutionContext): WavesBlockchainClient[Future] = {

    log.info(s"Building gRPC client for server: ${wavesBlockchainClientSettings.grpc.target}")

    val eventLoopGroup = new NioEventLoopGroup

    val channel: ManagedChannel =
      wavesBlockchainClientSettings.grpc.toNettyChannelBuilder
        .nameResolverFactory(new DnsNameResolverProvider)
        .executor((command: Runnable) => grpcExecutionContext.execute(command))
        .eventLoopGroup(eventLoopGroup)
        .channelType(classOf[NioSocketChannel])
        .usePlaintext()
        .build

    new WavesBlockchainCachingClient(
      new WavesBlockchainGrpcAsyncClient(eventLoopGroup, channel, monixScheduler)(grpcExecutionContext),
      wavesBlockchainClientSettings.defaultCachesExpiration,
      monixScheduler
    )(grpcExecutionContext)
  }
} 
Example 5
Source File: GrpcSpec.scala    From mleap   with Apache License 2.0
package ml.combust.mleap.grpc.server

import akka.actor.ActorSystem
import akka.stream.{ActorMaterializer, Materializer}
import akka.testkit.TestKit
import io.grpc.{ManagedChannel, Server}
import ml.combust.mleap.executor.service.TransformService
import ml.combust.mleap.executor.testkit.TransformServiceSpec
import org.scalatest.{BeforeAndAfterAll, BeforeAndAfterEach}
import org.scalatest.concurrent.ScalaFutures

import scala.concurrent.duration._
import ml.combust.mleap.grpc.server.TestUtil._

class GrpcSpec extends TestKit(ActorSystem("grpc-server-test"))
  with TransformServiceSpec
  with BeforeAndAfterEach
  with BeforeAndAfterAll
  with ScalaFutures {

  private lazy val server = createServer(system)
  private lazy val channel = inProcessChannel
  private lazy val client = createClient(channel)

  override lazy val transformService: TransformService = {
    server
    client
  }

  override implicit def materializer: Materializer = ActorMaterializer()(system)

  override protected def afterAll(): Unit = {
    server.shutdown()
    channel.shutdown()
    TestKit.shutdownActorSystem(system, 5.seconds, verifySystemShutdown = true)
  }
} 
Example 6
Source File: TestUtil.scala    From mleap   with Apache License 2.0
package ml.combust.mleap.grpc.server

import java.io.File
import java.net.URI

import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import com.typesafe.config.ConfigFactory
import io.grpc.{ManagedChannel, Server}
import io.grpc.inprocess.{InProcessChannelBuilder, InProcessServerBuilder}
import ml.combust.mleap.executor.MleapExecutor
import ml.combust.mleap.grpc.GrpcClient
import ml.combust.mleap.pb.MleapGrpc
import ml.combust.mleap.pb.MleapGrpc.MleapStub
import ml.combust.mleap.runtime.frame.DefaultLeapFrame
import ml.combust.mleap.runtime.serialization.FrameReader

import scala.concurrent.ExecutionContext
import ExecutionContext.Implicits.global
import scala.util.Try

object TestUtil {

  lazy val lrUri: URI = URI.create(getClass.getClassLoader.getResource("models/airbnb.model.lr.zip").toURI.toString)

  lazy val frame: Try[DefaultLeapFrame] =
    FrameReader().read(new File(getClass.getClassLoader.getResource("leap_frame/frame.airbnb.json").getFile))

  lazy val uniqueServerName : String = "in-process server for " + getClass

  def createServer(system: ActorSystem) : Server = {
    val config = new GrpcServerConfig(ConfigFactory.load().getConfig("ml.combust.mleap.grpc.server.default"))
    val ssd = MleapGrpc.bindService(new GrpcServer(MleapExecutor(system), config)(global, ActorMaterializer.create(system)), global)
    val builder = InProcessServerBuilder.forName(uniqueServerName)
    builder.directExecutor().addService(ssd).intercept(new ErrorInterceptor)
    val server = builder.build
    server.start()
    server
  }

  def createClient(channel: ManagedChannel): GrpcClient = new GrpcClient(new MleapStub(channel))

  def inProcessChannel : ManagedChannel = InProcessChannelBuilder.forName(uniqueServerName).directExecutor.build

} 
Example 7
Source File: ClientSpec.scala    From gemini   with GNU General Public License v3.0
package tech.sourced.featurext

import io.grpc.{ManagedChannel, ManagedChannelBuilder}
import org.scalatest.{BeforeAndAfterAll, FlatSpec, Matchers}
import tech.sourced.featurext.generated.service._
import gopkg.in.bblfsh.sdk.v1.uast.generated.Node
import java.nio.file.{Files, Paths}

@tags.FEIntegration
class ClientSpec extends FlatSpec
  with Matchers
  with BeforeAndAfterAll {

  val serverHost = "localhost"
  val serverPort = 9001
  val fixturePath = "src/test/resources/protomsgs/server.py.proto"

  var channel: ManagedChannel = _
  var blockingStub: FeatureExtractorGrpc.FeatureExtractorBlockingStub = _
  var uast: Node = _

  override def beforeAll(): Unit = {
    super.beforeAll()

    val byteArray: Array[Byte] = Files.readAllBytes(Paths.get(fixturePath))
    uast = Node.parseFrom(byteArray)

    channel = ManagedChannelBuilder.forAddress(serverHost, serverPort).usePlaintext(true).build()
    blockingStub = FeatureExtractorGrpc.blockingStub(channel)
  }

  override def afterAll(): Unit = {
    channel.shutdownNow()
  }

  "identifiers call" should "return correct response" in {
    val request = IdentifiersRequest(uast=Some(uast), options=Some(IdentifiersOptions(docfreqThreshold=5)))
    val reply = blockingStub.identifiers(request)
    var features = reply.features.sortBy(_.name)

    // check correct shape of response
    features.size should be(49)
    features(0).name should be("i.ArgumentParser")
    features(0).weight should be(1)
  }

  "literals call" should "return correct response" in {
    val request = LiteralsRequest(uast=Some(uast), options=Some(LiteralsOptions(docfreqThreshold=5)))
    val reply = blockingStub.literals(request)
    var features = reply.features.sortBy(_.name)

    // check correct shape of response
    features.size should be(16)
    features(0).name should be("l.149420d2b7f04801")
    features(0).weight should be(1)
  }

  "uast2seq call" should "return correct response" in {
    val request = Uast2seqRequest(uast=Some(uast), options=Some(Uast2seqOptions(docfreqThreshold=5)))
    val reply = blockingStub.uast2Seq(request)
    var features = reply.features.sortBy(_.name)

    // check correct shape of response
    features.size should be(207)
    features(0).name should be("s.Assign>Name>Attribute>Call>Expr")
    features(0).weight should be(1)
  }

  "graphlet call" should "return correct response" in {
    val request = GraphletRequest(uast=Some(uast), options=Some(GraphletOptions(docfreqThreshold=5)))
    val reply = blockingStub.graphlet(request)
    var features = reply.features.sortBy(_.name)

    // check correct shape of response
    features.size should be(106)
    features(1).name should be("g.Assign_Call_Attribute")
    features(0).weight should be(1)
  }
} 
Example 8
Source File: ManagedChannelBuilderSyntax.scala    From fs2-grpc   with MIT License
package org.lyranthe.fs2_grpc
package java_runtime
package syntax

import cats.effect._
import fs2.Stream
import io.grpc.{ManagedChannel, ManagedChannelBuilder}
import java.util.concurrent.TimeUnit
import scala.concurrent._

trait ManagedChannelBuilderSyntax {
  implicit final def fs2GrpcSyntaxManagedChannelBuilder(builder: ManagedChannelBuilder[_]): ManagedChannelBuilderOps =
    new ManagedChannelBuilderOps(builder)
}

final class ManagedChannelBuilderOps(val builder: ManagedChannelBuilder[_]) extends AnyVal {

  
  // Build the channel lazily and run the given shutdown action when the resource is released.
  def resourceWithShutdown[F[_]](shutdown: ManagedChannel => F[Unit])(implicit F: Sync[F]): Resource[F, ManagedChannel] =
    Resource.make(F.delay(builder.build()))(shutdown)

  // Expose the channel as a single-element stream; the shutdown action runs when the stream terminates.
  def streamWithShutdown[F[_]](shutdown: ManagedChannel => F[Unit])(implicit F: Sync[F]): Stream[F, ManagedChannel] =
    Stream.resource(resourceWithShutdown(shutdown))
} 
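A hypothetical usage sketch of the enrichment above, wrapping the builder in ManagedChannelBuilderOps explicitly rather than relying on the library's implicit import; the host, port, and shutdownNow-based release are assumptions.

import cats.effect.IO
import fs2.Stream
import io.grpc.{ManagedChannel, ManagedChannelBuilder}
import org.lyranthe.fs2_grpc.java_runtime.syntax.ManagedChannelBuilderOps

object ChannelStreamSketch {
  // When the stream is compiled and run, the channel is built on acquisition and the
  // supplied shutdown action runs once the stream terminates.
  val channels: Stream[IO, ManagedChannel] =
    new ManagedChannelBuilderOps(ManagedChannelBuilder.forAddress("localhost", 50051).usePlaintext())
      .streamWithShutdown[IO](ch => IO { ch.shutdownNow(); () })
}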
Example 9
Source File: GrpcGatewayHandler.scala    From grpcgateway   with MIT License
package grpcgateway.handlers

import java.nio.charset.StandardCharsets

import scalapb.GeneratedMessage
import scalapb.json4s.JsonFormat
import io.grpc.ManagedChannel
import io.netty.channel.ChannelHandler.Sharable
import io.netty.channel.{ ChannelFutureListener, ChannelHandlerContext, ChannelInboundHandlerAdapter }
import io.netty.handler.codec.http._

import scala.concurrent.{ ExecutionContext, Future }

@Sharable
abstract class GrpcGatewayHandler(channel: ManagedChannel)(implicit ec: ExecutionContext) extends ChannelInboundHandlerAdapter {

  def name: String

  def shutdown(): Unit =
    if (!channel.isShutdown) channel.shutdown()

  def supportsCall(method: HttpMethod, uri: String): Boolean
  def unaryCall(method: HttpMethod, uri: String, body: String): Future[GeneratedMessage]

  override def channelRead(ctx: ChannelHandlerContext, msg: scala.Any): Unit = {

    msg match {
      case req: FullHttpRequest =>

        if (supportsCall(req.method(), req.uri())) {

          val body = req.content().toString(StandardCharsets.UTF_8)

          unaryCall(req.method(), req.uri(), body)
            .map(JsonFormat.toJsonString)
            .map(json => {
              buildFullHttpResponse(
                requestMsg = req,
                responseBody = json,
                responseStatus = HttpResponseStatus.OK,
                responseContentType = "application/json"
              )
            })
            .recover({ case err =>

              val (body, status) = err match {
                case e: GatewayException => e.details -> GRPC_HTTP_CODE_MAP.getOrElse(e.code, HttpResponseStatus.INTERNAL_SERVER_ERROR)
                case _ => "Internal error" -> HttpResponseStatus.INTERNAL_SERVER_ERROR
              }

              buildFullHttpResponse(
                requestMsg = req,
                responseBody = body,
                responseStatus = status,
                responseContentType = "application/text"
              )
            }).foreach(resp => {
              ctx.writeAndFlush(resp).addListener(ChannelFutureListener.CLOSE)
            })

        } else {
          super.channelRead(ctx, msg)
        }
      case _ => super.channelRead(ctx, msg)
    }
  }
} 
Example 10
Source File: GrpcTraceReaders.scala    From haystack-traces   with Apache License 2.0
package com.expedia.www.haystack.trace.reader.stores.readers.grpc

import com.expedia.open.tracing.api.Trace
import com.expedia.open.tracing.backend.{ReadSpansRequest, StorageBackendGrpc}
import com.expedia.www.haystack.commons.metrics.MetricsSupport
import com.expedia.www.haystack.trace.commons.config.entities.TraceStoreBackends
import com.expedia.www.haystack.trace.reader.exceptions.TraceNotFoundException
import com.expedia.www.haystack.trace.reader.metrics.AppMetricNames
import com.expedia.www.haystack.trace.reader.readers.utils.TraceMerger
import io.grpc.{ManagedChannel, ManagedChannelBuilder}
import org.slf4j.LoggerFactory

import scala.collection.JavaConverters._
import scala.concurrent.{ExecutionContextExecutor, Future, Promise}

class GrpcTraceReaders(config: TraceStoreBackends)
                      (implicit val dispatcher: ExecutionContextExecutor) extends MetricsSupport with AutoCloseable {
  private val LOGGER = LoggerFactory.getLogger(classOf[GrpcTraceReaders])

  private val readTimer = metricRegistry.timer(AppMetricNames.BACKEND_READ_TIME)
  private val readFailures = metricRegistry.meter(AppMetricNames.BACKEND_READ_FAILURES)
  private val tracesFailures = metricRegistry.meter(AppMetricNames.BACKEND_TRACES_FAILURE)

  private val clients: Seq[GrpcChannelClient] =  config.backends.map {
    backend => {
      val channel = ManagedChannelBuilder
        .forAddress(backend.host, backend.port)
        .usePlaintext(true)
        .build()

      val client = StorageBackendGrpc.newFutureStub(channel)
      GrpcChannelClient(channel, client)
    }
  }

  def readTraces(traceIds: List[String]): Future[Seq[Trace]] = {
    val allFutures = clients.map {
      client =>
        readTraces(traceIds, client.stub) recoverWith  {
          case _: Exception => Future.successful(Seq.empty[Trace])
        }
    }

    Future.sequence(allFutures)
      .map(traceSeq => traceSeq.flatten)
      .map {
        traces =>
          if (traces.isEmpty) throw new TraceNotFoundException() else TraceMerger.merge(traces)
      }
  }

  private def readTraces(traceIds: List[String], client: StorageBackendGrpc.StorageBackendFutureStub): Future[Seq[Trace]] = {
    val timer = readTimer.time()
    val promise = Promise[Seq[Trace]]

    try {
      val readSpansRequest = ReadSpansRequest.newBuilder().addAllTraceIds(traceIds.asJavaCollection).build()
      val futureResponse = client.readSpans(readSpansRequest)
      futureResponse.addListener(new ReadSpansResponseListener(
        futureResponse,
        promise,
        timer,
        readFailures,
        tracesFailures,
        traceIds.size), dispatcher)

      // return the future with the results for the given client
      promise.future
    } catch {
      case ex: Exception =>
        readFailures.mark()
        timer.stop()
        LOGGER.error("Failed to read raw traces with exception", ex)
        Future.failed(ex)
    }
  }

  override def close(): Unit = {
    clients.foreach(_.channel.shutdown())
  }

  case class GrpcChannelClient(channel: ManagedChannel, stub: StorageBackendGrpc.StorageBackendFutureStub)
} 
Example 11
Source File: ChannelUtils.scala    From akka-grpc   with Apache License 2.0
package akka.grpc.internal

import java.util.concurrent.CompletionStage

import akka.Done
import akka.annotation.InternalApi
import akka.event.LoggingAdapter
import io.grpc.{ ConnectivityState, ManagedChannel }

import scala.compat.java8.FutureConverters._
import scala.concurrent.{ Future, Promise }


@InternalApi
object ChannelUtils {

  @InternalApi
  private[akka] def monitorChannel(
      ready: Promise[Unit],
      done: Promise[Done],
      channel: ManagedChannel,
      maxConnectionAttempts: Option[Int],
      log: LoggingAdapter): Unit = {
    def monitor(currentState: ConnectivityState, connectionAttempts: Int): Unit = {
      log.debug(s"monitoring with state $currentState and connectionAttempts $connectionAttempts")
      val newAttemptOpt = currentState match {
        case ConnectivityState.TRANSIENT_FAILURE =>
          if (maxConnectionAttempts.contains(connectionAttempts + 1)) {
            val ex = new ClientConnectionException(s"Unable to establish connection after [$maxConnectionAttempts]")
            ready.tryFailure(ex) || done.tryFailure(ex)
            None
          } else Some(connectionAttempts + 1)

        case ConnectivityState.READY =>
          ready.trySuccess(())
          Some(0)

        case ConnectivityState.SHUTDOWN =>
          done.trySuccess(Done)
          None

        case ConnectivityState.IDLE | ConnectivityState.CONNECTING =>
          Some(connectionAttempts)
      }
      newAttemptOpt.foreach { attempts =>
        channel.notifyWhenStateChanged(currentState, () => monitor(channel.getState(false), attempts))
      }
    }
    monitor(channel.getState(false), 0)
  }

} 
Example 12
Source File: ChannelBuilder.scala    From akka-grpc   with Apache License 2.0
package io.grpc.testing.integration2

import io.grpc.ManagedChannel
import io.grpc.internal.testing.TestUtils
import io.grpc.netty.{ GrpcSslContexts, NegotiationType, NettyChannelBuilder }
import io.netty.handler.ssl.SslContext

object ChannelBuilder {
  def buildChannel(settings: Settings): ManagedChannel =
    if (settings.useAkkaHttp) {
      // TODO: here comes the akka-http based channel (when ready)
      throw new RuntimeException("Not implemented")
    } else {
      val sslContext: SslContext = {
        if (settings.useTestCa) {
          try GrpcSslContexts.forClient.trustManager(TestUtils.loadCert("ca.pem")).build
          catch {
            case ex: Exception => throw new RuntimeException(ex)
          }
        } else null
      }

      val builder =
        NettyChannelBuilder
          .forAddress(settings.serverHost, settings.serverPort)
          .flowControlWindow(65 * 1024)
          .negotiationType(if (settings.useTls) NegotiationType.TLS else NegotiationType.PLAINTEXT)
          .sslContext(sslContext)

      if (settings.serverHostOverride != null)
        builder.overrideAuthority(settings.serverHostOverride)

      builder.build
    }
} 
Example 13
Source File: GRPCClient.scala    From NSDb   with Apache License 2.0
package io.radicalbit.nsdb.client.rpc

import java.util.concurrent.TimeUnit

import io.grpc.{ManagedChannel, ManagedChannelBuilder}
import io.radicalbit.nsdb.rpc.health.{HealthCheckRequest, HealthCheckResponse, HealthGrpc}
import io.radicalbit.nsdb.rpc.init._
import io.radicalbit.nsdb.rpc.request.RPCInsert
import io.radicalbit.nsdb.rpc.requestCommand.{DescribeMetric, ShowMetrics, ShowNamespaces}
import io.radicalbit.nsdb.rpc.requestSQL.SQLRequestStatement
import io.radicalbit.nsdb.rpc.response.RPCInsertResult
import io.radicalbit.nsdb.rpc.responseCommand.{DescribeMetricResponse, MetricsGot, Namespaces}
import io.radicalbit.nsdb.rpc.responseSQL.SQLStatementResponse
import io.radicalbit.nsdb.rpc.restore.{RestoreGrpc, RestoreRequest, RestoreResponse}
import io.radicalbit.nsdb.rpc.service.{NSDBServiceCommandGrpc, NSDBServiceSQLGrpc}
import org.slf4j.LoggerFactory

import scala.concurrent.Future


class GRPCClient(host: String, port: Int) {

  private val log = LoggerFactory.getLogger(classOf[GRPCClient])

  private val channel: ManagedChannel = ManagedChannelBuilder.forAddress(host, port).usePlaintext().build
  private val stubHealth              = HealthGrpc.stub(channel)
  private val stubRestore             = RestoreGrpc.stub(channel)
  private val stubSql                 = NSDBServiceSQLGrpc.stub(channel)
  private val stubCommand             = NSDBServiceCommandGrpc.stub(channel)
  private val stubInit                = InitMetricGrpc.stub(channel)

  def checkConnection(): Future[HealthCheckResponse] = {
    log.debug("checking connection")
    stubHealth.check(HealthCheckRequest("whatever"))
  }

  def restore(request: RestoreRequest): Future[RestoreResponse] = {
    log.debug("creating dump")
    stubRestore.restore(request)
  }

  def initMetric(request: InitMetricRequest): Future[InitMetricResponse] = {
    log.debug("Preparing a init request for {}", request)
    stubInit.initMetric(request)
  }

  def write(request: RPCInsert): Future[RPCInsertResult] = {
    log.debug("Preparing a write request for {}...", request)
    stubSql.insertBit(request)
  }

  def executeSQLStatement(request: SQLRequestStatement): Future[SQLStatementResponse] = {
    log.debug("Preparing execution of SQL request: {} ", request.statement)
    stubSql.executeSQLStatement(request)
  }

  def showNamespaces(request: ShowNamespaces): Future[Namespaces] = {
    log.debug("Preparing of command show namespaces")
    stubCommand.showNamespaces(request)
  }

  def showMetrics(request: ShowMetrics): Future[MetricsGot] = {
    log.debug("Preparing of command show metrics for namespace: {} ", request.namespace)
    stubCommand.showMetrics(request)
  }

  def describeMetric(request: DescribeMetric): Future[DescribeMetricResponse] = {
    log.debug("Preparing of command describe metric for namespace: {} ", request.namespace)
    stubCommand.describeMetric(request)
  }

  def close(): Unit = channel.shutdownNow().awaitTermination(10, TimeUnit.SECONDS)

} 
Example 14
Source File: Node.scala    From Waves   with MIT License
package com.wavesplatform.it

import java.net.{InetSocketAddress, URL}
import java.util.concurrent.TimeUnit

import com.typesafe.config.Config
import com.wavesplatform.account.{KeyPair, PublicKey}
import com.wavesplatform.common.utils.EitherExt2
import com.wavesplatform.it.util.GlobalTimer
import com.wavesplatform.settings.WavesSettings
import com.wavesplatform.state.diffs.FeeValidation
import com.wavesplatform.utils.LoggerFacade
import io.grpc.{ManagedChannel, ManagedChannelBuilder}
import org.asynchttpclient.Dsl.{config => clientConfig, _}
import org.asynchttpclient._
import org.slf4j.LoggerFactory

import scala.concurrent.duration.FiniteDuration

abstract class Node(val config: Config) extends AutoCloseable {
  lazy val log: LoggerFacade =
    LoggerFacade(LoggerFactory.getLogger(s"${getClass.getCanonicalName}.${this.name}"))

  val settings: WavesSettings = WavesSettings.fromRootConfig(config)
  val client: AsyncHttpClient = asyncHttpClient(
    clientConfig()
      .setKeepAlive(false)
      .setNettyTimer(GlobalTimer.instance))

  lazy val grpcChannel: ManagedChannel = ManagedChannelBuilder.forAddress(networkAddress.getHostString, nodeExternalPort(6870))
    .usePlaintext()
    .keepAliveWithoutCalls(true)
    .keepAliveTime(30, TimeUnit.SECONDS)
    .build()

  val keyPair: KeyPair  = KeyPair.fromSeed(config.getString("account-seed")).explicitGet()
  val publicKey: PublicKey = PublicKey.fromBase58String(config.getString("public-key")).explicitGet()
  val address: String      = config.getString("address")

  def nodeExternalPort(internalPort: Int): Int
  def nodeApiEndpoint: URL
  def apiKey: String

  
  def networkAddress: InetSocketAddress

  override def close(): Unit = client.close()
}

object Node {
  implicit class NodeExt(val n: Node) extends AnyVal {
    def name: String               = n.settings.networkSettings.nodeName
    def publicKeyStr: String       = n.publicKey.toString
    def fee(txTypeId: Byte): Long  = FeeValidation.FeeConstants(txTypeId) * FeeValidation.FeeUnit
    def blockDelay: FiniteDuration = n.settings.blockchainSettings.genesisSettings.averageBlockDelay
  }
} 
Example 15
Source File: PubSubAdmin.scala    From scio   with Apache License 2.0
package com.spotify.scio.pubsub

import com.google.pubsub.v1.PublisherGrpc.PublisherBlockingStub
import com.google.pubsub.v1.SubscriberGrpc.SubscriberBlockingStub
import com.google.pubsub.v1.{
  GetSubscriptionRequest,
  GetTopicRequest,
  PublisherGrpc,
  SubscriberGrpc,
  Subscription,
  Topic
}
import io.grpc.ManagedChannel
import io.grpc.auth.MoreCallCredentials
import io.grpc.netty.{GrpcSslContexts, NegotiationType, NettyChannelBuilder}
import org.apache.beam.sdk.io.gcp.pubsub.PubsubOptions

import scala.util.Try

object PubSubAdmin {
  private object GrpcClient {
    private def newChannel: ManagedChannel =
      NettyChannelBuilder
        .forAddress("pubsub.googleapis.com", 443)
        .negotiationType(NegotiationType.TLS)
        .sslContext(GrpcSslContexts.forClient.ciphers(null).build)
        .build

    def subscriber[A](pubsubOptions: PubsubOptions)(f: SubscriberBlockingStub => A): Try[A] = {
      val channel = newChannel
      val client = SubscriberGrpc
        .newBlockingStub(channel)
        .withCallCredentials(MoreCallCredentials.from(pubsubOptions.getGcpCredential))

      val result = Try(f(client))
      channel.shutdownNow()
      result
    }

    def publisher[A](pubsubOptions: PubsubOptions)(f: PublisherBlockingStub => A): Try[A] = {
      val channel = newChannel
      val client = PublisherGrpc
        .newBlockingStub(channel)
        .withCallCredentials(MoreCallCredentials.from(pubsubOptions.getGcpCredential))

      val result = Try(f(client))
      channel.shutdownNow()
      result
    }
  }

  
  def subscription(pubsubOptions: PubsubOptions, name: String): Try[Subscription] =
    GrpcClient.subscriber(pubsubOptions) { client =>
      val subRequest = GetSubscriptionRequest.newBuilder().setSubscription(name).build()
      client.getSubscription(subRequest)
    }
} 
Example 16
Source File: HelloWorldClient.scala    From grpc-scala-sample   with BSD 3-Clause "New" or "Revised" License
package io.grpc.examples.helloworld

import java.util.concurrent.TimeUnit
import java.util.logging.{Level, Logger}

import io.grpc.examples.helloworld.helloworld.{HelloRequest, GreeterGrpc}
import io.grpc.examples.helloworld.helloworld.GreeterGrpc.GreeterBlockingStub
import io.grpc.{StatusRuntimeException, ManagedChannelBuilder, ManagedChannel}


class HelloWorldClient private (
    private val channel: ManagedChannel,
    private val blockingStub: GreeterBlockingStub
) {

  private[this] val logger = Logger.getLogger(classOf[HelloWorldClient].getName)

  // Shut down the channel, waiting up to five seconds for in-flight calls to drain.
  def shutdown(): Unit = {
    channel.shutdown().awaitTermination(5, TimeUnit.SECONDS)
  }

  def greet(name: String): Unit = {
    logger.info("Will try to greet " + name + " ...")
    val request = HelloRequest(name = name)
    try {
      val response = blockingStub.sayHello(request)
      logger.info("Greeting: " + response.message)
    }
    catch {
      case e: StatusRuntimeException =>
        logger.log(Level.WARNING, "RPC failed: {0}", e.getStatus)
    }
  }
}