java.net.ConnectException Scala Examples
The following examples show how java.net.ConnectException is used in real-world, open-source Scala projects. The originating project, source file, and license are listed above each example.
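Before the project examples, here is a minimal, self-contained sketch of the exception itself: connecting a plain java.net.Socket to a port with no listener raises a ConnectException. The port number below is an arbitrary assumption for illustration.

import java.net.{ConnectException, Socket}

object ConnectExceptionDemo extends App {
  try {
    // Assumed: nothing is listening on this port, so connect() is refused.
    val socket = new Socket("localhost", 59999)
    socket.close()
  } catch {
    case e: ConnectException =>
      println(s"Connection refused: ${e.getMessage}")
  }
}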
Example 1
Source File: CommonHttpBehaviour.scala From http-verbs with Apache License 2.0
package uk.gov.hmrc.http

import java.net.ConnectException
import java.util.concurrent.TimeoutException

import org.scalatest.concurrent.ScalaFutures
import org.scalatest.wordspec.AnyWordSpecLike
import org.scalatest.matchers.should.Matchers
import play.api.libs.json.Json
import uk.gov.hmrc.http.logging.{ConnectionTracing, LoggingDetails}

import scala.collection.mutable
import scala.concurrent.{ExecutionContext, Future}

trait CommonHttpBehaviour extends ScalaFutures with Matchers with AnyWordSpecLike {

  case class TestClass(foo: String, bar: Int)
  implicit val tcreads = Json.format[TestClass]

  case class TestRequestClass(baz: String, bar: Int)
  implicit val trcreads = Json.format[TestRequestClass]

  implicit val hc = HeaderCarrier()
  val testBody = "testBody"
  val testRequestBody = "testRequestBody"
  val url = "http://some.url"

  def response(returnValue: Option[String] = None, statusCode: Int = 200) =
    Future.successful(HttpResponse(
      status = statusCode,
      body   = returnValue.getOrElse("")
    ))

  val defaultHttpResponse = response()

  def anErrorMappingHttpCall(verb: String, httpCall: (String, Future[HttpResponse]) => Future[_]) = {
    s"throw a GatewayTimeout exception when the HTTP $verb throws a TimeoutException" in {
      implicit val hc = HeaderCarrier()
      val url: String = "http://some.nonexistent.url"

      val e = httpCall(url, Future.failed(new TimeoutException("timeout"))).failed.futureValue

      e should be(a[GatewayTimeoutException])
      e.getMessage should startWith(verb)
      e.getMessage should include(url)
    }

    s"throw a BadGateway exception when the HTTP $verb throws a ConnectException" in {
      implicit val hc = HeaderCarrier()
      val url: String = "http://some.nonexistent.url"

      val e = httpCall(url, Future.failed(new ConnectException("timeout"))).failed.futureValue

      e should be(a[BadGatewayException])
      e.getMessage should startWith(verb)
      e.getMessage should include(url)
    }
  }

  def aTracingHttpCall[T <: ConnectionTracingCapturing](verb: String, method: String, httpBuilder: => T)(
    httpAction: (T => Future[_]))(implicit mf: Manifest[T]) =
    s"trace exactly once when the HTTP $verb calls $method" in {
      val http = httpBuilder
      httpAction(http).futureValue
      http.traceCalls should have size 1
      http.traceCalls.head._1 shouldBe verb
    }
}

trait ConnectionTracingCapturing extends ConnectionTracing {

  val traceCalls = mutable.Buffer[(String, String)]()

  override def withTracing[T](method: String, uri: String)(
    body: => Future[T])(implicit ld: LoggingDetails, ec: ExecutionContext) = {
    traceCalls += ((method, uri))
    body
  }
}
Example 2
Source File: SolRSupport.scala From Taxi360 with Apache License 2.0
package com.cloudera.sa.taxi360.streaming.ingestion.solr

import java.net.{ConnectException, SocketException}
import java.util

import org.apache.solr.client.solrj.impl.CloudSolrServer
import org.apache.solr.client.solrj.request.UpdateRequest
import org.apache.solr.common.{SolrException, SolrInputDocument}
import org.apache.spark.rdd.RDD
import org.apache.spark.streaming.dstream.DStream

object SolRSupport {

  def indexDStreamOfDocs(zkHost: String,
                         collection: String,
                         batchSize: Int,
                         docDStream: DStream[SolrInputDocument]): Unit = {
    docDStream.foreachRDD(docRdd => {
      indexDoc(zkHost, collection, batchSize, docRdd)
    })
  }

  def indexDoc(zkHost: String,
               collection: String,
               batchSize: Int,
               docRdd: RDD[SolrInputDocument]): Unit = {
    docRdd.foreachPartition(it => {
      val solrServer = CloudSolRServerBuilder.build(zkHost)

      val batch = new util.ArrayList[SolrInputDocument]()

      while (it.hasNext) {
        val inputDoc = it.next()
        batch.add(inputDoc)
        if (batch.size() >= batchSize)
          sendBatchToSolr(solrServer, collection, batch)
      }

      if (!batch.isEmpty())
        sendBatchToSolr(solrServer, collection, batch)
    })
  }

  def sendBatchToSolr(solrServer: CloudSolrServer,
                      collection: String,
                      batch: util.Collection[SolrInputDocument]) {
    val req = new UpdateRequest()
    req.setParam("collection", collection)
    req.add(batch)

    try {
      solrServer.request(req)
    } catch {
      case e: Exception =>
        if (shouldRetry(e)) {
          // Back off briefly, then retry the request exactly once.
          try {
            Thread.sleep(2000)
          } catch {
            case e1: InterruptedException =>
              Thread.interrupted()
          }

          try {
            solrServer.request(req)
          } catch {
            case e1: Exception =>
              if (e1.isInstanceOf[RuntimeException]) {
                throw e1.asInstanceOf[RuntimeException]
              } else {
                throw new RuntimeException(e1)
              }
          }
        } else {
          if (e.isInstanceOf[RuntimeException]) {
            throw e.asInstanceOf[RuntimeException]
          } else {
            throw new RuntimeException(e)
          }
        }
    } finally {
      batch.clear()
    }
  }

  def shouldRetry(exc: Exception): Boolean = {
    // Retry only when the root cause of the failure is a transient network error.
    val rootCause = SolrException.getRootCause(exc)
    rootCause.isInstanceOf[ConnectException] ||
      rootCause.isInstanceOf[SocketException]
  }
}
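The retry decision above hinges on SolrException.getRootCause: a request is retried only when the root cause of the failure is a transient network error. A dependency-free sketch of the same root-cause check, using only the JDK (object and method names here are illustrative):

import java.net.{ConnectException, SocketException}

import scala.annotation.tailrec

object RetryCheck {
  // Walk the cause chain to its root (guarding against self-referential causes).
  @tailrec
  def rootCause(t: Throwable): Throwable =
    if (t.getCause == null || (t.getCause eq t)) t else rootCause(t.getCause)

  // Note: ConnectException is itself a SocketException, so the second test
  // already covers the first; both are kept to mirror the example above.
  def shouldRetry(e: Exception): Boolean = rootCause(e) match {
    case _: ConnectException | _: SocketException => true
    case _ => false
  }
}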
Example 3
Source File: ExchangeRateService.scala From core with Apache License 2.0
package com.smartbackpackerapp.service

import java.net.ConnectException

import cats.MonadError
import cats.effect.Effect
import cats.syntax.all._
import com.smartbackpackerapp.common.Log
import com.smartbackpackerapp.config.SBConfiguration
import com.smartbackpackerapp.model.Currency
import io.circe.generic.auto._
import org.http4s.circe._
import org.http4s.client.{Client, UnexpectedStatus}

class ExchangeRateService[F[_] : Effect](client: Client[F], sbConfig: SBConfiguration[F])
                                        (implicit L: Log[F])
    extends AbstractExchangeRateService[F](sbConfig) {

  override protected def retrieveExchangeRate(uri: String): F[CurrencyExchangeDTO] = {
    client.expect[CurrencyExchangeDTO](uri)(jsonOf[F, CurrencyExchangeDTO])
  }
}

abstract class AbstractExchangeRateService[F[_]](sbConfig: SBConfiguration[F])
                                                (implicit F: MonadError[F, Throwable], L: Log[F]) {

  protected val fixerUri: Currency => Currency => F[String] = baseCurrency => foreignCurrency => {
    val uri = sbConfig.fixerBaseUri.map(_.getOrElse("http://localhost:8081"))
    uri.map(x => s"$x/latest?base=${baseCurrency.value}&symbols=${foreignCurrency.value}")
  }

  protected def retrieveExchangeRate(uri: String): F[CurrencyExchangeDTO]

  // We don't want the whole destination service to fail if the exchange rate service is unavailable,
  // so the `UnexpectedStatus` and `ConnectException` errors are treated as an empty exchange rate.
  def exchangeRateFor(baseCurrency: Currency, foreignCurrency: Currency): F[CurrencyExchangeDTO] = {
    val ifEmpty = CurrencyExchangeDTO.empty(baseCurrency).pure[F]

    def performRequest(uri: String): F[CurrencyExchangeDTO] =
      retrieveExchangeRate(uri).recoverWith {
        case e: ConnectException => L.error(e).flatMap(_ => ifEmpty)
        case _: UnexpectedStatus => ifEmpty
      }

    validateCurrencies(baseCurrency, foreignCurrency).fold(ifEmpty) { _ =>
      for {
        uri <- fixerUri(baseCurrency)(foreignCurrency)
        _   <- L.info(s"Retrieving currency exchange from: $uri")
        er  <- performRequest(uri)
      } yield {
        if (er.rates.nonEmpty) er
        else er.copy(rates = Map(baseCurrency.value -> -1.0))
      }
    }
  }

  private def validateCurrencies(baseCurrency: Currency, foreignCurrency: Currency): Option[Currency] = {
    if (baseCurrency == foreignCurrency) none[Currency]
    else foreignCurrency.some
  }
}
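The recoverWith block in exchangeRateFor is the heart of this example: a ConnectException is logged and replaced by an empty result instead of failing the whole request. A minimal sketch of the same fallback pattern with plain Futures (fetchRate, the URI, and the sentinel value are illustrative assumptions):

import java.net.ConnectException

import scala.concurrent.duration._
import scala.concurrent.{Await, Future}

object FallbackSketch extends App {
  import scala.concurrent.ExecutionContext.Implicits.global

  // Stand-in for a remote call that fails because the service is down.
  def fetchRate(uri: String): Future[Double] =
    Future.failed(new ConnectException("Connection refused"))

  // Substitute a sentinel rate when the remote service is unreachable,
  // echoing the -1.0 sentinel used in the example above.
  val rate: Future[Double] =
    fetchRate("http://localhost:8081/latest").recover {
      case _: ConnectException => -1.0
    }

  println(Await.result(rate, 1.second)) // prints -1.0
}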
Example 4
Source File: DTLSConnectionFn.scala From stream-reactor with Apache License 2.0
package com.datamountaineer.streamreactor.connect.coap.connection

import java.io.FileInputStream
import java.net.{ConnectException, InetAddress, InetSocketAddress, URI}
import java.security.cert.Certificate
import java.security.{KeyStore, PrivateKey}

import com.datamountaineer.streamreactor.connect.coap.configs.{CoapConstants, CoapSetting}
import com.typesafe.scalalogging.StrictLogging
import org.eclipse.californium.core.CoapClient
import org.eclipse.californium.core.coap.CoAP
import org.eclipse.californium.core.network.CoapEndpoint
import org.eclipse.californium.core.network.config.NetworkConfig
import org.eclipse.californium.scandium.DTLSConnector
import org.eclipse.californium.scandium.config.DtlsConnectorConfig
import org.eclipse.californium.scandium.dtls.cipher.CipherSuite
import org.eclipse.californium.scandium.dtls.pskstore.InMemoryPskStore

// The enclosing object declaration is reconstructed for this excerpt; the
// original file defines further DTLS connection helpers that are omitted here.
object DTLSConnectionFn extends StrictLogging {

  def discoverServer(address: String, uri: URI): URI = {
    val client = new CoapClient(s"${uri.getScheme}://$address:${uri.getPort.toString}/.well-known/core")
    client.useNONs()

    val response = client.get()

    if (response != null) {
      logger.info(s"Discovered Server ${response.advanced().getSource.toString}.")
      new URI(uri.getScheme,
        uri.getUserInfo,
        response.advanced().getSource.getHostName,
        response.advanced().getSourcePort,
        uri.getPath,
        uri.getQuery,
        uri.getFragment)
    } else {
      logger.error(s"Unable to find any servers on local network with multicast address $address.")
      throw new ConnectException(s"Unable to find any servers on local network with multicast address $address.")
    }
  }
}
Example 5
Source File: PollingUtils.scala From sttp with Apache License 2.0
import java.io.FileNotFoundException
import java.net.{ConnectException, URL}

import scala.concurrent.TimeoutException
import scala.concurrent.duration._

object PollingUtils {

  def waitUntilServerAvailable(url: URL): Unit = {
    val connected = poll(5.seconds, 250.milliseconds)({
      urlConnectionAvailable(url)
    })
    if (!connected) {
      throw new TimeoutException(s"Failed to connect to $url")
    }
  }

  def poll(timeout: FiniteDuration, interval: FiniteDuration)(poll: => Boolean): Boolean = {
    val start = System.nanoTime()

    def go(): Boolean = {
      if (poll) {
        true
      } else if ((System.nanoTime() - start) > timeout.toNanos) {
        false
      } else {
        Thread.sleep(interval.toMillis)
        go()
      }
    }

    go()
  }

  def urlConnectionAvailable(url: URL): Boolean = {
    try {
      url.openConnection()
        .getInputStream
        .close()
      true
    } catch {
      case _: ConnectException      => false
      case _: FileNotFoundException => true // on 404
    }
  }
}
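A hypothetical usage, e.g. in test setup: block until a locally started HTTP server answers, failing after the five-second budget above (the URL is an assumption):

import java.net.URL

object PollingUtilsUsage extends App {
  // Probes every 250 ms for up to 5 seconds; throws TimeoutException if the
  // server never becomes reachable.
  PollingUtils.waitUntilServerAvailable(new URL("http://localhost:8080/"))
}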
Example 6
Source File: SocketInputDStream.scala From Spark-2.3.1 with Apache License 2.0
package org.apache.spark.streaming.dstream

import java.io._
import java.net.{ConnectException, Socket}
import java.nio.charset.StandardCharsets

import scala.reflect.ClassTag
import scala.util.control.NonFatal

import org.apache.spark.internal.Logging
import org.apache.spark.storage.StorageLevel
import org.apache.spark.streaming.StreamingContext
import org.apache.spark.streaming.receiver.Receiver
import org.apache.spark.util.NextIterator

private[streaming]
class SocketInputDStream[T: ClassTag](
    _ssc: StreamingContext,
    host: String,
    port: Int,
    bytesToObjects: InputStream => Iterator[T],
    storageLevel: StorageLevel
  ) extends ReceiverInputDStream[T](_ssc) {

  def getReceiver(): Receiver[T] = {
    new SocketReceiver(host, port, bytesToObjects, storageLevel)
  }
}

private[streaming]
class SocketReceiver[T: ClassTag](
    host: String,
    port: Int,
    bytesToObjects: InputStream => Iterator[T],
    storageLevel: StorageLevel
  ) extends Receiver[T](storageLevel) with Logging {

  private var socket: Socket = _

  def onStart() {
    logInfo(s"Connecting to $host:$port")
    try {
      socket = new Socket(host, port)
    } catch {
      case e: ConnectException =>
        // A refused connection triggers a supervised restart rather than a crash.
        restart(s"Error connecting to $host:$port", e)
        return
    }
    logInfo(s"Connected to $host:$port")

    // Start the thread that receives data over a connection.
    // receive() is defined in the full source file but omitted from this excerpt.
    new Thread("Socket Receiver") {
      setDaemon(true)
      override def run() { receive() }
    }.start()
  }

  def onStop() {
    // in case restart thread close it twice
    synchronized {
      if (socket != null) {
        socket.close()
        socket = null
        logInfo(s"Closed socket to $host:$port")
      }
    }
  }

  def bytesToLines(inputStream: InputStream): Iterator[String] = {
    val dataInputStream = new BufferedReader(
      new InputStreamReader(inputStream, StandardCharsets.UTF_8))
    new NextIterator[String] {
      protected override def getNext() = {
        val nextValue = dataInputStream.readLine()
        if (nextValue == null) {
          finished = true
        }
        nextValue
      }

      protected override def close() {
        dataInputStream.close()
      }
    }
  }
}
Example 7
Source File: SocketInputDStream.scala From multi-tenancy-spark with Apache License 2.0
The code in this example is, line for line, identical to Example 6 above: this Spark fork carries the same SocketInputDStream and SocketReceiver implementation, so the listing is not repeated here.
Example 8
Source File: Broker.scala From ncdbg with BSD 3-Clause "New" or "Revised" License
package com.programmaticallyspeaking.ncd.boot

import java.net.ConnectException

import akka.actor.ActorSystem
import com.programmaticallyspeaking.ncd.chrome.domains.EventEmitHook
import com.programmaticallyspeaking.ncd.chrome.net.FilePublisher
import com.programmaticallyspeaking.ncd.config.Conf
import com.programmaticallyspeaking.ncd.host.{ScriptEvent, ScriptHost}
import com.programmaticallyspeaking.ncd.ioc.Container
import com.programmaticallyspeaking.ncd.messaging.Observer
import com.programmaticallyspeaking.ncd.nashorn.{NashornDebugger, NashornDebuggerConnector, NashornScriptHost}
import org.slf4s.Logging

import scala.concurrent.{Future, Promise}
import scala.util.control.NonFatal
import scala.util.{Failure, Success}

case class BrokerConnection(host: NashornScriptHost, disconnect: () => Unit)

class Broker(conf: Conf)(implicit actorSystem: ActorSystem) extends Logging {
  import scala.concurrent.ExecutionContext.Implicits._

  def connect(errorCallback: Option[Throwable] => Unit): Future[BrokerConnection] = {
    val connectAddr = conf.connect()
    val connector = new NashornDebuggerConnector(connectAddr.host, connectAddr.port)
    val debuggerReady = connector.connect().map(vm => new NashornDebugger().create(vm))

    val connectionPromise = Promise[BrokerConnection]()
    debuggerReady.onComplete {
      case Success(host) =>
        startListening(host, errorCallback)

        try {
          def disconnect(): Unit = {
            host.virtualMachine.inner.dispose()
          }
          // Writing just 'disconnect' results in compilation warning about deprecated ETA expansion.
          val conn = BrokerConnection(host, disconnect _)
          connectionPromise.success(conn)
        } catch {
          case NonFatal(t) =>
            log.error("Binding failed", t)
            connectionPromise.failure(new RuntimeException("connection failed"))
        }

      case Failure(t) =>
        t match {
          case _: ConnectException =>
            log.error("Failed to connect to the debug target.")
            log.error("Please make sure that the debug target is started with debug VM arguments, for example:")
            log.error(s"  -Xdebug -Xrunjdwp:transport=dt_socket,server=y,suspend=n,address=${connectAddr.host}:${connectAddr.port}")
          case _ =>
            log.error("Failed to start the debugger", t)
        }

        // Wrap in RuntimeException if needed, otherwise we'll get UndeclaredThrowableException wrapping the cause.
        val error = if (t.isInstanceOf[RuntimeException]) t else new RuntimeException(t)
        connectionPromise.failure(error)
    }

    connectionPromise.future
  }

  private def startListening(host: NashornScriptHost, errorCallback: Option[Throwable] => Unit) = {
    host.events.subscribe(new Observer[ScriptEvent] {
      override def onNext(item: ScriptEvent): Unit = {}

      override def onError(error: Throwable): Unit = {
        log.error("Unknown error", error)
        errorCallback(Some(error))
      }

      override def onComplete(): Unit = {
        log.info("The debug target disconnected")
        errorCallback(None)
      }
    })
  }
}

class BootContainer(filePublisher: FilePublisher, scriptHost: ScriptHost)
  extends Container(Seq(filePublisher, scriptHost, new EventEmitHook))
Example 9
Source File: RemoraApp.scala From remora with MIT License
import java.io.IOException
import java.net.ConnectException
import java.util.concurrent.{TimeUnit, TimeoutException}

import akka.actor.ActorSystem
import akka.stream.{ActorMaterializer, ActorMaterializerSettings, Supervision}
import com.amazonaws.services.cloudwatch.{AmazonCloudWatchAsync, AmazonCloudWatchAsyncClientBuilder}
import com.blacklocus.metrics.CloudWatchReporterBuilder
import com.codahale.metrics.jvm.{GarbageCollectorMetricSet, MemoryUsageGaugeSet, ThreadStatesGaugeSet}
import com.typesafe.scalalogging.LazyLogging
import config.{KafkaSettings, MetricsSettings}
import kafka.admin.RemoraKafkaConsumerGroupService
import reporter.RemoraDatadogReporter

import scala.concurrent.duration._
import scala.util.control.NonFatal

object RemoraApp extends App with nl.grons.metrics.scala.DefaultInstrumented with LazyLogging {

  private val actorSystemName: String = "remora"
  implicit val actorSystem = ActorSystem(actorSystemName)

  metricRegistry.registerAll(new GarbageCollectorMetricSet)
  metricRegistry.registerAll(new MemoryUsageGaugeSet)
  metricRegistry.registerAll(new ThreadStatesGaugeSet)

  lazy val decider: Supervision.Decider = {
    case _: IOException | _: ConnectException | _: TimeoutException => Supervision.Restart
    case NonFatal(err: Throwable) =>
      actorSystem.log.error(err, "Unhandled Exception in Stream: {}", err.getMessage)
      Supervision.Stop
  }

  implicit val materializer = ActorMaterializer(
    ActorMaterializerSettings(actorSystem).withSupervisionStrategy(decider))(actorSystem)

  implicit val executionContext = actorSystem.dispatchers.lookup("kafka-consumer-dispatcher")
  val kafkaSettings = KafkaSettings(actorSystem.settings.config)
  val consumer = new RemoraKafkaConsumerGroupService(kafkaSettings)
  val kafkaClientActor = actorSystem.actorOf(KafkaClientActor.props(consumer), name = "kafka-client-actor")

  Api(kafkaClientActor).start()

  val metricsSettings = MetricsSettings(actorSystem.settings.config)

  if (metricsSettings.registryOptions.enabled) {
    val exportConsumerMetricsToRegistryActor =
      actorSystem.actorOf(ExportConsumerMetricsToRegistryActor.props(kafkaClientActor),
        name = "export-consumer-metrics-actor")
    actorSystem.scheduler.schedule(0 second, metricsSettings.registryOptions.intervalSeconds second,
      exportConsumerMetricsToRegistryActor, "export")
  }

  if (metricsSettings.cloudWatch.enabled) {
    logger.info("Reporting metricsRegistry to Cloudwatch")
    val amazonCloudWatchAsync: AmazonCloudWatchAsync = AmazonCloudWatchAsyncClientBuilder.defaultClient

    new CloudWatchReporterBuilder()
      .withNamespace(metricsSettings.cloudWatch.name)
      .withRegistry(metricRegistry)
      .withClient(amazonCloudWatchAsync)
      .build()
      .start(metricsSettings.cloudWatch.intervalMinutes, TimeUnit.MINUTES)
  }

  if (metricsSettings.dataDog.enabled) {
    logger.info(s"Reporting metricsRegistry to Datadog at ${metricsSettings.dataDog.agentHost}:${metricsSettings.dataDog.agentPort}")
    val datadogReporter = new RemoraDatadogReporter(metricRegistry, metricsSettings.dataDog)
    datadogReporter.startReporter()
  }
}
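The decider above is just a function from Throwable to a supervision directive, which makes the policy easy to exercise in isolation. A small sketch of the same shape (a simplified decider, not Remora's actual one):

import java.net.ConnectException

import akka.stream.Supervision

object DeciderSketch extends App {
  // Transient network errors restart the failing stage; anything else stops it.
  val decider: Supervision.Decider = {
    case _: ConnectException => Supervision.Restart
    case _                   => Supervision.Stop
  }

  assert(decider(new ConnectException("refused")) == Supervision.Restart)
}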
Example 10
Source File: SocketInputDStream.scala From drizzle-spark with Apache License 2.0
The code in this example is, line for line, identical to Example 6 above, so the listing is not repeated here.
Example 11
Source File: HttpVerb.scala From http-verbs with Apache License 2.0
package uk.gov.hmrc.http

import java.net.{ConnectException, URL}
import java.util.concurrent.TimeoutException

import com.typesafe.config.Config

import scala.collection.JavaConverters.iterableAsScalaIterableConverter
import scala.concurrent.{ExecutionContext, Future}
import scala.util.matching.Regex

trait HttpVerb extends Request {

  protected def configuration: Option[Config]

  def mapErrors(httpMethod: String, url: String, f: Future[HttpResponse])(
    implicit ec: ExecutionContext): Future[HttpResponse] =
    f.recoverWith {
      case e: TimeoutException => Future.failed(new GatewayTimeoutException(gatewayTimeoutMessage(httpMethod, url, e)))
      case e: ConnectException => Future.failed(new BadGatewayException(badGatewayMessage(httpMethod, url, e)))
    }

  def badGatewayMessage(verbName: String, url: String, e: Exception): String =
    s"$verbName of '$url' failed. Caused by: '${e.getMessage}'"

  def gatewayTimeoutMessage(verbName: String, url: String, e: Exception): String =
    s"$verbName of '$url' timed out with message '${e.getMessage}'"

  lazy val internalHostPatterns: Seq[Regex] = configuration match {
    case Some(config) if config.hasPathOrNull("internalServiceHostPatterns") =>
      config.getStringList("internalServiceHostPatterns").asScala.map(_.r).toSeq
    case _ =>
      Seq("^.*\\.service$".r, "^.*\\.mdtp$".r)
  }

  lazy val userAgentHeader: Seq[(String, String)] = configuration match {
    case Some(config) if config.hasPathOrNull("appName") =>
      Seq("User-Agent" -> config.getString("appName"))
    case _ =>
      Seq.empty
  }

  override def applicableHeaders(url: String)(implicit hc: HeaderCarrier): Seq[(String, String)] = {
    val headers = if (internalHostPatterns.exists(_.pattern.matcher(new URL(url).getHost).matches())) {
      hc.headers
    } else {
      hc.headers.filterNot(hc.otherHeaders.contains(_))
    }
    headers ++ userAgentHeader
  }
}
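mapErrors is the pattern worth noting here: infrastructure-level failures are translated into HTTP-flavoured exceptions, so a refused connection surfaces as a 502-style BadGatewayException and a timeout as a GatewayTimeoutException. A distilled, self-contained sketch of the same translation (the exception classes below are simplified stand-ins, not the library's own):

import java.net.ConnectException
import java.util.concurrent.TimeoutException

import scala.concurrent.{ExecutionContext, Future}

object MapErrorsSketch {
  class BadGateway(msg: String) extends RuntimeException(msg)
  class GatewayTimeout(msg: String) extends RuntimeException(msg)

  // Translate low-level failures into HTTP-flavoured ones; successful values
  // and any other failure pass through untouched.
  def mapErrors[A](httpMethod: String, url: String, f: Future[A])(
    implicit ec: ExecutionContext): Future[A] =
    f.recoverWith {
      case e: TimeoutException =>
        Future.failed(new GatewayTimeout(s"$httpMethod of '$url' timed out with message '${e.getMessage}'"))
      case e: ConnectException =>
        Future.failed(new BadGateway(s"$httpMethod of '$url' failed. Caused by: '${e.getMessage}'"))
    }
}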
Example 12
Source File: SocketInputDStream.scala From sparkoscope with Apache License 2.0
The code in this example is, line for line, identical to Example 6 above, so the listing is not repeated here.
Example 13
Source File: Cli.scala From bitcoin-s with MIT License
package org.bitcoins.cli

import java.net.ConnectException

import scala.util.{Failure, Success}

object Cli extends App {

  import System.err.{println => printerr}

  try {
    ConsoleCli.exec(args.toVector: _*) match {
      case Success(output) => println(output)
      case Failure(err) =>
        printerr(err.getMessage)
        sys.exit(1)
    }
  } catch {
    case _: ConnectException =>
      printerr(
        "Connection refused! Check that the server is running and configured correctly.")
      sys.exit(1)
  }
}
Example 14
Source File: FileSystem.scala From incubator-retired-gearpump with Apache License 2.0
package org.apache.gearpump.experiments.yarn.glue

import java.io.{InputStream, OutputStream}
import java.net.ConnectException

import org.apache.gearpump.util.LogUtil
import org.apache.hadoop.fs.Path

import scala.util.{Failure, Success, Try}

class FileSystem(yarnConfig: YarnConfig) {

  private val conf = yarnConfig.conf
  private val fs = org.apache.hadoop.fs.FileSystem.get(conf)

  private def LOG = LogUtil.getLogger(getClass)

  def open(file: String): InputStream = exceptionHandler {
    val path = new Path(file)
    fs.open(path)
  }

  def create(file: String): OutputStream = exceptionHandler {
    val path = new Path(file)
    fs.create(path)
  }

  def exists(file: String): Boolean = exceptionHandler {
    val path = new Path(file)
    fs.exists(path)
  }

  def name: String = {
    fs.getUri.toString
  }

  def getHomeDirectory: String = {
    fs.getHomeDirectory.toString
  }

  private def exceptionHandler[T](call: => T): T = {
    val callTry = Try(call)
    callTry match {
      case Success(v) => v
      case Failure(ex) =>
        if (ex.isInstanceOf[ConnectException]) {
          // The original wrote "$name" inside a plain string literal, so it was never
          // interpolated; the s-interpolator fixes that ($HADOOP_HOME stays literal).
          LOG.error(s"Please check whether we connect to the right HDFS file system, " +
            s"current file system is $name." +
            "\nPlease copy all configs under " +
            "$HADOOP_HOME/etc/hadoop into conf/yarnconf directory of Gearpump package, " +
            "so that we can use the right File system.", ex)
        }
        throw ex
    }
  }
}
Example 15
Source File: SolRSupport.scala From Taxi360 with Apache License 2.0
Apart from the package name (com.hadooparchitecturebook.taxi360.streaming.ingestion.solr instead of com.cloudera.sa.taxi360.streaming.ingestion.solr), this listing is identical to Example 2 above, so it is not repeated here.
Example 16
Source File: RPCUtils.scala From Linkis with Apache License 2.0
package com.webank.wedatasphere.linkis.rpc.utils

import java.lang.reflect.UndeclaredThrowableException
import java.net.ConnectException

import com.netflix.client.ClientException
import com.webank.wedatasphere.linkis.rpc.exception.NoInstanceExistsException
import com.webank.wedatasphere.linkis.rpc.sender.SpringCloudFeignConfigurationCache
import feign.RetryableException
import org.apache.commons.lang.StringUtils

import scala.collection.JavaConversions._

object RPCUtils {

  def isReceiverNotExists(t: Throwable): Boolean = t match {
    case connect: ConnectException =>
      connect.getMessage != null && connect.getMessage.contains("Connection refused")
    case _: NoInstanceExistsException => true
    case t: UndeclaredThrowableException =>
      t.getCause match {
        case _: NoInstanceExistsException => true
        case _ => false
      }
    case t: RetryableException =>
      t.getCause match {
        case connect: ConnectException =>
          connect.getMessage != null && connect.getMessage.contains("Connection refused")
        case _ => false
      }
    case t: RuntimeException =>
      t.getCause match {
        case client: ClientException =>
          StringUtils.isNotBlank(client.getErrorMessage) &&
            client.getErrorMessage.contains("Load balancer does not have available server for client")
        case _ => false
      }
    case _ => false
  }

  def findService(parsedServiceId: String,
                  tooManyDeal: List[String] => Option[String]): Option[String] = {
    val services = SpringCloudFeignConfigurationCache.getDiscoveryClient
      .getServices.filter(_.toLowerCase.contains(parsedServiceId.toLowerCase)).toList
    if (services.length == 1) Some(services.head)
    else if (services.length > 1) tooManyDeal(services)
    else None
  }
}
Example 17
Source File: RetryableRPCInterceptor.scala From Linkis with Apache License 2.0
package com.webank.wedatasphere.linkis.rpc.interceptor.common

import java.net.ConnectException

import com.webank.wedatasphere.linkis.common.ServiceInstance
import com.webank.wedatasphere.linkis.common.exception.DWCRetryException
import com.webank.wedatasphere.linkis.common.utils.RetryHandler
import com.webank.wedatasphere.linkis.protocol.RetryableProtocol
import com.webank.wedatasphere.linkis.rpc.exception.{DWCRPCRetryException, NoInstanceExistsException}
import com.webank.wedatasphere.linkis.rpc.interceptor.{RPCInterceptor, RPCInterceptorChain, RPCInterceptorExchange, ServiceInstanceRPCInterceptorChain}
import com.webank.wedatasphere.linkis.rpc.utils.RPCUtils
import feign.RetryableException
import org.apache.commons.lang.StringUtils
import org.springframework.stereotype.Component

@Component
class RetryableRPCInterceptor extends RPCInterceptor {

  override val order: Int = 20

  // private val commonRetryHandler = new RPCRetryHandler
  // commonRetryHandler.setRetryInfo(new RetryableProtocol{})
  //
  // private def isCommonRetryHandler(retry: RetryableProtocol): Boolean =
  //   retry.maxPeriod == commonRetryHandler.getRetryMaxPeriod &&
  //   retry.period == commonRetryHandler.getRetryPeriod &&
  //   retry.retryNum == commonRetryHandler.getRetryNum &&
  //   (retry.retryExceptions.isEmpty || commonRetryHandler.getRetryExceptions.containsSlice(retry.retryExceptions))

  override def intercept(interceptorExchange: RPCInterceptorExchange, chain: RPCInterceptorChain): Any =
    interceptorExchange.getProtocol match {
      case retry: RetryableProtocol =>
        val retryName = retry.getClass.getSimpleName
        // if (isCommonRetryHandler(retry)) commonRetryHandler.retry(chain.handle(interceptorExchange), retryName)
        // else {
        val retryHandler = new RPCRetryHandler
        retryHandler.setRetryInfo(retry, chain)
        retryHandler.retry(chain.handle(interceptorExchange), retryName)
        // }
      case _ => chain.handle(interceptorExchange)
    }

  class RPCRetryHandler extends RetryHandler {
    addRetryException(classOf[ConnectException])
    addRetryException(classOf[RetryableException])

    private var serviceInstance: Option[ServiceInstance] = None

    def setRetryInfo(retry: RetryableProtocol, chain: RPCInterceptorChain): Unit = {
      setRetryNum(retry.retryNum)
      setRetryPeriod(retry.period)
      setRetryMaxPeriod(retry.maxPeriod)
      retry.retryExceptions.foreach(addRetryException)
      chain match {
        case s: ServiceInstanceRPCInterceptorChain => serviceInstance = Option(s.getServiceInstance)
        case _ =>
      }
    }

    private def isNoServiceException(t: Throwable): Boolean = RPCUtils.isReceiverNotExists(t)

    override def exceptionCanRetry(t: Throwable): Boolean = t match {
      case _: DWCRPCRetryException => true
      case r: DWCRetryException => r.getErrCode == DWCRPCRetryException.RPC_RETRY_ERROR_CODE
      case _ =>
        (serviceInstance.exists(s => StringUtils.isBlank(s.getInstance)) && isNoServiceException(t)) ||
          super.exceptionCanRetry(t)
    }
  }
}

object RetryableRPCInterceptor {
  def isRetryableProtocol(message: Any): Boolean = message match {
    case _: RetryableProtocol => true
    case _ => false
  }
}