java.net.UnknownHostException Scala Examples
The following examples show how to use java.net.UnknownHostException.
The project, source file, and license for each example are noted above the listing.
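Before the project-specific examples, here is a minimal, self-contained sketch of the JDK behaviour they all deal with: InetAddress.getByName (and getAllByName) throws UnknownHostException when a name cannot be resolved. The host name below is deliberately unresolvable and purely illustrative.

import java.net.{ InetAddress, UnknownHostException }

object ResolveExample extends App {
  val name = "no-such-host.invalid" // hypothetical, unresolvable name

  try {
    val addr: InetAddress = InetAddress.getByName(name)
    println(s"$name -> ${addr.getHostAddress}")
  } catch {
    case e: UnknownHostException => println(s"could not resolve: ${e.getMessage}")
  }
}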
Example 1
Source File: AkkaClusterConfig.scala, from akka-tools (MIT License)
package no.nextgentel.oss.akkatools.cluster

import java.net.{UnknownHostException, InetAddress}
import java.util.{List => JList}

import scala.collection.JavaConverters._

object AkkaClusterConfig {

  // Java API
  def create(hostname: String, port: Int, seedNodes: JList[String]): AkkaClusterConfig = {
    AkkaClusterConfig(Option(hostname), port, seedNodes.asScala.toList)
  }
}

case class AkkaClusterConfig(
  // Will be resolved if null
  private val hostname: Option[String],
  // the tcp port used on this node
  val port: Int,
  // list of all seed-nodes - basically single01 and single02 - on the following form: <host>:<port>
  // e.g. in yaml:
  //  - "single01-testing.nextgentel.net:9091"
  //  - "single02-testing.nextgentel.net:9091"
  val seedNodes: List[String]
) {

  lazy val thisHostname: String = hostname.getOrElse(resolveHostName())

  def thisHostnameAndPort(): String = thisHostname + ":" + port

  /**
   * Generates the akka config string used to configure the remote listening on this node,
   * and info about all other (seed) nodes.
   * It is VERY important that 'actorSystemName' is using the same name as ActorSystem.create( [name] ).
   * If this name is not correct, it will not work to connect to the remote actor system,
   * even though the host and port are correct.
   *
   * @param actorSystemName the same name as used in ActorSystem.create( [name] ) on all nodes.
   * @return akka config string that can be used in ConfigFactory.parseString()
   */
  def generateAkkaConfig(actorSystemName: String): String = {
    if (port == null.asInstanceOf[Int]) throw new Exception("port is not specified") // null.asInstanceOf[Int] is 0, i.e. the port was never set
    if (seedNodes == null || seedNodes.isEmpty) throw new Exception("seedNodes is not specified")

    val seedNodesString = "[" + seedNodes.map {
      hostAndPort => "\"akka.tcp://" + actorSystemName + "@" + hostAndPort + "\""
    }.mkString(",") + "]"

    s"""akka.actor.provider = "akka.cluster.ClusterActorRefProvider"
       |akka.remote.enabled-transports = ["akka.remote.netty.tcp"]
       |akka.remote.netty.tcp.hostname="$thisHostname"
       |akka.remote.netty.tcp.port=$port
       |akka.cluster.seed-nodes = $seedNodesString
    """.stripMargin
  }

  protected def resolveHostName(): String = {
    try {
      InetAddress.getLocalHost.getCanonicalHostName
    } catch {
      case ex: UnknownHostException => throw new Exception("Error resolving hostName", ex)
    }
  }

  def withSeedNodeList(newSeedNodeList: List[String]): AkkaClusterConfig = {
    new AkkaClusterConfig(Some(thisHostname), port, newSeedNodeList)
  }
}
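As a rough usage sketch (not taken from the akka-tools project itself): the generated string can be parsed with Typesafe Config and handed to an ActorSystem. All host names, the port, and the actor-system name below are hypothetical.

import akka.actor.ActorSystem
import com.typesafe.config.ConfigFactory
import no.nextgentel.oss.akkatools.cluster.AkkaClusterConfig

val clusterConfig = AkkaClusterConfig(
  Some("node1.example.net"),                                   // hostname of this node (hypothetical)
  9091,                                                        // port
  List("seed1.example.net:9091", "seed2.example.net:9091"))    // seed nodes as <host>:<port>

// The name passed to generateAkkaConfig must match the name given to ActorSystem(...).
val akkaConfig = ConfigFactory
  .parseString(clusterConfig.generateAkkaConfig("MyActorSystem"))
  .withFallback(ConfigFactory.load())
val system = ActorSystem("MyActorSystem", akkaConfig)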
Example 2
Source File: JGitSystemReader.scala, from sbt-dynver (Apache License 2.0)
package sbtdynver

import java.io.{ File, IOException }
import java.net.{ InetAddress, UnknownHostException }
import java.nio.file.{ Files, InvalidPathException, Path, Paths }

import org.eclipse.jgit.internal.JGitText
import org.eclipse.jgit.lib.{ Config, Constants }
import org.eclipse.jgit.storage.file.FileBasedConfig
import org.eclipse.jgit.util.{ FS, StringUtils, SystemReader }
import org.slf4j.LoggerFactory

// Copy of org.eclipse.jgit.util.SystemReader.Default with:
// * calls to Files.createDirectories guarded by if !Files.isDirectory
//   necessary because my ~/.config is a symlink to a directory
//   which Files.createDirectories isn't happy with
object JGitSystemReader extends SystemReader {
  private val LOG = LoggerFactory.getLogger(getClass)

  lazy val init: Unit = SystemReader.setInstance(this)

  override lazy val getHostname = {
    try InetAddress.getLocalHost.getCanonicalHostName
    catch { case _: UnknownHostException => "localhost" }
  }.ensuring(_ != null)

  override def getenv(variable: String): String = System.getenv(variable)
  override def getProperty(key: String): String = System.getProperty(key)
  override def getCurrentTime: Long             = System.currentTimeMillis
  override def getTimezone(when: Long): Int     = getTimeZone.getOffset(when) / (60 * 1000)

  override def openUserConfig(parent: Config, fs: FS) =
    new FileBasedConfig(parent, new File(fs.userHome, ".gitconfig"), fs)

  override def openSystemConfig(parent: Config, fs: FS): FileBasedConfig = {
    if (StringUtils.isEmptyOrNull(getenv(Constants.GIT_CONFIG_NOSYSTEM_KEY))) {
      val configFile = fs.getGitSystemConfig
      if (configFile != null) return new FileBasedConfig(parent, configFile, fs)
    }
    new FileBasedConfig(parent, null, fs) {
      override def load(): Unit = () // do not load
      override def isOutdated   = false // regular class would bomb here
    }
  }

  override def openJGitConfig(parent: Config, fs: FS): FileBasedConfig = {
    val xdgPath = getXDGConfigHome(fs)
    if (xdgPath != null) {
      var configPath: Path = null
      try {
        configPath = xdgPath.resolve("jgit")
        if (!Files.isDirectory(configPath)) Files.createDirectories(configPath)
        configPath = configPath.resolve(Constants.CONFIG)
        return new FileBasedConfig(parent, configPath.toFile, fs)
      } catch {
        case e: IOException =>
          LOG.error(JGitText.get.createJGitConfigFailed, configPath: Any, e)
      }
    }
    new FileBasedConfig(parent, new File(fs.userHome, ".jgitconfig"), fs)
  }

  private def getXDGConfigHome(fs: FS): Path = {
    var configHomePath = getenv(Constants.XDG_CONFIG_HOME)
    if (StringUtils.isEmptyOrNull(configHomePath))
      configHomePath = new File(fs.userHome, ".config").getAbsolutePath
    try {
      val xdgHomePath = Paths.get(configHomePath)
      if (!Files.isDirectory(xdgHomePath)) Files.createDirectories(xdgHomePath)
      xdgHomePath
    } catch {
      case e @ (_: IOException | _: InvalidPathException) =>
        LOG.error(JGitText.get.createXDGConfigHomeFailed, configHomePath: Any, e)
        null
    }
  }
}
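A minimal sketch of how this reader might be installed before any JGit calls (the surrounding build code that normally forces init is assumed): referencing the lazy val registers the object process-wide, so a failed local hostname lookup yields "localhost" instead of propagating an UnknownHostException.

import org.eclipse.jgit.util.SystemReader
import sbtdynver.JGitSystemReader

JGitSystemReader.init                                    // installs this reader via SystemReader.setInstance
val hostname = SystemReader.getInstance.getHostname      // "localhost" if the local host cannot be resolved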
Example 3
Source File: Dns.scala, from perf_tester (Apache License 2.0)
package akka.io

import java.net.{ Inet4Address, Inet6Address, InetAddress, UnknownHostException }

import akka.actor._
import akka.routing.ConsistentHashingRouter.ConsistentHashable
import com.typesafe.config.Config

import scala.collection.{ breakOut, immutable }

abstract class Dns {
  def cached(name: String): Option[Dns.Resolved] = None

  def resolve(name: String)(system: ActorSystem, sender: ActorRef): Option[Dns.Resolved] = {
    val ret = cached(name)
    if (ret.isEmpty)
      IO(Dns)(system).tell(Dns.Resolve(name), sender)
    ret
  }
}

object Dns extends ExtensionId[DnsExt] with ExtensionIdProvider {
  sealed trait Command

  case class Resolve(name: String) extends Command with ConsistentHashable {
    override def consistentHashKey = name
  }

  case class Resolved(name: String, ipv4: immutable.Seq[Inet4Address], ipv6: immutable.Seq[Inet6Address])
    extends Command {
    val addrOption: Option[InetAddress] = IpVersionSelector.getInetAddress(ipv4.headOption, ipv6.headOption)

    @throws[UnknownHostException]
    def addr: InetAddress = addrOption match {
      case Some(ipAddress) ⇒ ipAddress
      case None            ⇒ throw new UnknownHostException(name)
    }
  }

  object Resolved {
    def apply(name: String, addresses: Iterable[InetAddress]): Resolved = {
      val ipv4: immutable.Seq[Inet4Address] = addresses.collect({ case a: Inet4Address ⇒ a })(breakOut)
      val ipv6: immutable.Seq[Inet6Address] = addresses.collect({ case a: Inet6Address ⇒ a })(breakOut)
      Resolved(name, ipv4, ipv6)
    }
  }

  def cached(name: String)(system: ActorSystem): Option[Resolved] = {
    Dns(system).cache.cached(name)
  }

  def resolve(name: String)(system: ActorSystem, sender: ActorRef): Option[Resolved] = {
    Dns(system).cache.resolve(name)(system, sender)
  }

  override def lookup() = Dns

  override def createExtension(system: ExtendedActorSystem): DnsExt = new DnsExt(system)

  override def get(system: ActorSystem): DnsExt = super.get(system)
}

class DnsExt(system: ExtendedActorSystem) extends IO.Extension {
  val Settings = new Settings(system.settings.config.getConfig("akka.io.dns"))

  class Settings private[DnsExt] (_config: Config) {
    import _config._

    val Dispatcher: String         = getString("dispatcher")
    val Resolver: String           = getString("resolver")
    val ResolverConfig: Config     = getConfig(Resolver)
    val ProviderObjectName: String = ResolverConfig.getString("provider-object")
  }

  val provider: DnsProvider = system.dynamicAccess.getClassFor[DnsProvider](Settings.ProviderObjectName).get.newInstance()
  val cache: Dns = provider.cache

  val manager: ActorRef = {
    system.systemActorOf(
      props = Props(provider.managerClass, this).withDeploy(Deploy.local).withDispatcher(Settings.Dispatcher),
      name = "IO-DNS")
  }

  def getResolver: ActorRef = manager
}

object IpVersionSelector {
  def getInetAddress(ipv4: Option[Inet4Address], ipv6: Option[Inet6Address]): Option[InetAddress] =
    System.getProperty("java.net.preferIPv6Addresses") match {
      case "true" ⇒ ipv6 orElse ipv4
      case _      ⇒ ipv4 orElse ipv6
    }
}
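A small sketch of where UnknownHostException enters this API (the host name below is made up): a Resolved answer that carries no addresses throws when addr is forced.

import java.net.{ InetAddress, UnknownHostException }
import akka.io.Dns

// Hypothetical: an empty lookup result for a name that did not resolve.
val empty = Dns.Resolved("missing.example.org", Iterable.empty[InetAddress])

try println(empty.addr)
catch { case e: UnknownHostException => println(s"no address known for ${e.getMessage}") }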
Example 4
Source File: InetAddressDnsResolver.scala, from perf_tester (Apache License 2.0)
package akka.io

import java.net.{ InetAddress, UnknownHostException }
import java.security.Security
import java.util.concurrent.TimeUnit

import akka.actor.Actor
import akka.util.Helpers.Requiring
import com.typesafe.config.Config

import scala.collection.immutable
import scala.util.Try

class InetAddressDnsResolver(cache: SimpleDnsCache, config: Config) extends Actor {

  // Controls the cache policy for successful lookups only
  private final val CachePolicyProp = "networkaddress.cache.ttl"
  // Deprecated JVM property key, keeping for legacy compatibility; replaced by CachePolicyProp
  private final val CachePolicyPropFallback = "sun.net.inetaddr.ttl"

  // Controls the cache policy for negative lookups only
  private final val NegativeCachePolicyProp = "networkaddress.cache.negative.ttl"
  // Deprecated JVM property key, keeping for legacy compatibility; replaced by NegativeCachePolicyProp
  private final val NegativeCachePolicyPropFallback = "sun.net.inetaddr.negative.ttl"

  // default values (-1 and 0 are magic numbers, trust them)
  private final val Forever         = -1
  private final val Never           = 0
  private final val DefaultPositive = 30

  private lazy val cachePolicy: Int = {
    val n = Try(Security.getProperty(CachePolicyProp).toInt)
      .orElse(Try(Security.getProperty(CachePolicyPropFallback).toInt))
      .getOrElse(DefaultPositive) // default
    if (n < 0) Forever else n
  }

  private lazy val negativeCachePolicy = {
    val n = Try(Security.getProperty(NegativeCachePolicyProp).toInt)
      .orElse(Try(Security.getProperty(NegativeCachePolicyPropFallback).toInt))
      .getOrElse(0) // default
    if (n < 0) Forever else n
  }

  private def getTtl(path: String, positive: Boolean): Long =
    config.getString(path) match {
      case "default" ⇒
        (if (positive) cachePolicy else negativeCachePolicy) match {
          case Never      ⇒ Never
          case n if n > 0 ⇒ TimeUnit.SECONDS.toMillis(n)
          case _          ⇒ Long.MaxValue // forever if negative
        }
      case "forever" ⇒ Long.MaxValue
      case "never"   ⇒ Never
      case _ ⇒ config.getDuration(path, TimeUnit.MILLISECONDS)
        .requiring(_ > 0, s"akka.io.dns.$path must be 'default', 'forever', 'never' or positive duration")
    }

  val positiveTtl: Long = getTtl("positive-ttl", positive = true)
  val negativeTtl: Long = getTtl("negative-ttl", positive = false)

  override def receive = {
    case Dns.Resolve(name) ⇒
      val answer = cache.cached(name) match {
        case Some(a) ⇒ a
        case None ⇒
          try {
            val answer = Dns.Resolved(name, InetAddress.getAllByName(name))
            if (positiveTtl != Never) cache.put(answer, positiveTtl)
            answer
          } catch {
            case e: UnknownHostException ⇒
              val answer = Dns.Resolved(name, immutable.Seq.empty, immutable.Seq.empty)
              if (negativeTtl != Never) cache.put(answer, negativeTtl)
              answer
          }
      }
      sender() ! answer
  }
}
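The TTL lookup above boils down to a small pattern that can be tried on its own, independent of Akka (this is a sketch of the same property chain, not part of the project):

import java.security.Security
import scala.util.Try

// Read the JVM-wide positive-lookup TTL: the standard security property first,
// then the deprecated sun.net key, then a 30-second default.
val positiveTtlSeconds: Int =
  Try(Security.getProperty("networkaddress.cache.ttl").toInt)
    .orElse(Try(Security.getProperty("sun.net.inetaddr.ttl").toInt))
    .getOrElse(30)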
Example 5
Source File: HostnamePlatform.scala, from ip4s (Apache License 2.0)
package com.comcast.ip4s

import java.net.{InetAddress, UnknownHostException}

import cats.data.NonEmptyList
import cats.effect.Sync

private[ip4s] trait HostnamePlatform { self: Hostname =>

  def resolveAll[F[_]: Sync]: F[Option[NonEmptyList[IpAddress]]] =
    Sync[F].delay {
      try {
        val addrs = InetAddress.getAllByName(self.toString)
        NonEmptyList.fromList(addrs.toList.flatMap(addr => IpAddress.fromBytes(addr.getAddress)))
      } catch {
        case _: UnknownHostException => None
      }
    }
}
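Assuming the resolveAll signature shown above together with cats-effect's IO, a resolution attempt might look like the sketch below (example.com is just a placeholder): an unknown host comes back as None rather than as a thrown UnknownHostException.

import cats.effect.IO
import com.comcast.ip4s.Hostname

val program: IO[Unit] =
  Hostname.fromString("example.com") match {
    case Some(host) =>
      host.resolveAll[IO].flatMap {
        case Some(addrs) => IO(println(addrs.toList.mkString(", ")))
        case None        => IO(println("unknown host"))
      }
    case None => IO(println("not a syntactically valid hostname"))
  }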
Example 6
Source File: SocketInputDStream.scala, from iolap (Apache License 2.0)
package org.apache.spark.streaming.dstream

import scala.util.control.NonFatal
import scala.reflect.ClassTag

import java.io._
import java.net.{UnknownHostException, Socket}

import org.apache.spark.Logging
import org.apache.spark.storage.StorageLevel
import org.apache.spark.streaming.StreamingContext
import org.apache.spark.streaming.receiver.Receiver
import org.apache.spark.util.NextIterator

private[streaming]
class SocketInputDStream[T: ClassTag](
    @transient ssc_ : StreamingContext,
    host: String,
    port: Int,
    bytesToObjects: InputStream => Iterator[T],
    storageLevel: StorageLevel
  ) extends ReceiverInputDStream[T](ssc_) {

  def getReceiver(): Receiver[T] = {
    new SocketReceiver(host, port, bytesToObjects, storageLevel)
  }
}

private[streaming]
class SocketReceiver[T: ClassTag](
    host: String,
    port: Int,
    bytesToObjects: InputStream => Iterator[T],
    storageLevel: StorageLevel
  ) extends Receiver[T](storageLevel) with Logging {

  def onStart() {
    // Start the thread that receives data over a connection
    new Thread("Socket Receiver") {
      setDaemon(true)
      // receive() connects the socket and stores the incoming data; it is defined
      // later in the full source file and omitted from this excerpt
      override def run() { receive() }
    }.start()
  }

  def onStop() {
    // There is nothing much to do as the thread calling receive()
    // is designed to stop by itself if isStopped() returns false
  }

  def bytesToLines(inputStream: InputStream): Iterator[String] = {
    val dataInputStream = new BufferedReader(new InputStreamReader(inputStream, "UTF-8"))
    new NextIterator[String] {
      protected override def getNext() = {
        val nextValue = dataInputStream.readLine()
        if (nextValue == null) {
          finished = true
        }
        nextValue
      }

      protected override def close() {
        dataInputStream.close()
      }
    }
  }
}
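This receiver is not normally constructed directly; a sketch of the public entry point that creates it under the hood (app name, host, and port below are placeholders):

import org.apache.spark.SparkConf
import org.apache.spark.storage.StorageLevel
import org.apache.spark.streaming.{ Seconds, StreamingContext }

val conf  = new SparkConf().setAppName("socket-example").setMaster("local[2]")
val ssc   = new StreamingContext(conf, Seconds(1))
// socketTextStream ultimately instantiates SocketInputDStream / SocketReceiver.
val lines = ssc.socketTextStream("stream-host.example.com", 9999, StorageLevel.MEMORY_AND_DISK_SER_2)
lines.print()
ssc.start()
ssc.awaitTermination()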
Example 7
Source File: SocketInputDStream.scala, from spark1.52 (Apache License 2.0)
package org.apache.spark.streaming.dstream

import scala.util.control.NonFatal
import scala.reflect.ClassTag

import java.io._
import java.net.{UnknownHostException, Socket}

import org.apache.spark.Logging
import org.apache.spark.storage.StorageLevel
import org.apache.spark.streaming.StreamingContext
import org.apache.spark.streaming.receiver.Receiver
import org.apache.spark.util.NextIterator

private[streaming]
class SocketInputDStream[T: ClassTag](
    @transient ssc_ : StreamingContext,
    host: String,
    port: Int,
    bytesToObjects: InputStream => Iterator[T],
    storageLevel: StorageLevel
  ) extends ReceiverInputDStream[T](ssc_) {

  def getReceiver(): Receiver[T] = {
    new SocketReceiver(host, port, bytesToObjects, storageLevel)
  }
}

private[streaming]
class SocketReceiver[T: ClassTag](
    host: String,
    port: Int,
    bytesToObjects: InputStream => Iterator[T],
    storageLevel: StorageLevel
  ) extends Receiver[T](storageLevel) with Logging {

  def onStart() {
    // Start the thread that receives data over a connection
    new Thread("Socket Receiver") {
      setDaemon(true)
      // receive() connects the socket and stores the incoming data; it is defined
      // later in the full source file and omitted from this excerpt
      override def run() { receive() }
    }.start()
  }

  def onStop() {
    // There is nothing much to do as the thread calling receive()
    // is designed to stop by itself if isStopped() returns false
  }

  def bytesToLines(inputStream: InputStream): Iterator[String] = {
    val dataInputStream = new BufferedReader(new InputStreamReader(inputStream, "UTF-8"))
    new NextIterator[String] {
      protected override def getNext() = {
        val nextValue = dataInputStream.readLine()
        if (nextValue == null) {
          finished = true
        }
        nextValue
      }

      protected override def close() {
        dataInputStream.close()
      }
    }
  }
}
Example 8
Source File: AkkaDiscoveryNameResolver.scala, from akka-grpc (Apache License 2.0)
package akka.grpc.internal

import java.net.{ InetAddress, InetSocketAddress, UnknownHostException }

import akka.discovery.ServiceDiscovery.ResolvedTarget
import akka.discovery.{ Lookup, ServiceDiscovery }
import akka.grpc.GrpcClientSettings
import io.grpc.{ Attributes, EquivalentAddressGroup, NameResolver, Status }
import io.grpc.NameResolver.Listener

import scala.concurrent.duration.FiniteDuration
import scala.concurrent.{ ExecutionContext, Promise }
import scala.util.{ Failure, Success }

class AkkaDiscoveryNameResolver(
    discovery: ServiceDiscovery,
    defaultPort: Int,
    serviceName: String,
    portName: Option[String],
    protocol: Option[String],
    resolveTimeout: FiniteDuration)(implicit val ec: ExecutionContext)
    extends NameResolver {

  override def getServiceAuthority: String = serviceName

  val listener: Promise[Listener] = Promise()

  override def start(l: Listener): Unit = {
    listener.trySuccess(l)
    lookup(l)
  }

  override def refresh(): Unit =
    listener.future.onComplete {
      case Success(l) => lookup(l)
      case Failure(_) => // We never fail this promise
    }

  def lookup(listener: Listener): Unit = {
    discovery.lookup(Lookup(serviceName, portName, protocol), resolveTimeout).onComplete {
      case Success(result) =>
        try {
          listener.onAddresses(addresses(result.addresses), Attributes.EMPTY)
        } catch {
          case e: UnknownHostException =>
            // TODO at least log
            listener.onError(Status.UNKNOWN.withDescription(e.getMessage))
        }
      case Failure(e) =>
        // TODO at least log
        listener.onError(Status.UNKNOWN.withDescription(e.getMessage))
    }
  }

  @throws[UnknownHostException]
  private def addresses(addresses: Seq[ResolvedTarget]) = {
    import scala.collection.JavaConverters._
    addresses
      .map(target => {
        val port = target.port.getOrElse(defaultPort)
        val address = target.address.getOrElse(InetAddress.getByName(target.host))
        new EquivalentAddressGroup(new InetSocketAddress(address, port))
      })
      .asJava
  }

  override def shutdown(): Unit = ()
}

object AkkaDiscoveryNameResolver {
  def apply(settings: GrpcClientSettings)(implicit ec: ExecutionContext): AkkaDiscoveryNameResolver =
    new AkkaDiscoveryNameResolver(
      settings.serviceDiscovery,
      settings.defaultPort,
      settings.serviceName,
      settings.servicePortName,
      settings.serviceProtocol,
      settings.resolveTimeout)
}
Example 9
Source File: K8DnsDiscovery.scala, from zio-keeper (Apache License 2.0)
package zio.keeper.discovery

import java.net.UnknownHostException
import java.util

import javax.naming.directory.InitialDirContext
import javax.naming.{ Context, NamingException }

import zio.duration.Duration
import zio.keeper.{ Error, ServiceDiscoveryError }
import zio.logging.Logger
import zio.nio.core.{ InetAddress, InetSocketAddress, SocketAddress }
import zio.{ IO, UIO, ZIO }

private class K8DnsDiscovery(
  log: Logger[String],
  serviceDns: InetAddress,
  serviceDnsTimeout: Duration,
  servicePort: Int
) extends Discovery.Service {

  final override val discoverNodes: IO[Error, Set[InetSocketAddress]] = {
    for {
      _         <- log.info(s"k8s dns discovery: $serviceDns, port: $servicePort, timeout: $serviceDnsTimeout")
      addresses <- lookup(serviceDns, serviceDnsTimeout)
      nodes     <- IO.foreach(addresses)(addr => SocketAddress.inetSocketAddress(addr, servicePort))
    } yield nodes.toSet[InetSocketAddress]
  }.catchAllCause { ex =>
    log.error(s"discovery strategy ${this.getClass.getSimpleName} failed.", ex) *>
      IO.halt(ex.map(e => ServiceDiscoveryError(e.getMessage)))
  }

  private def lookup(
    serviceDns: InetAddress,
    serviceDnsTimeout: Duration
  ): IO[Exception, Set[InetAddress]] = {
    import scala.jdk.CollectionConverters._

    val env = new util.Hashtable[String, String]
    env.put(Context.INITIAL_CONTEXT_FACTORY, "com.sun.jndi.dns.DnsContextFactory")
    env.put(Context.PROVIDER_URL, "dns:")
    env.put("com.sun.jndi.dns.timeout.initial", serviceDnsTimeout.toMillis.toString)

    for {
      dirContext  <- IO.effect(new InitialDirContext(env)).refineToOrDie[NamingException]
      attributes  <- UIO.effectTotal(dirContext.getAttributes(serviceDns.hostname, Array("SRV")))
      srvAttribute = Option(attributes.get("srv")).toList.flatMap(_.getAll.asScala)
      addresses   <- ZIO.foldLeft(srvAttribute)(Set.empty[InetAddress]) {
                       case (acc, address: String) =>
                         extractHost(address)
                           .flatMap(InetAddress.byName)
                           .map(acc + _)
                           .refineToOrDie[UnknownHostException]
                       case (acc, _) =>
                         UIO.succeed(acc)
                     }
    } yield addresses
  }

  private def extractHost(server: String): UIO[String] =
    log.debug(s"k8 dns on response: $server") *>
      UIO.effectTotal {
        val host = server.split(" ")(3)
        host.replaceAll("\\\\.$", "")
      }
}
Example 10
Source File: SocketInputDStream.scala, from BigDatalog (Apache License 2.0)
package org.apache.spark.streaming.dstream

import scala.util.control.NonFatal
import scala.reflect.ClassTag

import java.io._
import java.net.{UnknownHostException, Socket}

import org.apache.spark.Logging
import org.apache.spark.storage.StorageLevel
import org.apache.spark.streaming.StreamingContext
import org.apache.spark.streaming.receiver.Receiver
import org.apache.spark.util.NextIterator

private[streaming]
class SocketInputDStream[T: ClassTag](
    ssc_ : StreamingContext,
    host: String,
    port: Int,
    bytesToObjects: InputStream => Iterator[T],
    storageLevel: StorageLevel
  ) extends ReceiverInputDStream[T](ssc_) {

  def getReceiver(): Receiver[T] = {
    new SocketReceiver(host, port, bytesToObjects, storageLevel)
  }
}

private[streaming]
class SocketReceiver[T: ClassTag](
    host: String,
    port: Int,
    bytesToObjects: InputStream => Iterator[T],
    storageLevel: StorageLevel
  ) extends Receiver[T](storageLevel) with Logging {

  def onStart() {
    // Start the thread that receives data over a connection
    new Thread("Socket Receiver") {
      setDaemon(true)
      // receive() connects the socket and stores the incoming data; it is defined
      // later in the full source file and omitted from this excerpt
      override def run() { receive() }
    }.start()
  }

  def onStop() {
    // There is nothing much to do as the thread calling receive()
    // is designed to stop by itself if isStopped() returns false
  }

  def bytesToLines(inputStream: InputStream): Iterator[String] = {
    val dataInputStream = new BufferedReader(new InputStreamReader(inputStream, "UTF-8"))
    new NextIterator[String] {
      protected override def getNext() = {
        val nextValue = dataInputStream.readLine()
        if (nextValue == null) {
          finished = true
        }
        nextValue
      }

      protected override def close() {
        dataInputStream.close()
      }
    }
  }
}
Example 11
Source File: ServerContext.scala, from Neutrino (Apache License 2.0)
package com.ebay.neutrino.util

import java.net.{UnknownHostException, InetAddress}

import com.typesafe.scalalogging.slf4j.StrictLogging

object ServerContext extends StrictLogging {

  val fullHostName = {
    var hostName: String = "Not Available"
    try {
      hostName = InetAddress.getLocalHost.getHostName
    } catch {
      case ex: UnknownHostException => logger.warn("Unable to get the hostname")
    }
    hostName
  }

  val canonicalHostName = {
    var hostName: String = "Not Available"
    try {
      hostName = InetAddress.getLocalHost.getCanonicalHostName
    } catch {
      case ex: UnknownHostException => logger.warn("Unable to get the hostname")
    }
    hostName
  }

  val hostAddress = {
    var hostAddress: String = "Not Available"
    try {
      hostAddress = InetAddress.getLocalHost.getHostAddress
    } catch {
      case ex: UnknownHostException => logger.warn("Unable to get the hostaddress")
    }
    hostAddress
  }
}
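Since each value is computed once when the object is first touched (falling back to "Not Available" on UnknownHostException), usage is plain field access, for example:

import com.ebay.neutrino.util.ServerContext

println(s"host=${ServerContext.fullHostName} addr=${ServerContext.hostAddress}")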
Example 12
Source File: Cassandra.scala, from unicorn (Apache License 2.0)
package unicorn.bigtable.cassandra

import java.util.Properties
import java.net.{InetAddress, UnknownHostException}

import scala.collection.JavaConversions._

import org.apache.cassandra.locator.SimpleSnitch
import org.apache.cassandra.thrift.Cassandra.Client
import org.apache.cassandra.thrift.{ConsistencyLevel, KsDef, CfDef}
import org.apache.thrift.transport.TFramedTransport
import org.apache.thrift.transport.TSocket
import org.apache.thrift.protocol.TBinaryProtocol

import unicorn.bigtable._
import unicorn.util.Logging

// NOTE: the enclosing `class Cassandra` definition (which declares `log` and the
// other table operations) is truncated in this excerpt; only one of its methods
// and the companion object are shown.

  override def compactTable(name: String): Unit = {
    // fail silently
    log.warn("Cassandra client API doesn't support compaction")
  }
}

object Cassandra {

  def apply(host: String, port: Int): Cassandra = {
    // For ultra-wide rows, we set the maxLength to 16MB.
    // Note that we also need to set the server side configuration
    // thrift_framed_transport_size_in_mb in cassandra.yaml.
    // In case of ultra-wide rows, it is better to use intra-row scan.
    val transport = new TFramedTransport(new TSocket(host, port), 16 * 1024 * 1024)
    transport.open

    new Cassandra(transport)
  }
}
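A hypothetical connection sketch using the companion object above; the host name is a placeholder and 9160 is the classic Cassandra Thrift port:

import unicorn.bigtable.cassandra.Cassandra

val db = Cassandra("cassandra-host.example.com", 9160)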