java.net.URI Scala Examples
The following examples show how to use java.net.URI in Scala. Each example is drawn from an open-source project; the source file, project, and license are noted above each listing.
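Before the project examples, here is a minimal, self-contained sketch of the core java.net.URI operations the listings below rely on: parsing, the component accessors, and relativization. The URL strings are invented for illustration.

import java.net.URI

object UriBasics {
  def main(args: Array[String]): Unit = {
    // new URI(...) parses eagerly and throws URISyntaxException on malformed input
    val uri = new URI("https://example.com:8080/docs/index.html?lang=en#intro")
    println(uri.getScheme)   // https
    println(uri.getHost)     // example.com
    println(uri.getPort)     // 8080
    println(uri.getPath)     // /docs/index.html
    println(uri.getQuery)    // lang=en
    println(uri.getFragment) // intro

    // relativize strips a base prefix, a pattern several report subscribers below use
    val base = new URI("file:/project/")
    println(base.relativize(new URI("file:/project/module/pom.xml"))) // module/pom.xml
  }
}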
Example 1
Source File: MavenExcludeDependenciesSubscriber.scala From RTran with Apache License 2.0
package com.ebay.rtran.maven.report

import java.io.{File, OutputStream}
import java.net.URI
import java.util.Optional

import ch.qos.logback.classic.spi.ILoggingEvent
import com.ebay.rtran.report.api.IReportEventSubscriber

import scala.compat.java8.OptionConverters._
import scala.util.Try

class MavenExcludeDependenciesSubscriber(projectRoot: File)
  extends IReportEventSubscriber[ExcludeDependencyEvent] {

  private[this] var events = Map.empty[URI, Set[ExcludeDependencyEvent]]

  override def filter(event: scala.Any): Optional[ExcludeDependencyEvent] = {
    val excludeEvent = event match {
      case e: ILoggingEvent =>
        if (e.getLoggerName.endsWith("MavenExcludeDependenciesRule")
          && e.getMessage == "{} excluded {} from {} in {}") {
          val args = e.getArgumentArray
          Try(ExcludeDependencyEvent(
            args(1).asInstanceOf[Set[_]].map(_.toString),
            args(2).toString,
            args(3).asInstanceOf[File]
          )).toOption
        } else None
      case _ => None
    }
    excludeEvent.asJava
  }

  override def dumpTo(outputStream: OutputStream): Unit = if (events.nonEmpty) {
    val outputTemplate =
      """
        |### MavenExcludeDependenciesRule
        |The following artifacts were excluded:
      """.stripMargin
    val content = events.foldLeft(outputTemplate) { (c, event) =>
      val header = s"\n#### File [${event._1}](${event._1})\n|Artifact|Exclusions|\n|-------|------|\n"
      c + header + event._2.map(e => e.dep -> e.exclusions).toMap.foldLeft("") { (result, entry) =>
        result + s"|${entry._1}|" + entry._2.foldLeft("<ul>")(_ + "<li>" + _ + "</li>") + "</ul>|\n"
      }
    }
    outputStream.write(content.getBytes("utf8"))
  }

  override def doAccept(event: ExcludeDependencyEvent): Unit = {
    val relativePomPath = projectRoot.toURI relativize event.pomFile.toURI
    events get relativePomPath match {
      case Some(set) => events += relativePomPath -> (set + event)
      case None => events += relativePomPath -> Set(event)
    }
  }
}

case class ExcludeDependencyEvent(exclusions: Set[String], dep: String, pomFile: File)
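The doAccept method above keys report entries by each POM's path relative to the project root. As a hedged micro-example of that File-to-URI relativization pattern (paths invented):

import java.io.File
import java.net.URI

val projectRoot = new File("/work/project")
val pomFile = new File("/work/project/module-a/pom.xml")
// java.net.URI.relativize treats the base path as a directory prefix,
// so this yields the relative URI "module-a/pom.xml"
val relativePomPath: URI = projectRoot.toURI.relativize(pomFile.toURI)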
Example 2
Source File: MavenAddDependenciesSubscriber.scala From RTran with Apache License 2.0
package com.ebay.rtran.maven.report

import java.io.{File, OutputStream}
import java.net.URI
import java.util.Optional

import ch.qos.logback.classic.spi.ILoggingEvent
import com.ebay.rtran.report.api.IReportEventSubscriber

import scala.compat.java8.OptionConverters._
import scala.util.Try

class MavenAddDependenciesSubscriber(projectRoot: File)
  extends IReportEventSubscriber[AddDependencyEvent] {

  private[this] var details = Map.empty[URI, List[String]]

  override def filter(event: scala.Any): Optional[AddDependencyEvent] = {
    val artifact = event match {
      case e: ILoggingEvent =>
        if (e.getLoggerName.endsWith("MavenAddDependenciesRule")
          && e.getMessage == "{} added dependency {} to {}") {
          val args = e.getArgumentArray
          Try(AddDependencyEvent(args(1).toString, args(2).asInstanceOf[File])).toOption
        } else None
      case _ => None
    }
    artifact.asJava
  }

  override def dumpTo(outputStream: OutputStream): Unit = if (details.nonEmpty) {
    val outputTemplate =
      """
        |### MavenAddDependenciesRule
        |The following artifacts were added to the POM:
      """.stripMargin
    val content = details.foldLeft(outputTemplate) { (c, detail) =>
      val header = s"\n#### File [${detail._1}](${detail._1})\n|Artifacts|\n|---------|\n"
      c + detail._2.foldLeft(header) { (result, artifact) =>
        result + s"|$artifact|\n"
      }
    }
    outputStream.write(content.getBytes("utf8"))
  }

  override def doAccept(event: AddDependencyEvent): Unit = {
    val relativePomPath = projectRoot.toURI relativize event.pomFile.toURI
    details get relativePomPath match {
      case Some(list) => details += relativePomPath -> (event.dependency :: list)
      case None => details += relativePomPath -> List(event.dependency)
    }
  }
}

case class AddDependencyEvent(dependency: String, pomFile: File)
Example 3
Source File: MavenAddManagedDependenciesSubscriber.scala From RTran with Apache License 2.0
package com.ebay.rtran.maven.report

import java.io.{File, OutputStream}
import java.net.URI
import java.util.Optional

import ch.qos.logback.classic.spi.ILoggingEvent
import com.ebay.rtran.report.api.IReportEventSubscriber

import scala.compat.java8.OptionConverters._
import scala.util.Try

class MavenAddManagedDependenciesSubscriber(projectRoot: File)
  extends IReportEventSubscriber[AddManagedDependencyEvent] {

  private[this] var details = Map.empty[URI, List[String]]

  override def filter(event: scala.Any): Optional[AddManagedDependencyEvent] = {
    val artifact = event match {
      case e: ILoggingEvent =>
        if (e.getLoggerName.endsWith("MavenAddManagedDependenciesRule")
          && e.getMessage == "{} added managed dependency {} to {}") {
          val args = e.getArgumentArray
          Try(AddManagedDependencyEvent(args(1).toString, args(2).asInstanceOf[File])).toOption
        } else None
      case _ => None
    }
    artifact.asJava
  }

  override def dumpTo(outputStream: OutputStream): Unit = if (details.nonEmpty) {
    val outputTemplate =
      """
        |### MavenAddManagedDependenciesRule
        |The following artifacts were added to dependencyManagement of the POM:
      """.stripMargin
    val content = details.foldLeft(outputTemplate) { (c, detail) =>
      val header = s"\n#### File [${detail._1}](${detail._1})\n|Artifacts|\n|---------|\n"
      c + detail._2.foldLeft(header) { (result, artifact) =>
        result + s"|$artifact|\n"
      }
    }
    outputStream.write(content.getBytes("utf8"))
  }

  override def doAccept(event: AddManagedDependencyEvent): Unit = {
    val relativePomPath = projectRoot.toURI relativize event.pomFile.toURI
    details get relativePomPath match {
      case Some(list) => details += relativePomPath -> (event.dependency :: list)
      case None => details += relativePomPath -> List(event.dependency)
    }
  }
}

case class AddManagedDependencyEvent(dependency: String, pomFile: File)
Example 4
Source File: KyuubiDistributedCacheManager.scala From kyuubi with Apache License 2.0
package org.apache.spark.deploy.yarn

import java.net.URI

import scala.collection.mutable.{HashMap, Map}

import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.{FileStatus, FileSystem, Path}
import org.apache.hadoop.yarn.api.records.{LocalResource, LocalResourceType}

  def addResource(
      fs: FileSystem,
      conf: Configuration,
      destPath: Path,
      localResources: HashMap[String, LocalResource],
      resourceType: LocalResourceType,
      link: String,
      statCache: Map[URI, FileStatus]): Unit = {
    cacheManager.addResource(fs, conf, destPath, localResources, resourceType,
      link, statCache, appMasterOnly = true)
  }
}
Example 5
Source File: KyuubiDistributedCacheManagerSuite.scala From kyuubi with Apache License 2.0
package org.apache.spark.deploy.yarn

import java.net.URI

import scala.collection.mutable.{HashMap, Map}

import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.{FileStatus, FileSystem, Path}
import org.apache.hadoop.yarn.api.records.{LocalResource, LocalResourceType, LocalResourceVisibility}
import org.apache.hadoop.yarn.util.ConverterUtils
import org.apache.spark.{KyuubiSparkUtil, SparkFunSuite}
import org.mockito.Mockito.when
import org.scalatest.mock.MockitoSugar
import yaooqinn.kyuubi.utils.ReflectUtils

class KyuubiDistributedCacheManagerSuite extends SparkFunSuite with MockitoSugar {

  class MockClientDistributedCacheManager extends ClientDistributedCacheManager {
    override def getVisibility(
        conf: Configuration,
        uri: URI,
        statCache: Map[URI, FileStatus]): LocalResourceVisibility = {
      LocalResourceVisibility.PRIVATE
    }
  }

  test("add resource") {
    val fs = mock[FileSystem]
    val conf = new Configuration()
    val destPath = new Path("file:///foo.bar.com:8080/tmp/testing")
    val localResources = HashMap[String, LocalResource]()
    val statCache = HashMap[URI, FileStatus]()
    val status = new FileStatus()
    when(fs.getFileStatus(destPath)).thenReturn(status)
    val fileLink = "link"
    ReflectUtils.setFieldValue(
      KyuubiDistributedCacheManager, "cacheManager", new MockClientDistributedCacheManager)
    KyuubiDistributedCacheManager.addResource(
      fs, conf, destPath, localResources, LocalResourceType.FILE, fileLink, statCache)
    val res = localResources(fileLink)
    assert(res.getVisibility === LocalResourceVisibility.PRIVATE)
    assert(ConverterUtils.getPathFromYarnURL(res.getResource) === destPath)
    assert(res.getSize === 0)
    assert(res.getTimestamp === 0)
    assert(res.getType === LocalResourceType.FILE)

    val status2 = new FileStatus(
      10, false, 1, 1024, 10, 10, null, KyuubiSparkUtil.getCurrentUserName, null,
      new Path("/tmp/testing2"))
    val destPath2 = new Path("file:///foo.bar.com:8080/tmp/testing2")
    when(fs.getFileStatus(destPath2)).thenReturn(status2)
    val fileLink2 = "link2"
    KyuubiDistributedCacheManager.addResource(
      fs, conf, destPath2, localResources, LocalResourceType.FILE, fileLink2, statCache)
    val res2 = localResources(fileLink2)
    assert(res2.getVisibility === LocalResourceVisibility.PRIVATE)
    assert(ConverterUtils.getPathFromYarnURL(res2.getResource) === destPath2)
    assert(res2.getSize === 10)
    assert(res2.getTimestamp === 10)
    assert(res2.getType === LocalResourceType.FILE)
  }

  test("add resource when link null") {
    val distMgr = new MockClientDistributedCacheManager()
    val fs = mock[FileSystem]
    val conf = new Configuration()
    val destPath = new Path("file:///foo.bar.com:8080/tmp/testing")
    ReflectUtils.setFieldValue(KyuubiDistributedCacheManager, "cacheManager", distMgr)
    val localResources = HashMap[String, LocalResource]()
    val statCache = HashMap[URI, FileStatus]()
    when(fs.getFileStatus(destPath)).thenReturn(new FileStatus())
    intercept[Exception] {
      KyuubiDistributedCacheManager.addResource(
        fs, conf, destPath, localResources, LocalResourceType.FILE, null, statCache)
    }
    assert(localResources.get("link") === None)
    assert(localResources.size === 0)
  }

  test("test addResource archive") {
    val distMgr = new MockClientDistributedCacheManager()
    ReflectUtils.setFieldValue(KyuubiDistributedCacheManager, "cacheManager", distMgr)
    val fs = mock[FileSystem]
    val conf = new Configuration()
    val destPath = new Path("file:///foo.bar.com:8080/tmp/testing")
    val localResources = HashMap[String, LocalResource]()
    val statCache = HashMap[URI, FileStatus]()
    val realFileStatus = new FileStatus(
      10, false, 1, 1024, 10, 10, null, "testOwner", null, new Path("/tmp/testing"))
    when(fs.getFileStatus(destPath)).thenReturn(realFileStatus)
    KyuubiDistributedCacheManager.addResource(
      fs, conf, destPath, localResources, LocalResourceType.ARCHIVE, "link", statCache)
    val resource = localResources("link")
    assert(resource.getVisibility === LocalResourceVisibility.PRIVATE)
    assert(ConverterUtils.getPathFromYarnURL(resource.getResource) === destPath)
    assert(resource.getTimestamp === 10)
    assert(resource.getSize === 10)
    assert(resource.getType === LocalResourceType.ARCHIVE)
  }
}
Example 6
Source File: Utils.scala From kyuubi with Apache License 2.0
package org.apache.kyuubi

import java.io.{File, InputStreamReader, IOException}
import java.net.{URI, URISyntaxException}
import java.nio.charset.StandardCharsets
import java.util.{Properties, UUID}

import scala.collection.JavaConverters._
import scala.util.{Success, Try}

private[kyuubi] object Utils extends Logging {

  import org.apache.kyuubi.config.KyuubiConf._

  def strToSeq(s: String): Seq[String] = {
    require(s != null)
    s.split(",").map(_.trim).filter(_.nonEmpty)
  }

  def getSystemProperties: Map[String, String] = {
    sys.props.toMap
  }

  def getDefaultPropertiesFile(env: Map[String, String] = sys.env): Option[File] = {
    env.get(KYUUBI_CONF_DIR)
      .orElse(env.get(KYUUBI_HOME).map(_ + File.separator + "/conf"))
      .map(d => new File(d + File.separator + KYUUBI_CONF_FILE_NAME))
      .filter(f => f.exists() && f.isFile)
  }

  def getPropertiesFromFile(file: Option[File]): Map[String, String] = {
    file.map { f =>
      info(s"Loading Kyuubi properties from ${f.getAbsolutePath}")
      val reader = new InputStreamReader(f.toURI.toURL.openStream(), StandardCharsets.UTF_8)
      try {
        val properties = new Properties()
        properties.load(reader)
        properties.stringPropertyNames().asScala.map { k =>
          (k, properties.getProperty(k).trim)
        }.toMap
      } catch {
        case e: IOException =>
          throw new KyuubiException(
            s"Failed when loading Kyuubi properties from ${f.getAbsolutePath}", e)
      } finally {
        reader.close()
      }
    }.getOrElse(Map.empty)
  }

  def createTempDir(
      root: String = System.getProperty("java.io.tmpdir"),
      namePrefix: String = "kyuubi"): File = {
    val dir = createDirectory(root, namePrefix)
    dir.deleteOnExit()
    dir
  }
}
Example 7
Source File: GitUrlsParser.scala From jgit-spark-connector with Apache License 2.0
package tech.sourced.engine.util

import java.net.{URI, URISyntaxException}

object GitUrlsParser {

  private val isGit = """(.+)\@(.+):(.+)\.git""".r

  def getIdFromUrls(urls: Array[String]): String = {
    urls.flatMap({
      case isGit(_, host, path, _*) =>
        Some(s"$host/$path")
      case s =>
        try {
          val u: URI = new URI(s)
          Some(u.getHost + u.getPath)
        } catch {
          case _: URISyntaxException => None
        }
    }).distinct.min
  }
}
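For context, a hedged usage sketch of getIdFromUrls with invented remotes, showing how an SSH-style remote and its HTTP counterpart normalize to the same repository id:

val urls = Array(
  "git@github.com:owner/repo.git",
  "https://github.com/owner/repo"
)
// Both forms reduce to "github.com/owner/repo"; .distinct collapses them
// and .min picks a deterministic representative.
println(GitUrlsParser.getIdFromUrls(urls))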
Example 8
Source File: ExamplesTest.scala From json-schema-codegen with Apache License 2.0
import java.net.{Inet6Address, InetAddress, Inet4Address, URI}
import java.util.Date

import argonaut.Argonaut._
import argonaut._
import org.scalatest.{FlatSpec, Matchers}
import product.vox.shop._

class ExamplesTest extends FlatSpec with Matchers {

  "AdditionalPropertiesJson" should "encode and decode" in {
    import additional.Codecs._
    test(additional.Properties("bvalue", Some(Map("p1" -> additional.PropertiesAdditional(1)))))
  }

  "AdditionalPropertiesOnlyJson" should "encode and decode" in {
    import additional.properties.Codecs._
    test(additional.properties.Only(Some(Map("p1" -> additional.properties.OnlyAdditional(1)))))
  }

  "EnumStrings" should "encode and decode" in {
    import Codecs._
    test(Strings.a)
    test(Strings.b)
  }

  "EnumIntegers" should "encode and decode" in {
    import Codecs._
    test(Integers.v10)
    test(Integers.v20)
  }

  "Formats" should "encode and decode" in {
    import Codecs._
    test(Formats(
      new URI("http://uri/address"),
      InetAddress.getByName("127.0.0.1").asInstanceOf[Inet4Address],
      InetAddress.getByName("FE80:0000:0000:0000:0202:B3FF:FE1E:8329").asInstanceOf[Inet6Address],
      new Date()
    ))
  }

  "Product" should "decode from string and encode to string" in {
    import product.vox.shop.Codecs._
    val js = """{"name":"Recharge Cards (5 PIN)","prices":[{"cost":0.0187,"currency":"USD","moq":200000}],"eid":"iso-card-5-pin","description":"<p>ISO card, 5 PINs, printed 4 colour front and back</p>\n<p>Every card option shown below meets Tier 1 operator quality standards, at a competitive pricing including freight to your country that’s always openly visible, with streamlined fulfillment and support included, creating what we believe is the best overall value at the lowest total cost of ownership in the industry.</p>\n<p>Material: Cardboard 300 GSM, UV varnish both sides</p>\n<p>Scratch panel: Silver/Black Ink with black overprint</p> \n<p>Individually plastic wrapped in chain of 50 cards</p>\n<p>Small boxes of 500 cards, Master Carton of 5000 cards</p>\n<p>Alternate names: Scratch cards, RCV, top-up cards</p>\n","properties":[{"name":"Overscratch Protection","options":[{"name":"No protection"},{"name":"Protective measures against over scratching","prices":[{"cost":0.0253,"currency":"USD","moq":200000},{"cost":0.021,"currency":"USD","moq":500000},{"cost":0.02,"currency":"USD","moq":1000000},{"cost":0.0188,"currency":"USD","moq":5000000,"leadtime":21},{"cost":0.0173,"currency":"USD","moq":10000000},{"cost":0.0171,"currency":"USD","moq":50000000,"leadtime":28}]}]},{"name":"Payment terms","options":[{"name":"Payment on shipment readiness"},{"name":"Net 30 (subject to approval)"}]},{"name":"Order Timing","options":[{"name":"Ship order when ready"},{"name":"Pre-order for shipment in 3 months"}]}],"client":"112","sample":{"price":{"cost":250,"currency":"USD"}},"category":"recharge_cards","leadtime":14,"imageUrl":["https://d2w2n7dk76p3lq.cloudfront.net/product_image/recharge_cards/iso-5pin.png"],"types":[{"name":"Recharge Cards (5 PIN)","prices":[{"cost":0.0187,"currency":"USD","moq":200000},{"cost":0.0175,"currency":"USD","moq":500000},{"cost":0.0162,"currency":"USD","moq":1000000},{"cost":0.0153,"currency":"USD","moq":5000000,"leadtime":21},{"cost":0.0138,"currency":"USD","moq":10000000,"leadtime":28},{"cost":0.0137,"currency":"USD","moq":50000000,"leadtime":28}]}],"presentation":1000}"""
    val po = js.decodeValidation[Product]
    println(po)
    po.isSuccess shouldBe true
    test(po.toOption.get)
  }

  def test[T: CodecJson](value: T) = {
    val json = value.asJson
    println(json)
    json.jdecode[T] shouldBe DecodeResult.ok(value)
  }
}
Example 9
Source File: Naming.scala From json-schema-codegen with Apache License 2.0
package json.schema.codegen

import java.io.File
import java.net.URI

import json.schema.parser.SchemaDocument

import scala.util.control.NonFatal
import scalaz.Scalaz._

trait Naming {

  implicit class StringToolsO(v: Option[String]) {
    def noneIfEmpty: Option[String] = v match {
      case Some(s) if s == null || s.isEmpty => none
      case _ => v
    }
  }

  def packageName(scope: URI): String = {
    val dots = dotNotation(scope)
    dots.take(dots.length - 1).mkString(".")
  }

  def className(scope: URI): String = {
    val dots = dotNotation(scope)
    val name = dots.lastOption.getOrElse(dots.head)
    escapeReserved(underscoreToCamel(identifier(name))).capitalize
  }

  def className(schema: SchemaDocument[_], defaultName: Option[String]): SValidation[String] =
    schema.id.toRightDisjunction("Schema has no Id").map(className) orElse
      defaultName.toRightDisjunction("Default name not given").map(
        name => escapeReserved(underscoreToCamel(identifier(name))).capitalize)

  def identifier(scope: URI): scalaz.Validation[String, String] = {
    val str = scope.toString
    val lastSlash: Int = str.lastIndexOf('/')
    val lastSegment = (lastSlash >= 0) ? str.substring(lastSlash) | str
    val noExtSegment = removeExtension(lastSegment)
    identifier(noExtSegment.filter(c => c != '#')).some.noneIfEmpty
      .toSuccess(s"Unable to extract identifier from $scope")
  }

  def isIdentifier(c: Char): Boolean = c.isLetterOrDigit || c == '_'

  def isIdentifier(s: String): Boolean = !s.exists(!isIdentifier(_))

  def identifier(s: String): String = s.map(c => isIdentifier(c) ? c | '_')

  def underscoreToCamel(name: String): String =
    "_([a-z\\d])".r.replaceAllIn(name, _.group(1).toUpperCase)

  private def removeExtension(s: String) = {
    val extIndex = s.lastIndexOf('.')
    (extIndex >= 0) ? s.substring(0, extIndex) | s
  }

  private def dotNotation(scope: URI) = {
    val fragment: String =
      scope.getFragment.some.noneIfEmpty.map(s => s.startsWith("/") ? s | "/" + s).getOrElse("")
    // package from URI's fragment, path or host
    lazy val fromURI: String = scope.getPath.some.noneIfEmpty.getOrElse("") + fragment
    // package from file URI, using only the file name
    val simpleScope: String = try {
      (scope.getScheme == "file") ?
        (removeExtension(new File(new URI(scope.getScheme, scope.getHost, scope.getPath, null)).getName) + fragment) |
        fromURI
    } catch {
      case NonFatal(e) => fromURI
    }
    val dottedString = removeExtension(simpleScope)
      .map(c => Character.isJavaIdentifierPart(c) ? c | '.')
      .replaceAll("\\.+$", "")
      .replaceAll("^\\.+", "")
    dottedString.split('.').map(s => escapeReserved(underscoreToCamel(identifier(s))))
  }

  def escapePropertyReserved(s: String): Option[String] =
    if (reservedKeywords.contains(s)) none else s.some

  def escapeReserved(s: String): String = escapePropertyReserved(s).getOrElse('_' + s)

  val reservedKeywords: Set[String]
}
Example 10
Source File: ScalaNamingTest.scala From json-schema-codegen with Apache License 2.0
package json.schema.codegen

import java.net.URI

import org.scalatest.{FlatSpec, Matchers}

class ScalaNamingTest extends FlatSpec with Matchers with ScalaNaming {

  "ScalaNaming" should "name package" in {
    packageName(new URI("http://host/b/c#")) shouldBe "b"
    packageName(new URI("http://host/b/c#/d")) shouldBe "b.c"
    packageName(new URI("#/d/e")) shouldBe "d"
    packageName(new URI("a#d/e")) shouldBe "a.d"
    packageName(new URI("file:/a/b/c#d/e")) shouldBe "c.d"
    packageName(new URI("a-b-c#d/e")) shouldBe "a.b.c.d"
  }

  it should "name class" in {
    className(new URI("http://host/b/c#/d")) shouldBe "D"
    className(new URI("#/d/e")) shouldBe "E"
    className(new URI("a#/d/e")) shouldBe "E"
    className(new URI("file:a#d/e")) shouldBe "E"
    className(new URI("file:a#class")) shouldBe "Class"
    className(new URI("a-b-c")) shouldBe "C"
    className(new URI("file:/Users/todor/Documents/vox/json-schema-codegen/examples/src/main/json-schema/vox-buy-model-shop-requisition.json#/definitions/line")) shouldBe "Line"
  }
}
Example 11
Source File: Settings.scala From vinyldns with Apache License 2.0
package controllers

import java.net.URI

import cats.effect.{Blocker, ContextShift, IO}
import cats.implicits._
import com.typesafe.config.{Config, ConfigFactory}
import play.api.{ConfigLoader, Configuration}
import pureconfig._
import pureconfig.generic.auto._
import pureconfig.module.catseffect.syntax._
import vinyldns.core.repository.DataStoreConfig

import scala.collection.JavaConverters._
import scala.concurrent.duration._

// $COVERAGE-OFF$
class Settings(private val config: Configuration) {

  private implicit val cs: ContextShift[IO] =
    IO.contextShift(scala.concurrent.ExecutionContext.global)

  val ldapUser: String = config.get[String]("LDAP.user")
  val ldapPwd: String = config.get[String]("LDAP.password")
  val ldapDomain: String = config.get[String]("LDAP.domain")

  val ldapSearchBase: List[LdapSearchDomain] =
    config.get[List[LdapSearchDomain]]("LDAP.searchBase")
  val ldapCtxFactory: String = config.get[String]("LDAP.context.initialContextFactory")
  val ldapSecurityAuthentication: String =
    config.get[String]("LDAP.context.securityAuthentication")
  val ldapProviderUrl: URI = new URI(config.get[String]("LDAP.context.providerUrl"))
  val ldapUserNameAttribute: String =
    config.getOptional[String]("LDAP.userNameAttribute").getOrElse("sAMAccountName")

  val ldapSyncEnabled: Boolean =
    config.getOptional[Boolean]("LDAP.user-sync.enabled").getOrElse(false)
  val ldapSyncPollingInterval: FiniteDuration = config
    .getOptional[Int]("LDAP.user-sync.hours-polling-interval")
    .getOrElse(24)
    .hours

  val portalTestLogin: Boolean = config.getOptional[Boolean]("portal.test_login").getOrElse(false)

  val dataStoreConfigs: IO[List[DataStoreConfig]] = Blocker[IO].use { blocker =>
    ConfigSource
      .fromConfig(config.underlying)
      .at("data-stores")
      .loadF[IO, List[String]](blocker)
      .flatMap { lst =>
        lst
          .map(
            ConfigSource.fromConfig(config.underlying).at(_).loadF[IO, DataStoreConfig](blocker)
          )
          .parSequence
      }
  }

  val cryptoConfig = IO(config.get[Config]("crypto"))

  implicit def ldapSearchDomainLoader: ConfigLoader[List[LdapSearchDomain]] =
    new ConfigLoader[List[LdapSearchDomain]] {
      def load(config: Config, path: String): List[LdapSearchDomain] = {
        val domains = config.getConfigList(path).asScala.map { domainConfig =>
          val org = domainConfig.getString("organization")
          val domain = domainConfig.getString("domainName")
          LdapSearchDomain(org, domain)
        }
        domains.toList
      }
    }
}
// $COVERAGE-ON$

object Settings extends Settings(Configuration(ConfigFactory.load()))
Example 12
Source File: MQTTTestUtils.scala From spark1.52 with Apache License 2.0
package org.apache.spark.streaming.mqtt

import java.net.{ServerSocket, URI}

import scala.language.postfixOps

import com.google.common.base.Charsets.UTF_8
import org.apache.activemq.broker.{BrokerService, TransportConnector}
import org.apache.commons.lang3.RandomUtils
import org.eclipse.paho.client.mqttv3._
import org.eclipse.paho.client.mqttv3.persist.MqttDefaultFilePersistence

import org.apache.spark.util.Utils
import org.apache.spark.{Logging, SparkConf}

private[mqtt] class MQTTTestUtils extends Logging {

  private val persistenceDir = Utils.createTempDir()
  private val brokerHost = "localhost"
  private val brokerPort = findFreePort()

  private var broker: BrokerService = _
  private var connector: TransportConnector = _

  def brokerUri: String = {
    s"$brokerHost:$brokerPort"
  }

  def setup(): Unit = {
    broker = new BrokerService()
    broker.setDataDirectoryFile(Utils.createTempDir())
    connector = new TransportConnector()
    connector.setName("mqtt")
    connector.setUri(new URI("mqtt://" + brokerUri))
    broker.addConnector(connector)
    broker.start()
  }

  def teardown(): Unit = {
    if (broker != null) {
      broker.stop()
      broker = null
    }
    if (connector != null) {
      connector.stop()
      connector = null
    }
    Utils.deleteRecursively(persistenceDir)
  }

  private def findFreePort(): Int = {
    val candidatePort = RandomUtils.nextInt(1024, 65536)
    Utils.startServiceOnPort(candidatePort, (trialPort: Int) => {
      val socket = new ServerSocket(trialPort)
      socket.close()
      (null, trialPort)
    }, new SparkConf())._2
  }

  def publishData(topic: String, data: String): Unit = {
    var client: MqttClient = null
    try {
      val persistence = new MqttDefaultFilePersistence(persistenceDir.getAbsolutePath)
      client = new MqttClient("tcp://" + brokerUri, MqttClient.generateClientId(), persistence)
      client.connect()
      if (client.isConnected) {
        val msgTopic = client.getTopic(topic)
        val message = new MqttMessage(data.getBytes(UTF_8))
        message.setQos(1)
        message.setRetained(true)
        for (i <- 0 to 10) {
          try {
            msgTopic.publish(message)
          } catch {
            case e: MqttException if e.getReasonCode == MqttException.REASON_CODE_MAX_INFLIGHT =>
              // wait for Spark streaming to consume something from the message queue
              Thread.sleep(50)
          }
        }
      }
    } finally {
      if (client != null) {
        client.disconnect()
        client.close()
        client = null
      }
    }
  }
}
Example 13
Source File: ApplicationDescription.scala From spark1.52 with Apache License 2.0
package org.apache.spark.deploy

import java.net.URI

private[spark] class ApplicationDescription(
    // Application name; can be set via spark.app.name
    val name: String,
    // Maximum number of cores this application needs; can be set via spark.cores.max
    val maxCores: Option[Int],
    // Memory size of each executor process, defaulting to 1024 MB;
    // can be set via spark.executor.memory or SPARK_EXECUTOR_MEMORY
    val memoryPerExecutorMB: Int,
    // Command used by a worker node to launch the ExecutorBackend process. When a worker
    // receives LaunchExecutor from the master, it starts this command via ExecutorRunner.
    // The command carries everything needed to start a Java process: the main class name,
    // its arguments, environment settings, and so on.
    val command: Command,
    // Hostname and port of the application's web UI
    var appUiUrl: String,
    // If spark.eventLog.enabled (default false) is true, event logs are written to the
    // directory configured by spark.eventLog.dir
    val eventLogDir: Option[URI] = None,
    // short name of compression codec used when writing event logs, if any (e.g. lzf)
    val eventLogCodec: Option[String] = None,
    // Number of cores allocated to each executor; defaults to 1
    val coresPerExecutor: Option[Int] = None)
  extends Serializable {

  val user = System.getProperty("user.name", "<unknown>")

  def copy(
      name: String = name,
      maxCores: Option[Int] = maxCores,
      memoryPerExecutorMB: Int = memoryPerExecutorMB,
      command: Command = command,
      appUiUrl: String = appUiUrl,
      eventLogDir: Option[URI] = eventLogDir,
      eventLogCodec: Option[String] = eventLogCodec): ApplicationDescription =
    new ApplicationDescription(
      name, maxCores, memoryPerExecutorMB, command, appUiUrl, eventLogDir, eventLogCodec)

  override def toString: String = "ApplicationDescription(" + name + ")"
}
Example 14
Source File: ClientArguments.scala From spark1.52 with Apache License 2.0
package org.apache.spark.deploy

import java.net.{URI, URISyntaxException}

import scala.collection.mutable.ListBuffer

import org.apache.log4j.Level

import org.apache.spark.util.{IntParam, MemoryParam, Utils}

  private def printUsageAndExit(exitCode: Int) {
    // TODO: It wouldn't be too hard to allow users to submit their app and dependency jars
    //       separately similar to in the YARN client.
    val usage =
      s"""
        |Usage: DriverClient [options] launch <active-master> <jar-url> <main-class> [driver options]
        |Usage: DriverClient kill <active-master> <driver-id>
        |
        |Options:
        |   -c CORES, --cores CORES        Number of cores to request (default: $DEFAULT_CORES)
        |   -m MEMORY, --memory MEMORY     Megabytes of memory to request (default: $DEFAULT_MEMORY)
        |   -s, --supervise                Whether to restart the driver on failure
        |                                  (default: $DEFAULT_SUPERVISE)
        |   -v, --verbose                  Print more debugging output
      """.stripMargin
    // scalastyle:off println
    System.err.println(usage)
    // scalastyle:on println
    System.exit(exitCode)
  }
}

private[deploy] object ClientArguments {
  val DEFAULT_CORES = 1
  val DEFAULT_MEMORY = Utils.DEFAULT_DRIVER_MEM_MB // MB
  val DEFAULT_SUPERVISE = false

  def isValidJarUrl(s: String): Boolean = {
    try {
      val uri = new URI(s)
      uri.getScheme != null && uri.getPath != null && uri.getPath.endsWith(".jar")
    } catch {
      case _: URISyntaxException => false
    }
  }
}
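isValidJarUrl above accepts only strings that parse as a URI with a scheme and whose path ends in .jar. A hedged sanity check with made-up URLs (callable from within the org.apache.spark.deploy package, since the object is private[deploy]):

ClientArguments.isValidJarUrl("http://repo.example.com/libs/app.jar") // true
ClientArguments.isValidJarUrl("http://repo.example.com/libs/app.zip") // false: path does not end in .jar
ClientArguments.isValidJarUrl("app.jar")                              // false: no scheme
ClientArguments.isValidJarUrl("http://host/^bad^/app.jar")            // false: fails to parse as a URI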
Example 15
Source File: Http.scala From seed with Apache License 2.0
package seed.publish.util

import java.net.URI

import org.apache.commons.io.IOUtils
import org.apache.http.{HttpHost, HttpRequest, HttpRequestInterceptor}
import org.apache.http.entity.ContentType
import seed.util.ZioHelpers._
import zio.Task
import org.apache.http.auth.AuthScope
import org.apache.http.auth.UsernamePasswordCredentials
import org.apache.http.client.protocol.HttpClientContext
import org.apache.http.impl.auth.BasicScheme
import org.apache.http.impl.client.{BasicAuthCache, BasicCredentialsProvider}
import org.apache.http.impl.nio.client.CloseableHttpAsyncClient
import org.apache.http.impl.nio.client.HttpAsyncClients
import org.apache.http.nio.client.methods.HttpAsyncMethods
import org.apache.http.nio.protocol.HttpAsyncRequestProducer
import org.apache.http.protocol.HttpContext

class Http(httpClient: CloseableHttpAsyncClient) {
  def put(url: String, bytes: Array[Byte]): Task[String] = {
    val producer = HttpAsyncMethods.createPut(url, bytes, ContentType.DEFAULT_BINARY)
    send(url, producer)
  }

  def post(url: String, bytes: Array[Byte]): Task[String] = {
    val producer = HttpAsyncMethods.createPost(url, bytes, ContentType.DEFAULT_BINARY)
    send(url, producer)
  }

  def destroy(): Unit = httpClient.close()

  private def send(url: String, producer: HttpAsyncRequestProducer) = {
    val client = new CompletableHttpAsyncClient(httpClient)

    val uri = URI.create(url)
    val targetHost = new HttpHost(uri.getHost, uri.getPort, uri.getScheme)
    val authCache = new BasicAuthCache()
    authCache.put(targetHost, new BasicScheme())

    val clientContext = HttpClientContext.create()
    clientContext.setAuthCache(authCache)

    val future = client.execute(producer, HttpAsyncMethods.createConsumer(), clientContext)
    fromCompletableFuture(future)
      .map(r => IOUtils.toString(r.getEntity.getContent, "UTF-8"))
  }
}

class CustomRequestInterceptor(log: seed.Log) extends HttpRequestInterceptor {
  override def process(request: HttpRequest, context: HttpContext): Unit =
    log.debug("Sending HTTP request " + request + "...")
}

object Http {
  def create(log: seed.Log, authHost: String, auth: (String, String)): Http = {
    val credsProvider = new BasicCredentialsProvider()
    credsProvider.setCredentials(
      new AuthScope(authHost, 443),
      new UsernamePasswordCredentials(auth._1, auth._2)
    )

    val c = HttpAsyncClients
      .custom()
      .setDefaultCredentialsProvider(credsProvider)
      .addInterceptorFirst(new CustomRequestInterceptor(log))
      .build()
    c.start()

    new Http(c)
  }
}
Example 16
Source File: BuildEvents.scala From seed with Apache License 2.0
package seed.cli

import java.net.URI

import seed.Log
import seed.cli.util.WsClient
import seed.Cli.Command

object BuildEvents {
  def ui(command: Command.BuildEvents, log: Log): Unit = {
    val connection = command.webSocket
    val uri = s"ws://${connection.host}:${connection.port}"
    log.debug(s"Sending command to $uri...")
    val client = new WsClient(new URI(uri), () => {
      import io.circe.syntax._
      (WsCommand.BuildEvents: WsCommand).asJson.noSpaces
    }, log)
    client.connect()
  }
}
Example 17
Source File: WsClient.scala From seed with Apache License 2.0
package seed.cli.util

import java.net.URI
import java.nio.ByteBuffer

import org.java_websocket.client.WebSocketClient
import org.java_websocket.handshake.ServerHandshake
import seed.Log

class WsClient(serverUri: URI, payload: () => String, log: Log)
  extends WebSocketClient(serverUri) {

  override def onOpen(handshake: ServerHandshake): Unit = {
    log.debug("Connection established")
    send(payload())
  }

  override def onClose(code: Int, reason: String, remote: Boolean): Unit =
    log.debug("Connection closed")

  override def onMessage(message: String): Unit = print(message)

  override def onMessage(message: ByteBuffer): Unit = {}

  override def onError(ex: Exception): Unit = log.error(s"An error occurred: $ex")
}
Example 18
Source File: FileStorage.scala From ratatool with Apache License 2.0
package com.spotify.ratatool.io

import java.io.FileNotFoundException
import java.net.URI
import java.nio.ByteBuffer
import java.nio.channels.SeekableByteChannel

import org.apache.avro.file.SeekableInput
import org.apache.beam.sdk.io.FileSystems
import org.apache.beam.sdk.io.fs.MatchResult.Metadata

import scala.jdk.CollectionConverters._

private[ratatool] object FileStorage {
  def apply(path: String): FileStorage = new FileStorage(path)

  def isLocalUri(uri: URI): Boolean = uri.getScheme == null || uri.getScheme == "file"
  def isGcsUri(uri: URI): Boolean = uri.getScheme == "gs"
  def isHdfsUri(uri: URI): Boolean = uri.getScheme == "hdfs"
}

private[ratatool] class FileStorage(protected[io] val path: String) {

  def exists: Boolean = !FileSystems.`match`(path).metadata.isEmpty

  def listFiles: Seq[Metadata] = FileSystems.`match`(path).metadata().asScala.toList

  def isDone: Boolean = {
    val partPattern = "([0-9]{5})-of-([0-9]{5})".r
    val metadata = try {
      listFiles
    } catch {
      case e: FileNotFoundException => Seq.empty
    }
    val nums = metadata.flatMap { meta =>
      val m = partPattern.findAllIn(meta.resourceId().toString)
      if (m.hasNext) {
        Some(m.group(1).toInt, m.group(2).toInt)
      } else {
        None
      }
    }

    if (metadata.isEmpty) {
      // empty list
      false
    } else if (nums.nonEmpty) {
      // found xxxxx-of-yyyyy pattern
      val parts = nums.map(_._1).sorted
      val total = nums.map(_._2).toSet
      metadata.size == nums.size && // all paths matched
        total.size == 1 && total.head == parts.size && // yyyyy part
        parts.head == 0 && parts.last + 1 == parts.size // xxxxx part
    } else {
      true
    }
  }
}
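The scheme predicates in FileStorage above reduce to simple java.net.URI.getScheme checks; a minimal hedged illustration (bucket and path names invented):

import java.net.URI

FileStorage.isLocalUri(new URI("/tmp/data.avro"))           // true: no scheme
FileStorage.isLocalUri(new URI("file:///tmp/data.avro"))    // true
FileStorage.isGcsUri(new URI("gs://my-bucket/part-00000"))  // true
FileStorage.isHdfsUri(new URI("hdfs://namenode:8020/data")) // true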
Example 19
Source File: AkkaDiscoveryNameResolverProvider.scala From akka-grpc with Apache License 2.0
package akka.grpc.internal

import java.net.URI

import akka.discovery.ServiceDiscovery
import io.grpc.{ NameResolver, NameResolverProvider }

import scala.concurrent.ExecutionContext
import scala.concurrent.duration.FiniteDuration

class AkkaDiscoveryNameResolverProvider(
    discovery: ServiceDiscovery,
    defaultPort: Int,
    portName: Option[String],
    protocol: Option[String],
    resolveTimeout: FiniteDuration)(implicit ec: ExecutionContext)
    extends NameResolverProvider {

  override def isAvailable: Boolean = true

  override def priority(): Int = 5

  override def getDefaultScheme: String = "http"

  override def newNameResolver(targetUri: URI, args: NameResolver.Args): AkkaDiscoveryNameResolver = {
    require(targetUri.getAuthority != null, s"target uri should not have null authority, got [$targetUri]")
    new AkkaDiscoveryNameResolver(discovery, defaultPort, targetUri.getAuthority, portName, protocol, resolveTimeout)
  }
}
Example 20
Source File: AkkaDiscoveryNameResolverProviderSpec.scala From akka-grpc with Apache License 2.0
package akka.grpc.internal

import java.net.URI
import java.net.InetSocketAddress
import java.util.{ List => JList }

import scala.concurrent.ExecutionContext.Implicits._
import scala.concurrent.Future
import scala.concurrent.Promise
import scala.concurrent.duration._
import scala.collection.immutable

import io.grpc.Attributes
import io.grpc.NameResolver.Listener
import io.grpc.EquivalentAddressGroup

import akka.actor.ActorSystem
import akka.discovery.Lookup
import akka.discovery.ServiceDiscovery
import akka.discovery.ServiceDiscovery.Resolved
import akka.discovery.ServiceDiscovery.ResolvedTarget
import akka.testkit.TestKit

import org.scalatest.concurrent.ScalaFutures
import org.scalatest.matchers.should.Matchers
import org.scalatest.time.{ Millis, Seconds, Span }
import org.scalatest.wordspec.AnyWordSpecLike

class AkkaDiscoveryNameResolverProviderSpec
    extends TestKit(ActorSystem())
    with AnyWordSpecLike
    with Matchers
    with ScalaFutures {

  implicit override val patienceConfig =
    PatienceConfig(timeout = scaled(Span(2, Seconds)), interval = scaled(Span(5, Millis)))

  "AkkaDiscoveryNameResolverProviderSpec" should {
    "provide a NameResolver that uses the supplied serviceName" in {
      val serviceName = "testServiceName"
      val discovery = new ServiceDiscovery() {
        override def lookup(lookup: Lookup, resolveTimeout: FiniteDuration): Future[Resolved] = {
          lookup.serviceName should be(serviceName)
          Future.successful(Resolved(serviceName, immutable.Seq(ResolvedTarget("10.0.0.3", Some(4312), None))))
        }
      }
      val provider = new AkkaDiscoveryNameResolverProvider(
        discovery,
        443,
        portName = None,
        protocol = None,
        resolveTimeout = 3.seconds)

      val resolver = provider.newNameResolver(new URI("//" + serviceName), null)

      val addressGroupsPromise = Promise[List[EquivalentAddressGroup]]
      val listener = new Listener() {
        override def onAddresses(addresses: JList[EquivalentAddressGroup], attributes: Attributes): Unit = {
          import scala.collection.JavaConverters._
          addressGroupsPromise.success(addresses.asScala.toList)
        }
        override def onError(error: io.grpc.Status): Unit = ???
      }
      resolver.start(listener)

      val addressGroups = addressGroupsPromise.future.futureValue
      addressGroups.size should be(1)
      val addresses = addressGroups(0).getAddresses()
      addresses.size should be(1)
      val address = addresses.get(0).asInstanceOf[InetSocketAddress]
      address.getHostString() should be("10.0.0.3")
      address.getPort() should be(4312)
    }
  }
}
Example 21
Source File: AnalysisTest.scala From Spark-2.3.1 with Apache License 2.0
package org.apache.spark.sql.catalyst.analysis

import java.net.URI
import java.util.Locale

import org.apache.spark.sql.AnalysisException
import org.apache.spark.sql.catalyst.catalog.{CatalogDatabase, InMemoryCatalog, SessionCatalog}
import org.apache.spark.sql.catalyst.plans.PlanTest
import org.apache.spark.sql.catalyst.plans.logical._
import org.apache.spark.sql.internal.SQLConf

trait AnalysisTest extends PlanTest {

  protected val caseSensitiveAnalyzer = makeAnalyzer(caseSensitive = true)
  protected val caseInsensitiveAnalyzer = makeAnalyzer(caseSensitive = false)

  private def makeAnalyzer(caseSensitive: Boolean): Analyzer = {
    val conf = new SQLConf().copy(SQLConf.CASE_SENSITIVE -> caseSensitive)
    val catalog = new SessionCatalog(new InMemoryCatalog, FunctionRegistry.builtin, conf)
    catalog.createDatabase(
      CatalogDatabase("default", "", new URI("loc"), Map.empty),
      ignoreIfExists = false)
    catalog.createTempView("TaBlE", TestRelations.testRelation, overrideIfExists = true)
    catalog.createTempView("TaBlE2", TestRelations.testRelation2, overrideIfExists = true)
    catalog.createTempView("TaBlE3", TestRelations.testRelation3, overrideIfExists = true)
    new Analyzer(catalog, conf) {
      override val extendedResolutionRules = EliminateSubqueryAliases :: Nil
    }
  }

  protected def getAnalyzer(caseSensitive: Boolean) = {
    if (caseSensitive) caseSensitiveAnalyzer else caseInsensitiveAnalyzer
  }

  protected def checkAnalysis(
      inputPlan: LogicalPlan,
      expectedPlan: LogicalPlan,
      caseSensitive: Boolean = true): Unit = {
    val analyzer = getAnalyzer(caseSensitive)
    val actualPlan = analyzer.executeAndCheck(inputPlan)
    comparePlans(actualPlan, expectedPlan)
  }

  protected override def comparePlans(
      plan1: LogicalPlan,
      plan2: LogicalPlan,
      checkAnalysis: Boolean = false): Unit = {
    // Analysis tests may have not been fully resolved, so skip checkAnalysis.
    super.comparePlans(plan1, plan2, checkAnalysis)
  }

  protected def assertAnalysisSuccess(
      inputPlan: LogicalPlan,
      caseSensitive: Boolean = true): Unit = {
    val analyzer = getAnalyzer(caseSensitive)
    val analysisAttempt = analyzer.execute(inputPlan)
    try analyzer.checkAnalysis(analysisAttempt) catch {
      case a: AnalysisException =>
        fail(
          s"""
            |Failed to Analyze Plan
            |$inputPlan
            |
            |Partial Analysis
            |$analysisAttempt
          """.stripMargin, a)
    }
  }

  protected def assertAnalysisError(
      inputPlan: LogicalPlan,
      expectedErrors: Seq[String],
      caseSensitive: Boolean = true): Unit = {
    val analyzer = getAnalyzer(caseSensitive)
    val e = intercept[AnalysisException] {
      analyzer.checkAnalysis(analyzer.execute(inputPlan))
    }
    if (!expectedErrors.map(_.toLowerCase(Locale.ROOT)).forall(
        e.getMessage.toLowerCase(Locale.ROOT).contains)) {
      fail(
        s"""Exception message should contain the following substrings:
          |
          |  ${expectedErrors.mkString("\n  ")}
          |
          |Actual exception message:
          |
          |  ${e.getMessage}
        """.stripMargin)
    }
  }
}
Example 22
Source File: HadoopFileLinesReader.scala From Spark-2.3.1 with Apache License 2.0
package org.apache.spark.sql.execution.datasources

import java.io.Closeable
import java.net.URI

import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.Path
import org.apache.hadoop.io.Text
import org.apache.hadoop.mapreduce._
import org.apache.hadoop.mapreduce.lib.input.{FileSplit, LineRecordReader}
import org.apache.hadoop.mapreduce.task.TaskAttemptContextImpl

class HadoopFileLinesReader(
    file: PartitionedFile, conf: Configuration) extends Iterator[Text] with Closeable {

  private val iterator = {
    val fileSplit = new FileSplit(
      new Path(new URI(file.filePath)),
      file.start,
      file.length,
      // TODO: Implement Locality
      Array.empty)
    val attemptId = new TaskAttemptID(new TaskID(new JobID(), TaskType.MAP, 0), 0)
    val hadoopAttemptContext = new TaskAttemptContextImpl(conf, attemptId)
    val reader = new LineRecordReader()
    reader.initialize(fileSplit, hadoopAttemptContext)
    new RecordReaderIterator(reader)
  }

  override def hasNext: Boolean = iterator.hasNext

  override def next(): Text = iterator.next()

  override def close(): Unit = iterator.close()
}
Example 23
Source File: HadoopFileWholeTextReader.scala From Spark-2.3.1 with Apache License 2.0
package org.apache.spark.sql.execution.datasources

import java.io.Closeable
import java.net.URI

import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.Path
import org.apache.hadoop.io.Text
import org.apache.hadoop.mapreduce._
import org.apache.hadoop.mapreduce.lib.input.CombineFileSplit
import org.apache.hadoop.mapreduce.task.TaskAttemptContextImpl

import org.apache.spark.input.WholeTextFileRecordReader

class HadoopFileWholeTextReader(file: PartitionedFile, conf: Configuration)
  extends Iterator[Text] with Closeable {

  private val iterator = {
    val fileSplit = new CombineFileSplit(
      Array(new Path(new URI(file.filePath))),
      Array(file.start),
      Array(file.length),
      // TODO: Implement Locality
      Array.empty[String])
    val attemptId = new TaskAttemptID(new TaskID(new JobID(), TaskType.MAP, 0), 0)
    val hadoopAttemptContext = new TaskAttemptContextImpl(conf, attemptId)
    val reader = new WholeTextFileRecordReader(fileSplit, hadoopAttemptContext, 0)
    reader.initialize(fileSplit, hadoopAttemptContext)
    new RecordReaderIterator(reader)
  }

  override def hasNext: Boolean = iterator.hasNext

  override def next(): Text = iterator.next()

  override def close(): Unit = iterator.close()
}
Example 24
Source File: resources.scala From Spark-2.3.1 with Apache License 2.0
package org.apache.spark.sql.execution.command

import java.io.File
import java.net.URI

import org.apache.hadoop.fs.Path

import org.apache.spark.sql.{Row, SparkSession}
import org.apache.spark.sql.catalyst.expressions.{Attribute, AttributeReference}
import org.apache.spark.sql.types.{IntegerType, StringType, StructField, StructType}

case class ListJarsCommand(jars: Seq[String] = Seq.empty[String]) extends RunnableCommand {
  override val output: Seq[Attribute] = {
    AttributeReference("Results", StringType, nullable = false)() :: Nil
  }

  override def run(sparkSession: SparkSession): Seq[Row] = {
    val jarList = sparkSession.sparkContext.listJars()
    if (jars.nonEmpty) {
      for {
        jarName <- jars.map(f => new Path(f).getName)
        jarPath <- jarList
        if jarPath.contains(jarName)
      } yield Row(jarPath)
    } else {
      jarList.map(Row(_))
    }
  }
}
Example 25
Source File: FileStreamSinkLog.scala From Spark-2.3.1 with Apache License 2.0
package org.apache.spark.sql.execution.streaming

import java.net.URI

import org.apache.hadoop.fs.{FileStatus, Path}
import org.json4s.NoTypeHints
import org.json4s.jackson.Serialization

import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.internal.SQLConf

class FileStreamSinkLog(
    metadataLogVersion: Int,
    sparkSession: SparkSession,
    path: String)
  extends CompactibleFileStreamLog[SinkFileStatus](metadataLogVersion, sparkSession, path) {

  private implicit val formats = Serialization.formats(NoTypeHints)

  protected override val fileCleanupDelayMs =
    sparkSession.sessionState.conf.fileSinkLogCleanupDelay

  protected override val isDeletingExpiredLog =
    sparkSession.sessionState.conf.fileSinkLogDeletion

  protected override val defaultCompactInterval =
    sparkSession.sessionState.conf.fileSinkLogCompactInterval

  require(defaultCompactInterval > 0,
    s"Please set ${SQLConf.FILE_SINK_LOG_COMPACT_INTERVAL.key} (was $defaultCompactInterval) " +
      "to a positive value.")

  override def compactLogs(logs: Seq[SinkFileStatus]): Seq[SinkFileStatus] = {
    val deletedFiles = logs.filter(_.action == FileStreamSinkLog.DELETE_ACTION).map(_.path).toSet
    if (deletedFiles.isEmpty) {
      logs
    } else {
      logs.filter(f => !deletedFiles.contains(f.path))
    }
  }
}

object FileStreamSinkLog {
  val VERSION = 1
  val DELETE_ACTION = "delete"
  val ADD_ACTION = "add"
}
Example 26
Source File: ApplicationDescription.scala From Spark-2.3.1 with Apache License 2.0
package org.apache.spark.deploy

import java.net.URI

private[spark] case class ApplicationDescription(
    name: String,
    maxCores: Option[Int],
    memoryPerExecutorMB: Int,
    command: Command,
    appUiUrl: String,
    eventLogDir: Option[URI] = None,
    // short name of compression codec used when writing event logs, if any (e.g. lzf)
    eventLogCodec: Option[String] = None,
    coresPerExecutor: Option[Int] = None,
    // number of executors this application wants to start with,
    // only used if dynamic allocation is enabled
    initialExecutorLimit: Option[Int] = None,
    user: String = System.getProperty("user.name", "<unknown>")) {

  override def toString: String = "ApplicationDescription(" + name + ")"
}
Example 27
Source File: ClientArguments.scala From Spark-2.3.1 with Apache License 2.0
package org.apache.spark.deploy

import java.net.{URI, URISyntaxException}

import scala.annotation.tailrec
import scala.collection.mutable.ListBuffer

import org.apache.log4j.Level

import org.apache.spark.util.{IntParam, MemoryParam, Utils}

  private def printUsageAndExit(exitCode: Int) {
    // TODO: It wouldn't be too hard to allow users to submit their app and dependency jars
    //       separately similar to in the YARN client.
    val usage =
      s"""
        |Usage: DriverClient [options] launch <active-master> <jar-url> <main-class> [driver options]
        |Usage: DriverClient kill <active-master> <driver-id>
        |
        |Options:
        |   -c CORES, --cores CORES        Number of cores to request (default: $DEFAULT_CORES)
        |   -m MEMORY, --memory MEMORY     Megabytes of memory to request (default: $DEFAULT_MEMORY)
        |   -s, --supervise                Whether to restart the driver on failure
        |                                  (default: $DEFAULT_SUPERVISE)
        |   -v, --verbose                  Print more debugging output
      """.stripMargin
    // scalastyle:off println
    System.err.println(usage)
    // scalastyle:on println
    System.exit(exitCode)
  }
}

private[deploy] object ClientArguments {
  val DEFAULT_CORES = 1
  val DEFAULT_MEMORY = Utils.DEFAULT_DRIVER_MEM_MB // MB
  val DEFAULT_SUPERVISE = false

  def isValidJarUrl(s: String): Boolean = {
    try {
      val uri = new URI(s)
      uri.getScheme != null && uri.getPath != null && uri.getPath.endsWith(".jar")
    } catch {
      case _: URISyntaxException => false
    }
  }
}
Example 28
Source File: MQTTTestUtils.scala From BigDatalog with Apache License 2.0
package org.apache.spark.streaming.mqtt

import java.net.{ServerSocket, URI}

import scala.language.postfixOps

import com.google.common.base.Charsets.UTF_8
import org.apache.activemq.broker.{BrokerService, TransportConnector}
import org.apache.commons.lang3.RandomUtils
import org.eclipse.paho.client.mqttv3._
import org.eclipse.paho.client.mqttv3.persist.MqttDefaultFilePersistence

import org.apache.spark.util.Utils
import org.apache.spark.{Logging, SparkConf}

private[mqtt] class MQTTTestUtils extends Logging {

  private val persistenceDir = Utils.createTempDir()
  private val brokerHost = "localhost"
  private val brokerPort = findFreePort()

  private var broker: BrokerService = _
  private var connector: TransportConnector = _

  def brokerUri: String = {
    s"$brokerHost:$brokerPort"
  }

  def setup(): Unit = {
    broker = new BrokerService()
    broker.setDataDirectoryFile(Utils.createTempDir())
    connector = new TransportConnector()
    connector.setName("mqtt")
    connector.setUri(new URI("mqtt://" + brokerUri))
    broker.addConnector(connector)
    broker.start()
  }

  def teardown(): Unit = {
    if (broker != null) {
      broker.stop()
      broker = null
    }
    if (connector != null) {
      connector.stop()
      connector = null
    }
    Utils.deleteRecursively(persistenceDir)
  }

  private def findFreePort(): Int = {
    val candidatePort = RandomUtils.nextInt(1024, 65536)
    Utils.startServiceOnPort(candidatePort, (trialPort: Int) => {
      val socket = new ServerSocket(trialPort)
      socket.close()
      (null, trialPort)
    }, new SparkConf())._2
  }

  def publishData(topic: String, data: String): Unit = {
    var client: MqttClient = null
    try {
      val persistence = new MqttDefaultFilePersistence(persistenceDir.getAbsolutePath)
      client = new MqttClient("tcp://" + brokerUri, MqttClient.generateClientId(), persistence)
      client.connect()
      if (client.isConnected) {
        val msgTopic = client.getTopic(topic)
        val message = new MqttMessage(data.getBytes(UTF_8))
        message.setQos(1)
        message.setRetained(true)
        for (i <- 0 to 10) {
          try {
            msgTopic.publish(message)
          } catch {
            case e: MqttException if e.getReasonCode == MqttException.REASON_CODE_MAX_INFLIGHT =>
              // wait for Spark streaming to consume something from the message queue
              Thread.sleep(50)
          }
        }
      }
    } finally {
      if (client != null) {
        client.disconnect()
        client.close()
        client = null
      }
    }
  }
}
Example 29
Source File: ApplicationDescription.scala From BigDatalog with Apache License 2.0
package org.apache.spark.deploy

import java.net.URI

private[spark] case class ApplicationDescription(
    name: String,
    maxCores: Option[Int],
    memoryPerExecutorMB: Int,
    command: Command,
    appUiUrl: String,
    eventLogDir: Option[URI] = None,
    // short name of compression codec used when writing event logs, if any (e.g. lzf)
    eventLogCodec: Option[String] = None,
    coresPerExecutor: Option[Int] = None,
    user: String = System.getProperty("user.name", "<unknown>")) {

  override def toString: String = "ApplicationDescription(" + name + ")"
}
Example 30
Source File: ClientArguments.scala From BigDatalog with Apache License 2.0
package org.apache.spark.deploy

import java.net.{URI, URISyntaxException}

import scala.collection.mutable.ListBuffer

import org.apache.log4j.Level

import org.apache.spark.util.{IntParam, MemoryParam, Utils}

  private def printUsageAndExit(exitCode: Int) {
    // TODO: It wouldn't be too hard to allow users to submit their app and dependency jars
    //       separately similar to in the YARN client.
    val usage =
      s"""
        |Usage: DriverClient [options] launch <active-master> <jar-url> <main-class> [driver options]
        |Usage: DriverClient kill <active-master> <driver-id>
        |
        |Options:
        |   -c CORES, --cores CORES        Number of cores to request (default: $DEFAULT_CORES)
        |   -m MEMORY, --memory MEMORY     Megabytes of memory to request (default: $DEFAULT_MEMORY)
        |   -s, --supervise                Whether to restart the driver on failure
        |                                  (default: $DEFAULT_SUPERVISE)
        |   -v, --verbose                  Print more debugging output
      """.stripMargin
    // scalastyle:off println
    System.err.println(usage)
    // scalastyle:on println
    System.exit(exitCode)
  }
}

private[deploy] object ClientArguments {
  val DEFAULT_CORES = 1
  val DEFAULT_MEMORY = Utils.DEFAULT_DRIVER_MEM_MB // MB
  val DEFAULT_SUPERVISE = false

  def isValidJarUrl(s: String): Boolean = {
    try {
      val uri = new URI(s)
      uri.getScheme != null && uri.getPath != null && uri.getPath.endsWith(".jar")
    } catch {
      case _: URISyntaxException => false
    }
  }
}
Example 31
Source File: HttpUtils.scala From Neutrino with Apache License 2.0
package com.ebay.neutrino.util

import java.net.URI

import com.ebay.neutrino.config.Host
import com.typesafe.scalalogging.slf4j.StrictLogging
import io.netty.buffer.Unpooled
import io.netty.channel.ChannelHandlerContext
import io.netty.handler.codec.http.HttpHeaders.{Names, Values}
import io.netty.handler.codec.http._
import io.netty.util.CharsetUtil

object HttpResponseUtils {
  import io.netty.handler.codec.http.HttpHeaderNames._
  import io.netty.handler.codec.http.HttpVersion._

  def error(status: HttpResponseStatus) = {
    // Allocate some memory for this
    val buffer = Unpooled.copiedBuffer(s"Failure: $status\r\n", CharsetUtil.UTF_8)

    // Package a response
    val response = new DefaultFullHttpResponse(HTTP_1_1, status, buffer)
    response.headers().set(CONTENT_TYPE, "text/plain; charset=UTF-8")
    response
  }
}
Example 32
Source File: ConfigurationUtilsTest.scala From Neutrino with Apache License 2.0
package com.ebay.neutrino.config

import java.net.URI

import com.ebay.neutrino.NoResolver
import org.scalatest.{FlatSpec, Matchers}

class ConfigurationUtilsSpec extends FlatSpec with Matchers {

  behavior of "Configuration file parsing utils"

  val SimpleConfig = Configuration.load("proxy-duplicate.conf")

  it should "handle empty input" in {
    //assert(Set.empty.size === 0)
  }

  it should "parse simple VIP configuration" in {
    val settings = NeutrinoSettings(SimpleConfig)
    assert(settings.interfaces.size == 2)
    //assert(balancercfg.host == "balancer")
    //assert(balancercfg.defaultPool == "default")

    // Check the VIPs against expected
    {
      val vip = settings.interfaces(0)
      vip.addresses should be (Seq(ListenerAddress("0.0.0.0", 8080, Transport.HTTP)))
      vip.protocol should be (Transport.HTTP)
      vip.poolResolvers should be (Seq(NoResolver))
      // TODO vip.handlers shouldNot be (empty)
    }
    {
      val vip = settings.interfaces(1)
      vip.addresses should be (Seq(ListenerAddress("0.0.0.0", 8088, Transport.HTTP)))
      vip.protocol should be (Transport.HTTP)
      vip.poolResolvers should be (Seq(NoResolver))
      vip.handlers should be (empty)
    }
  }

  it should "parse simple Pool configuration" in {
    val balancercfg = LoadBalancer(SimpleConfig)
    assert(balancercfg.pools.size == 2)

    {
      val pool = balancercfg.pools(0)
      assert(pool.servers.size == 0)
      assert(pool.health == None)
    }
    {
      // Check the VIP against expected
      val pool = balancercfg.pools(1)
      assert(pool.servers.size == 2)
      assert(pool.protocol == Transport.HTTP)
      assert(pool.health != None)
      assert(pool.health.get.monitorType == "type")
      assert(pool.health.get.monitor == None)
      assert(pool.health.get.path == new URI("/"))

      {
        val server = pool.servers(0)
        assert(server.host == "localhost")
        assert(server.port == 8081)
      }
      {
        val server = pool.servers(1)
        assert(server.host == "127.0.0.1")
        assert(server.port == 8082)
      }
    }
  }

  // TODO - VIP w. no default pool
  // TODO - bad URI
}
Example 33
Source File: DummyEndpointResolver.scala From squbs with Apache License 2.0
package org.squbs.resolver

import java.net.URI

import akka.actor.ActorSystem
import org.squbs.env.{DEV, Default, Environment}

class DummyServiceResolver(implicit system: ActorSystem) extends Resolver[URI] {
  override def resolve(svcName: String, env: Environment): Option[URI] = {
    if (svcName == name) Some(URI.create("http://www.google.com"))
    else None
  }

  override def name: String = "DummyService"
}

class DummyLocalhostResolver(implicit system: ActorSystem) extends Resolver[URI] {
  override def resolve(svcName: String, env: Environment = Default): Option[URI] = {
    require(svcName != null, "Service name cannot be null")
    require(svcName.length > 0, "Service name must not be blank")
    env match {
      case Default | DEV => Some(URI.create("http://localhost:8080"))
      case _ => throw new RuntimeException(
        "DummyLocalhostResolver cannot support " + env + " environment")
    }
  }

  override def name: String = "DummyLocalhostResolver"
}
Example 34
Source File: LineCount.scala From cloud-integration with Apache License 2.0
package com.cloudera.spark.cloud.operations

import java.net.URI

import com.cloudera.spark.cloud.ObjectStoreExample
import com.cloudera.spark.cloud.common.CloudTestKeys._
import com.cloudera.spark.cloud.s3.SequentialIOPolicy
import org.apache.hadoop.fs.{FileSystem, Path}
import org.apache.spark.{SparkConf, SparkContext}

          destFsInfo = Some(s"\nFile System $destPath=\n$destFS\n")
        }
      }
      srcFsInfo = Some(s"\nSource File System = $sourceFs\n")
    } finally {
      logInfo("Stopping Spark Context")
      sc.stop()
      srcFsInfo.foreach(logInfo(_))
      destFsInfo.foreach(logInfo(_))
    }
    0
  }

  def defaultSource: Option[String] = {
    Some(S3A_CSV_PATH_DEFAULT)
  }

  def maybeEnableAnonymousAccess(
      sparkConf: SparkConf,
      dest: Option[String]): Unit = {
    if (dest.isEmpty) {
      hconf(sparkConf, AWS_CREDENTIALS_PROVIDER, ANONYMOUS_CREDENTIALS)
    }
  }
}
Example 35
Source File: ProxyCrawler.scala From ProxyCrawler with Apache License 2.0
package org.crowdcrawler.proxycrawler import java.io.IOException import java.net.URI import java.security.cert.X509Certificate import com.typesafe.scalalogging.Logger import org.apache.http.client.methods.HttpGet import org.apache.http.impl.client.HttpClients import org.apache.http.ssl.{TrustStrategy, SSLContexts} import org.apache.http.conn.ssl.{NoopHostnameVerifier, SSLConnectionSocketFactory} import org.apache.http.util.EntityUtils import org.crowdcrawler.proxycrawler.crawler.plugins.AbstractPlugin import org.apache.http.HttpHeaders import org.slf4j.LoggerFactory import scala.collection.immutable import scala.collection.mutable class ProxyCrawler(plugins: List[AbstractPlugin]) { *;q=0.8"), (HttpHeaders.ACCEPT_ENCODING, "gzip, deflate, sdch"), (HttpHeaders.ACCEPT_LANGUAGE, "en-US,en;q=0.8,zh-CN;q=0.6,zh;q=0.4"), (HttpHeaders.CONNECTION, "keep-alive") ) private val CLIENT = { // trust all certificates including self-signed certificates val sslContext = SSLContexts.custom().loadTrustMaterial(null, new TrustStrategy() { def isTrusted(chain: Array[X509Certificate], authType: String) = true }).build() val connectionFactory = new SSLConnectionSocketFactory(sslContext, NoopHostnameVerifier.INSTANCE) HttpClients.custom().setSSLSocketFactory(connectionFactory).build() } def apply(classNames: String*): ProxyCrawler = { val plugins = mutable.ListBuffer.empty[AbstractPlugin] for (className <- classNames) { val clazz = Class.forName("org.crowdcrawler.proxycrawler.crawler.plugins." + className) plugins += clazz.newInstance().asInstanceOf[AbstractPlugin] } new ProxyCrawler(plugins.toList) } private def createRequest(uri: URI, headers: immutable.Map[String, String]): HttpGet = { val request = new HttpGet(uri) for (header <- headers) { request.setHeader(header._1, header._2) } request } }
Example 36
Source File: IpcnOrgPlugin.scala From ProxyCrawler with Apache License 2.0 | 5 votes |
package org.crowdcrawler.proxycrawler.crawler.plugins import org.crowdcrawler.proxycrawler.ProxyInfo import org.jsoup.Jsoup import java.net.URI import java.nio.charset.Charset import scala.collection.mutable final class IpcnOrgPlugin extends AbstractPlugin { val seeds: List[URI] = List( new URI("http://proxy.ipcn.org/proxylist.html"), new URI("http://proxy.ipcn.org/proxylist2.html") ) def extract(html: String): List[ProxyInfo] = { val result = mutable.ListBuffer.empty[ProxyInfo] val doc = Jsoup.parse(html) val preText = doc.select("tr > td > pre").text val rows = preText.split("\n") for (row <- rows) { if (row.matches("[0-9]+(?:\\.[0-9]+){3}:[0-9]+")) { val splitted = row.split(":") val host = splitted(0) val port = splitted(1).toInt result += ProxyInfo(host, port, "HTTP", 0, null, null) } } result.toList } def next(html: String): List[URI] = List() override val responseCharset: Charset = Charset.forName("GB2312") }
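The plugin filters rows with a plain IPv4 host:port regex before splitting. A self-contained sketch of just that matching step (object name illustrative):

object IpPortRegexDemo extends App {
  // The same pattern extract() uses to pick host:port rows out of the <pre> text.
  val HostPort = "[0-9]+(?:\\.[0-9]+){3}:[0-9]+"

  assert("127.0.0.1:8080".matches(HostPort))
  assert(!"proxy.ipcn.org:8080".matches(HostPort)) // hostnames are skipped

  // Splitting on ':' yields the host and port fields.
  val Array(host, port) = "127.0.0.1:8080".split(":")
  assert(host == "127.0.0.1" && port.toInt == 8080)
}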
Example 37
Source File: CoolProxyNetPlugin.scala From ProxyCrawler with Apache License 2.0 | 5 votes |
package org.crowdcrawler.proxycrawler.crawler.plugins import org.crowdcrawler.proxycrawler.ProxyInfo import org.jsoup.Jsoup import java.net.URI import java.nio.charset.StandardCharsets import sun.misc.BASE64Decoder import scala.collection.mutable import scala.collection.JavaConversions._ import util.control.Breaks._ class CoolProxyNetPlugin extends AbstractPlugin { private final val decoder: BASE64Decoder = new BASE64Decoder val seeds: List[URI] = List(new URI("http://www.cool-proxy.net/proxies/http_proxy_list/page:1")) private def decryptIP(ip: String): String = { val base64Encoded = new StringBuilder for (ch <- ip) { val newChar = if (Character.isAlphabetic(ch)) { if (ch.toLower < 'n') (ch + 13).toChar else (ch - 13).toChar } else { ch } base64Encoded += newChar } val bytes = decoder.decodeBuffer(base64Encoded.toString()) new String(bytes, StandardCharsets.UTF_8) } def extract(html: String): List[ProxyInfo] = { val result = mutable.ListBuffer.empty[ProxyInfo] val doc = Jsoup.parse(html) val rows = doc.select("table > tbody > tr") for (row <- rows) { breakable { val tds = row.select("td") if (tds.isEmpty) break val host = { val hostTmp = tds.get(0).html val startWith = "Base64.decode(str_rot13(\"" val start = hostTmp.indexOf(startWith) if (start == -1) break val end = hostTmp.indexOf("\")))", start) if (end == -1) break val hostEncrypted = hostTmp.substring(start + startWith.length, end) decryptIP(hostEncrypted) } val port = tds.get(1).text.toInt val location = tds.get(3).text val speed = tds.get(8).text.toInt result.add(ProxyInfo(host, port, "HTTP", speed, location, null)) } } result.toList } def next(html: String): List[URI] = { val result = mutable.ListBuffer.empty[URI] val doc = Jsoup.parse(html) val rows = doc.select(".pagination > span > a[href]") for (row <- rows) { val href = row.attr("href") result += new URI("http://www.cool-proxy.net" + href) } result.toList } }
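decryptIP above is rot13 followed by Base64 decoding, via the long-deprecated sun.misc.BASE64Decoder. A sketch of the same decoding with the supported java.util.Base64 API from JDK 8+, verified by a round trip (it assumes the site really serves rot13(base64(ip)), as the original code implies):

import java.nio.charset.StandardCharsets
import java.util.Base64

object DecryptIpDemo extends App {
  // rot13 is its own inverse; letters a-m shift up 13, n-z shift down 13.
  private def rot13(s: String): String = s.map {
    case ch if ch.isLetter =>
      if (ch.toLower < 'n') (ch + 13).toChar else (ch - 13).toChar
    case ch => ch
  }

  // Same logic as decryptIP above, with java.util.Base64 replacing sun.misc.
  def decryptIP(ip: String): String =
    new String(Base64.getDecoder.decode(rot13(ip)), StandardCharsets.UTF_8)

  // Round trip: obfuscate an address the way the site does, then decrypt it.
  val obfuscated = rot13(Base64.getEncoder.encodeToString(
    "127.0.0.1".getBytes(StandardCharsets.UTF_8)))
  assert(decryptIP(obfuscated) == "127.0.0.1")
}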
Example 38
Source File: CnProxyComPlugin.scala From ProxyCrawler with Apache License 2.0 | 5 votes |
package org.crowdcrawler.proxycrawler.crawler.plugins import org.crowdcrawler.proxycrawler.ProxyInfo import org.jsoup.Jsoup import java.net.URI import java.nio.charset.Charset import scala.collection.{immutable,mutable} import util.control.Breaks._ class CnProxyComPlugin extends AbstractPlugin { private val charNum = immutable.Map( "v" -> "3", "m" -> "4", "a" -> "2", "l" -> "9", "q" -> "0", "b" -> "5", "i" -> "7", "w" -> "6", "r" -> "8", "c" -> "1" ) val seeds: List[URI] = { List( new URI("http://www.cnproxy.com/proxy1.html"), new URI("http://www.cnproxy.com/proxy2.html"), new URI("http://www.cnproxy.com/proxy3.html"), new URI("http://www.cnproxy.com/proxy4.html"), new URI("http://www.cnproxy.com/proxy5.html"), new URI("http://www.cnproxy.com/proxy6.html"), new URI("http://www.cnproxy.com/proxy7.html"), new URI("http://www.cnproxy.com/proxy8.html"), new URI("http://www.cnproxy.com/proxy9.html"), new URI("http://www.cnproxy.com/proxy10.html"), new URI("http://www.cnproxy.com/proxyedu1.html"), new URI("http://www.cnproxy.com/proxyedu2.html") ) } private def decryptPort(encrypted: String): Int = encrypted.split("\\+").map(str => charNum(str)).mkString.toInt def extract(html: String): List[ProxyInfo] = { val result = mutable.ListBuffer.empty[ProxyInfo] val doc = Jsoup.parse(html) val rows = doc.select("#proxylisttb > table").get(2).select("tr") for (i <- 1 until rows.size()) { breakable { // skip the first row val row = rows.get(i) val tds = row.select("td") val host = tds.get(0).text val port = { val pattern = "document.write(\":\"+" val original = tds.get(0).html() val pos1 = original.indexOf(pattern) if (pos1 == -1) break val pos2 = original.indexOf(")</script>", pos1) if (pos2 == -1) break val portStr = original.substring(pos1 + pattern.length, pos2) decryptPort(portStr) } val schema = tds.get(1).text val speeds = tds.get(2).text val speed = { val splitted = speeds.split(",") var sum = 0 for (str <- splitted) { val tmp = str.toInt sum += tmp } sum / splitted.length } val country = tds.get(3).text val proxyInfo = ProxyInfo(host, port, schema, speed, country, null) result += proxyInfo } } result.toList } def next(html: String): List[URI] = List() override val responseCharset: Charset = Charset.forName("GB2312") }
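decryptPort maps each letter through the charNum table and joins the resulting digits. A self-contained worked example of that scheme (the sample input is constructed for illustration):

object DecryptPortDemo extends App {
  val charNum = Map(
    "v" -> "3", "m" -> "4", "a" -> "2", "l" -> "9", "q" -> "0",
    "b" -> "5", "i" -> "7", "w" -> "6", "r" -> "8", "c" -> "1")

  // Same logic as decryptPort above: split on '+', look up each digit, join.
  def decryptPort(encrypted: String): Int =
    encrypted.split("\\+").map(charNum).mkString.toInt

  // "r+q+r+q" maps to "8", "0", "8", "0" -> port 8080.
  assert(decryptPort("r+q+r+q") == 8080)
}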
Example 39
Source File: ProxyListOrg.scala From ProxyCrawler with Apache License 2.0 | 5 votes |
package org.crowdcrawler.proxycrawler.crawler.plugins import java.net.URI import org.crowdcrawler.proxycrawler.ProxyInfo import org.jsoup.Jsoup import scala.collection.mutable import scala.collection.JavaConversions._ class ProxyListOrg extends AbstractPlugin { val seeds: List[URI] = List(new URI("https://proxy-list.org/english/index.php?p=1")) def extract(html: String): List[ProxyInfo] = { val result = mutable.ListBuffer.empty[ProxyInfo] val doc = Jsoup.parse(html) val rows = doc.select("div.table-wrap > div > ul") for (row <- rows) { val hostPort = row.select("li.proxy").text() val host = hostPort.split(":")(0) val port = hostPort.split(":")(1).toInt val schema = { val tmp = row.select("li.https").text() if (tmp == "-") "HTTP" else tmp.toUpperCase } val speed = { val tmp = row.select("li.speed").text() if (tmp.contains("kbit")) { (tmp.dropRight(4).toDouble * 1024).toInt } else { 0 } } val location = row.select("li.country-city > div > span.country").first().attr("title") result += ProxyInfo(host, port, schema, speed, location, null) } result.toList } def next(html: String): List[URI] = { val result = mutable.ListBuffer.empty[URI] val rootURL = "https://proxy-list.org/english" val doc = Jsoup.parse(html) val rows = doc.select("div.table-menu > a.item[href]") for (row <- rows) { val href = row.attr("href") result += new URI(rootURL + href.substring(1)) } result.toList } }
Example 40
Source File: SocksProxyNet.scala From ProxyCrawler with Apache License 2.0 | 5 votes |
package org.crowdcrawler.proxycrawler.crawler.plugins import java.net.URI import org.crowdcrawler.proxycrawler.ProxyInfo import org.jsoup.Jsoup import scala.collection.mutable import scala.collection.JavaConversions._ class SocksProxyNet extends AbstractPlugin { val seeds: List[URI] = List(new URI("http://www.socks-proxy.net/")) def extract(html: String): List[ProxyInfo] = { val result = mutable.ListBuffer.empty[ProxyInfo] val doc = Jsoup.parse(html) val rows = doc.select("table#proxylisttable > tbody > tr") for (row <- rows) { val tds = row.select("td") val host = tds.get(0).text val port = tds.get(1).text.toInt val location = tds.get(3).text val schema = tds.get(4).text.toUpperCase result += ProxyInfo(host, port, schema, 0, location, null) } result.toList } def next(html: String): List[URI] = List() }
Example 41
Source File: HttpProxyChecker.scala From ProxyCrawler with Apache License 2.0 | 5 votes |
package org.crowdcrawler.proxycrawler.checker import java.net.URI import java.nio.charset.StandardCharsets import org.apache.http.annotation.ThreadSafe import org.apache.http.HttpHost import org.apache.http.client.methods.HttpGet import org.apache.http.impl.client.HttpClients import org.apache.http.util.EntityUtils @ThreadSafe private[checker] object HttpProxyChecker extends AbstractProxyChecker { private val CLIENT = HttpClients.custom().setMaxConnTotal(AbstractProxyChecker.MAX_CONN) .disableRedirectHandling().build() private val TARGET_URL = new URI("http://www.baidu.com") def check(host: String, port: Int): (Int, Int) = { val request = new HttpGet(TARGET_URL) AbstractProxyChecker.configureRequest(request, Some(new HttpHost(host, port, "http"))) val response = CLIENT.execute(request) val statusCode = response.getStatusLine.getStatusCode val html = EntityUtils.toString(response.getEntity, StandardCharsets.UTF_8) if (statusCode == 200 && html.contains("<title>百度一下")) (statusCode, html.getBytes.length) else (statusCode, -1) } }
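AbstractProxyChecker.configureRequest is project-internal and not shown in this example. A plausible sketch of what routing a single request through an HTTP proxy looks like with the Apache HttpClient 4.x API imported here; the helper name and timeout values are assumptions, not the project's actual implementation:

import org.apache.http.HttpHost
import org.apache.http.client.config.RequestConfig
import org.apache.http.client.methods.HttpGet

object ProxyRequestSketch {
  // Attach a per-request proxy route (and, typically, timeouts) via RequestConfig.
  def viaProxy(request: HttpGet, host: String, port: Int): HttpGet = {
    val config = RequestConfig.custom()
      .setProxy(new HttpHost(host, port, "http"))
      .setConnectTimeout(5000)
      .setSocketTimeout(5000)
      .build()
    request.setConfig(config)
    request
  }
}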
Example 42
Source File: SocksProxyChecker.scala From ProxyCrawler with Apache License 2.0 | 5 votes |
package org.crowdcrawler.proxycrawler.checker import java.net import java.net.{InetSocketAddress, Socket, URI} import java.nio.charset.StandardCharsets import javax.net.ssl.{HostnameVerifier, SSLContext} import org.apache.http.annotation.ThreadSafe import org.apache.http.client.methods.HttpGet import org.apache.http.client.protocol.HttpClientContext import org.apache.http.config.RegistryBuilder import org.apache.http.conn.socket.{ConnectionSocketFactory, PlainConnectionSocketFactory} import org.apache.http.conn.ssl.{NoopHostnameVerifier, SSLConnectionSocketFactory} import org.apache.http.impl.client.HttpClients import org.apache.http.impl.conn.PoolingHttpClientConnectionManager import org.apache.http.protocol.HttpContext import org.apache.http.util.EntityUtils @ThreadSafe private[checker] object SocksProxyChecker extends AbstractProxyChecker { private class MyHttpConnectionSocketFactory extends PlainConnectionSocketFactory { override def createSocket(context: HttpContext): Socket = { val socksAddress = context.getAttribute("socks.address").asInstanceOf[InetSocketAddress] val proxy = new net.Proxy(net.Proxy.Type.SOCKS, socksAddress) new Socket(proxy) } } private class MyHttpsConnectionSocketFactory(sslContext: SSLContext, verifier: HostnameVerifier) extends SSLConnectionSocketFactory(sslContext) { override def createSocket(context: HttpContext): Socket = { val socksAddress = context.getAttribute("socks.address").asInstanceOf[InetSocketAddress] val proxy = new net.Proxy(net.Proxy.Type.SOCKS, socksAddress) new Socket(proxy) } } private val CLIENT = { val reg = RegistryBuilder.create[ConnectionSocketFactory]() .register("http", new MyHttpConnectionSocketFactory()) .register("https", new MyHttpsConnectionSocketFactory(HttpsProxyChecker.SSL_CONTEXT, NoopHostnameVerifier.INSTANCE)) .build() val cm = new PoolingHttpClientConnectionManager(reg) cm.setMaxTotal(AbstractProxyChecker.MAX_CONN) HttpClients.custom().setConnectionManager(cm).disableRedirectHandling().build() } private val TARGET_URL = new URI("http://www.baidu.com") def check(host: String, port: Int): (Int, Int) = { val request = new HttpGet(TARGET_URL) AbstractProxyChecker.configureRequest(request) val httpContext = { val socksAddress = new InetSocketAddress(host, port) val context = HttpClientContext.create() context.setAttribute("socks.address", socksAddress) context } val response = CLIENT.execute(request, httpContext) val statusCode = response.getStatusLine.getStatusCode val html = EntityUtils.toString(response.getEntity, StandardCharsets.UTF_8) if (statusCode == 200 && html.contains("<title>百度一下")) (statusCode, html.getBytes.length) else (statusCode, -1) } }
Example 43
Source File: HttpsProxyChecker.scala From ProxyCrawler with Apache License 2.0 | 5 votes |
package org.crowdcrawler.proxycrawler.checker import java.net.URI import java.nio.charset.StandardCharsets import java.security.cert.X509Certificate import org.apache.http.HttpHost import org.apache.http.annotation.ThreadSafe import org.apache.http.client.methods.HttpGet import org.apache.http.conn.ssl.{NoopHostnameVerifier, SSLConnectionSocketFactory} import org.apache.http.impl.client.HttpClients import org.apache.http.ssl.{TrustStrategy, SSLContexts} import org.apache.http.util.EntityUtils @ThreadSafe private[checker] object HttpsProxyChecker extends AbstractProxyChecker { // trust all certificates including self-signed certificates private[checker] val SSL_CONTEXT = SSLContexts.custom().loadTrustMaterial(null, new TrustStrategy() { def isTrusted(chain: Array[X509Certificate], authType: String) = true }).build() private val CLIENT = { val connectionFactory = new SSLConnectionSocketFactory(SSL_CONTEXT, NoopHostnameVerifier.INSTANCE) HttpClients.custom().setSSLSocketFactory(connectionFactory).setMaxConnTotal(AbstractProxyChecker.MAX_CONN) .disableRedirectHandling().build() } private val TARGET_URL = new URI("https://www.google.com") def check(host: String, port: Int): (Int, Int) = { val request = new HttpGet(TARGET_URL) AbstractProxyChecker.configureRequest(request, Some(new HttpHost(host, port, "http"))) val response = CLIENT.execute(request) val statusCode = response.getStatusLine.getStatusCode val html = EntityUtils.toString(response.getEntity, StandardCharsets.UTF_8) if (statusCode == 200 && html.contains("<title>Google</title>")) (statusCode, html.getBytes.length) else (statusCode, -1) } }
Example 44
Source File: DownloadableFile.scala From polynote with Apache License 2.0 | 5 votes |
package polynote.kernel.util import java.io.{File, FileInputStream, InputStream} import java.net.{HttpURLConnection, URI} import java.util.ServiceLoader import scala.collection.JavaConverters._ import cats.effect.IO import zio.{RIO, ZIO} import zio.blocking.{Blocking, effectBlocking} trait DownloadableFile { def openStream: IO[InputStream] def size: IO[Long] } trait DownloadableFileProvider { def getFile(uri: URI): Option[DownloadableFile] = provide.lift(uri) def provide: PartialFunction[URI, DownloadableFile] def protocols: Seq[String] object Supported { def unapply(arg: URI): Option[URI] = { Option(arg.getScheme).flatMap(scheme => protocols.find(_ == scheme)).map(_ => arg) } } } object DownloadableFileProvider { private lazy val unsafeLoad = ServiceLoader.load(classOf[DownloadableFileProvider]).iterator.asScala.toList def isSupported(uri: URI): RIO[Blocking, Boolean] = effectBlocking(unsafeLoad).map { providers => Option(uri.getScheme).exists(providers.flatMap(_.protocols).contains) } def getFile(uri: URI): ZIO[Blocking, Throwable, DownloadableFile] = { effectBlocking(unsafeLoad).map { providers => for { scheme <- Option(uri.getScheme) provider <- providers.find(_.protocols.contains(scheme)) file <- provider.getFile(uri) } yield file }.someOrFail(new Exception(s"Unable to find provider for uri $uri")) } } class HttpFileProvider extends DownloadableFileProvider { override def protocols: Seq[String] = Seq("http", "https") override def provide: PartialFunction[URI, DownloadableFile] = { case Supported(uri) => HTTPFile(uri) } } case class HTTPFile(uri: URI) extends DownloadableFile { override def openStream: IO[InputStream] = IO(uri.toURL.openStream()) override def size: IO[Long] = IO(uri.toURL.openConnection().asInstanceOf[HttpURLConnection]).bracket { conn => IO { conn.setRequestMethod("HEAD") conn.getContentLengthLong } } { conn => IO(conn.disconnect())} } class LocalFileProvider extends DownloadableFileProvider { override def protocols: Seq[String] = Seq("file") override def provide: PartialFunction[URI, DownloadableFile] = { case Supported(uri) => LocalFile(uri) } } case class LocalFile(uri: URI) extends DownloadableFile { lazy val file = new File(uri) override def openStream: IO[InputStream] = IO(new FileInputStream(file)) override def size: IO[Long] = IO.pure(file.length()) }
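The Supported extractor is just a scheme check against the provider's protocols. A self-contained sketch of the same lookup pattern without the ZIO and ServiceLoader plumbing (the Provider trait below is a simplified stand-in, not the polynote API):

import java.net.URI

trait Provider {
  def protocols: Seq[String]
  // Extractor that only matches URIs whose scheme this provider handles.
  object Supported {
    def unapply(uri: URI): Option[URI] =
      Option(uri.getScheme).filter(protocols.contains).map(_ => uri)
  }
}

object ProviderDemo extends App {
  val http = new Provider { val protocols = Seq("http", "https") }
  val uri = new URI("https://example.com/data.csv")
  uri match {
    case http.Supported(u) => println(s"handled by http provider: $u")
    case _                 => println("no provider")
  }
}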
Example 45
Source File: ClassIndexer.scala From polynote with Apache License 2.0 | 5 votes |
package polynote.kernel.interpreter.scal import java.io.File import java.net.URI import java.util.concurrent.atomic.AtomicReference import java.util.function.UnaryOperator import io.github.classgraph.ClassGraph import polynote.kernel.ScalaCompiler import polynote.kernel.util.pathOf import zio.blocking.{Blocking, effectBlocking} import zio.{Fiber, RIO, UIO, ZIO} import scala.collection.immutable.TreeMap trait ClassIndexer { def findMatches(name: String): UIO[Map[String, List[(Int, String)]]] def await: UIO[Unit] } object ClassIndexer { def default: ZIO[Blocking with ScalaCompiler.Provider, Nothing, ClassIndexer] = SimpleClassIndexer() } class SimpleClassIndexer(ref: AtomicReference[TreeMap[String, List[(Int, String)]]], process: Fiber[Throwable, Any]) extends ClassIndexer { override def findMatches(name: String): UIO[Map[String, List[(Int, String)]]] = ZIO.effectTotal(ref.get).map(_.range(name, name + Char.MaxValue)) override def await: UIO[Unit] = process.await.unit } object SimpleClassIndexer { def apply(): ZIO[Blocking with ScalaCompiler.Provider, Nothing, SimpleClassIndexer] = { def buildIndex( priorityDependencies: Array[File], classPath: Array[File], classes: AtomicReference[TreeMap[String, List[(Int, String)]]] ) = effectBlocking { import scala.collection.JavaConverters._ val lastPriority = priorityDependencies.length + classPath.length val priorities = (priorityDependencies ++ classPath.diff(priorityDependencies)).distinct.zipWithIndex.toMap val classGraph = new ClassGraph().overrideClasspath(priorityDependencies ++ classPath: _*).enableClassInfo() val scanResult = classGraph.scan() scanResult.getAllClasses.iterator().asScala .filter(_.isPublic) .filterNot(_.isSynthetic) .filterNot(_.getSimpleName.contains("$")) .foreach { classInfo => val priority = priorities.getOrElse(classInfo.getClasspathElementFile, lastPriority) classes.updateAndGet(new UnaryOperator[TreeMap[String, List[(Int, String)]]] { def apply(t: TreeMap[String, List[(Int, String)]]): TreeMap[String, List[(Int, String)]] = t + (classInfo.getSimpleName -> ((priority -> classInfo.getName) :: t.getOrElse(classInfo.getSimpleName, Nil))) }) } classes.get() } def javaLibraryPath = Option(classOf[Object].getResource("Object.class")).flatMap { case url if url.getProtocol == "jar" => try Some(new File(new URI(url.getPath.stripSuffix("!/java/lang/Object.class")))) catch { case err: Throwable => None } case url if url.getProtocol == "file" => try Some(new File(url.toURI)) catch { case err: Throwable => None } case _ => None } for { classPath <- ScalaCompiler.settings.map(_.classpath.value.split(File.pathSeparatorChar).map(new File(_))) deps <- ScalaCompiler.dependencies priorities = new File(pathOf(classOf[List[_]]).toURI) :: javaLibraryPath.toList ::: deps indexRef = new AtomicReference[TreeMap[String, List[(Int, String)]]](new TreeMap) process <- buildIndex(priorities.toArray, classPath, indexRef).forkDaemon } yield new SimpleClassIndexer(indexRef, process) } }
Example 46
Source File: MemoryRepository.scala From polynote with Apache License 2.0 | 5 votes |
package polynote.testing.repository import java.io.FileNotFoundException import java.net.URI import polynote.kernel.{BaseEnv, GlobalEnv, NotebookRef, TaskB} import polynote.messages._ import polynote.server.repository.NotebookRepository import polynote.testing.kernel.MockNotebookRef import zio.{RIO, Task, UIO, ZIO} import scala.collection.mutable class MemoryRepository extends NotebookRepository { private val notebooks = new mutable.HashMap[String, Notebook]() def notebookExists(path: String): UIO[Boolean] = ZIO.effectTotal(notebooks contains path) def notebookURI(path: String): UIO[Option[URI]] = ZIO.effectTotal(if (notebooks contains path) Option(new URI(s"memory://$path")) else None) def loadNotebook(path: String): Task[Notebook] = ZIO.effectTotal(notebooks.get(path)).get.mapError(err => new FileNotFoundException(path)) def openNotebook(path: String): RIO[BaseEnv with GlobalEnv, NotebookRef] = loadNotebook(path).flatMap(nb => MockNotebookRef(nb, tup => saveNotebook(tup._2))) def saveNotebook(nb: Notebook): UIO[Unit] = ZIO.effectTotal(notebooks.put(nb.path, nb)) def listNotebooks(): UIO[List[String]] = ZIO.effectTotal(notebooks.keys.toList) def createNotebook(path: String, maybeUriOrContent: Option[String]): UIO[String] = ZIO.effectTotal(notebooks.put(path, Notebook(path, ShortList.of(), None))).as(path) def createAndOpen(path: String, notebook: Notebook, version: Int): RIO[BaseEnv with GlobalEnv, NotebookRef] = ZIO.effectTotal(notebooks.put(path, notebook)).flatMap { _ => MockNotebookRef(notebook, tup => saveNotebook(tup._2), version) } def initStorage(): TaskB[Unit] = ZIO.unit def renameNotebook(path: String, newPath: String): Task[String] = loadNotebook(path).map { notebook => notebooks.put(newPath, notebook) notebooks.remove(path) newPath } def copyNotebook(path: String, newPath: String): TaskB[String] = loadNotebook(path).map { notebook => notebooks.put(newPath, notebook) newPath } def deleteNotebook(path: String): TaskB[Unit] = ZIO.effectTotal(notebooks.get(path)).flatMap { case None => ZIO.fail(new FileNotFoundException(path)) case Some(_) => ZIO.effectTotal(notebooks.remove(path)).unit } }
Example 47
Source File: License.scala From recogito2 with Apache License 2.0 | 5 votes |
package services.document import java.net.URI sealed trait License { val name: String; val acronym: String; val uri: Option[URI]; val isOpen: Boolean; val isCC: Boolean } case object CC0 extends License { val name = "CC0 1.0 Universal (CC0 1.0)" val acronym = "CC0 1.0" val uri = Some(new URI("http://creativecommons.org/publicdomain/zero/1.0/")) val isOpen = true val isCC = true } case object CC_BY extends License { val name = "CC Attribution 4.0 International (CC BY 4.0)" val acronym = "CC BY 4.0" val uri = Some(new URI("http://creativecommons.org/licenses/by/4.0/")) val isOpen = true val isCC = true } case object CC_BY_SA extends License { val name = "CC Attribution-ShareAlike 4.0 International (CC BY-SA 4.0)" val acronym = "CC BY-SA 4.0" val uri = Some(new URI("http://creativecommons.org/licenses/by-sa/4.0/")) val isOpen = true val isCC = true } case object CC_BY_NC extends License { val name = "CC Attribution-NonCommercial 4.0 International (CC BY-NC 4.0)" val acronym = "CC BY-NC 4.0" val uri = Some(new URI("http://creativecommons.org/licenses/by-nc/4.0/")) val isOpen = true val isCC = true } case object CC_BY_NC_SA extends License { val name = "CC Attribution-NonCommercial-ShareAlike 4.0 International (CC BY-NC-SA 4.0)" val acronym = "CC BY-NC-SA 4.0" val uri = Some(new URI("http://creativecommons.org/licenses/by-nc-sa/4.0/")) val isOpen = true val isCC = true } case object OUT_OF_COPYRIGHT extends License { val name = "Out of copyright in territory of publication" val acronym = "OUT OF COPYRIGHT" val uri = None val isOpen = true val isCC = false } case object IN_COPYRIGHT extends License { val name = "In copyright in territory of publication" val acronym = "IN COPYRIGHT" val uri = None val isOpen = false val isCC = false } case object OTHER extends License { val name = "Other" val acronym = "OTHER" val uri = None val isOpen = false val isCC = false } object License { lazy val values: Seq[License] = Seq( CC0, CC_BY, CC_BY_SA, CC_BY_NC, CC_BY_NC_SA, OUT_OF_COPYRIGHT, IN_COPYRIGHT, OTHER ) def fromAcronym(acronym: String) = values.find(_.acronym == acronym) }
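A small usage sketch of the companion's lookup, assuming it compiles in the same services.document package as the definitions above (object name illustrative):

object LicenseDemo extends App {
  // Lookup goes through the acronym only, not the display name.
  assert(License.fromAcronym("CC BY 4.0").contains(CC_BY))
  assert(License.fromAcronym("CC BY 4.0").flatMap(_.uri)
    .contains(new java.net.URI("http://creativecommons.org/licenses/by/4.0/")))
  // Unknown acronyms yield None rather than throwing.
  assert(License.fromAcronym("NOPE").isEmpty)
}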
Example 48
Source File: LandingController.scala From recogito2 with Apache License 2.0 | 5 votes |
package controllers.landing import com.mohiva.play.silhouette.api.Silhouette import controllers.{ HasConfig, HasUserService, HasVisitLogging, HasPrettyPrintJSON, Security } import java.io.FileInputStream import javax.inject.{ Inject, Singleton } import java.net.URI import org.webjars.play.WebJarsUtil import play.api.Configuration import play.api.i18n.I18nSupport import play.api.libs.json.{Json, JsObject} import play.api.mvc.{Action, AbstractController, ControllerComponents} import scala.concurrent.ExecutionContext import services.annotation.AnnotationService import services.contribution.ContributionService import services.document.DocumentService import services.user.UserService import services.visit.VisitService @Singleton class LandingController @Inject() ( val components: ControllerComponents, val config: Configuration, val annotations: AnnotationService, val contributions: ContributionService, val documents: DocumentService, val users: UserService, val silhouette: Silhouette[Security.Env], implicit val ctx: ExecutionContext, implicit val visits: VisitService, implicit val webjars: WebJarsUtil ) extends AbstractController(components) with HasConfig with HasUserService with HasVisitLogging with HasPrettyPrintJSON with I18nSupport { def index = silhouette.UserAwareAction { implicit request => // Temporary hack only request.queryString.get("lang").flatMap(_.headOption) match { case Some(lang) => Redirect(routes.LandingController.index).withLang(play.api.i18n.Lang(lang)) case None => request.identity match { case Some(user) => Redirect(controllers.my.routes.WorkspaceController.workspace(user.username)) case None => logPageView() Ok(views.html.landing.index()) } } } def getStats() = silhouette.UnsecuredAction.async { implicit request => val fAnnotations = annotations.countTotal() val fEdits = contributions.countLast24hrs() val fUsers = users.countUsers() val f = for { annotations <- fAnnotations edits <- fEdits users <- fUsers } yield (annotations, edits, users) f.map { case (annotations, edits, users) => jsonOk(Json.obj( "annotations" -> annotations, "edits" -> edits, "users" -> users )) } } def sitemap() = Action.async { implicit request => documents.listOwnersWithPublicDocuments().map { users => val baseURL = routes.LandingController.index().absoluteURL() val sitemap = users.map(user => s"${baseURL}${user}").mkString("\n") Ok(sitemap).as("text/plain") } } def robots() = Action { implicit request => val sitemapURL = routes.LandingController.sitemap().absoluteURL() Ok(s"SITEMAP: ${sitemapURL}").as("text/plain") } def swaggerConfig() = Action { implicit request => val json = Json.parse(new FileInputStream("conf/swagger.json")) val baseURL = new URI(routes.LandingController.index.absoluteURL) val host = if (baseURL.getPort == -1) baseURL.getHost else s"${baseURL.getHost}:${baseURL.getPort}" jsonOk(json.as[JsObject] ++ Json.obj("host" -> host)) } }
Example 49
Source File: URLHelper.scala From Mastering-Spark-for-Data-Science with MIT License | 5 votes |
package com.gravity.goose.utils import com.gravity.goose.text.{StringReplacement, HashUtils} import java.net.{URI, MalformedURLException, URL} import org.apache.http.client.methods.HttpGet object URLHelper extends Logging { private val ESCAPED_FRAGMENT_REPLACEMENT: StringReplacement = StringReplacement.compile("#!", "?_escaped_fragment_=") def getCleanedUrl(urlToCrawl: String): Option[ParsingCandidate] = { val finalURL = if (urlToCrawl.contains("#!")) ESCAPED_FRAGMENT_REPLACEMENT.replaceAll(urlToCrawl) else urlToCrawl try { val url = new URL(finalURL) val linkhash = HashUtils.md5(finalURL) Some(ParsingCandidate(finalURL, linkhash, url)) } catch { case e: MalformedURLException => { warn("{0} - is a malformed URL and cannot be processed", urlToCrawl) None } case unknown: Exception => { critical("Unable to process URL: {0} due to an unexpected exception:\n\tException Type: {1}\n\tException Message: {2}\n\tException Stack:\n{3}", urlToCrawl, unknown.getClass.getCanonicalName, unknown.getMessage, unknown.getStackTraceString) None } } } def tryToURL(url: String): Option[URL] = { val finalUrl = if (url.contains("#!")) { ESCAPED_FRAGMENT_REPLACEMENT.replaceAll(url) } else { url } try { Some(new URL(finalUrl)) } catch { case _: Exception => None } } def tryToURI(url: String): Option[URI] = { val finalUrl = if (url.contains("#!")) { ESCAPED_FRAGMENT_REPLACEMENT.replaceAll(url) } else { url } try { Some(URI.create(finalUrl)) } catch { case _: Exception => None } } def tryToHttpGet(url: String): Option[HttpGet] = { tryToURI(url) match { case Some(uri) => Some(new HttpGet(uri)) case None => None } } }
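tryToURL, tryToURI, and tryToHttpGet all follow the same defensive-parsing shape. A self-contained sketch of that pattern using scala.util.Try instead of the explicit try/catch (example URLs are illustrative):

import java.net.URI
import scala.util.Try

object TryToUriDemo extends App {
  // Same defensive-parsing idea as tryToURI above, condensed with Try.
  def tryToURI(url: String): Option[URI] = Try(URI.create(url)).toOption

  assert(tryToURI("http://example.com/a b").isEmpty) // spaces are illegal in a URI
  assert(tryToURI("http://example.com/ok").nonEmpty)
}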
Example 50
Source File: Load.scala From piglet with Apache License 2.0 | 5 votes |
package dbis.piglet.op import java.net.URI import dbis.piglet.Piglet.Lineage import dbis.piglet.expr.{Ref, Value} import dbis.piglet.schema.Schema import dbis.piglet.tools.{CliParams, HDFSService} import scala.collection.mutable import scala.util.{Failure, Success, Try} override def lineageString: String = linStr getOrElse { s"""LOAD%$file%${lastModified match { case None => -1 case Some(Failure(_)) => -2 case Some(Success(v)) => v }}%""" + super.lineageString } override def toString: String = s"""LOAD | out = ${outputs.map(_.name).mkString(",")} | file = ${file.toString} | func = $loaderFunc | outSchema = $schema""".stripMargin override def resolveReferences(mapping: mutable.Map[String, Ref]): Unit = { // we replace only the filename if (file.toString.startsWith("$") && mapping.contains(file.toString)) { mapping(file.toString) match { case Value(v) => val s = v.toString if (s(0) == '"') file = s.substring(1, s.length-1) // file = new URI(s.substring(1, s.length-1)) case _ => } } } }
Example 51
Source File: RDFLoad.scala From piglet with Apache License 2.0 | 5 votes |
package dbis.piglet.op import dbis.piglet.schema._ import java.net.URI import org.kiama.rewriting.Rewriter.everything import scala.collection.mutable case class RDFLoad(private val out: Pipe, uri: URI, grouped: Option[String]) extends PigOperator(out) { schema = if (grouped.isDefined) { if (RDFLoad.groupedSchemas.contains(grouped.get)){ Some(RDFLoad.groupedSchemas(grouped.get)) } else { throw new IllegalArgumentException(grouped.get + " is not a valid RDF grouping column") } } else { RDFLoad.plainSchema } def BGPFilterIsReachable: Boolean = { val isBGPFilter: PartialFunction[Any, Boolean] = {case t: Any => t.isInstanceOf[BGPFilter]} everything[Boolean] ("BGPFilterIsReachable", false) { (old: Boolean, newvalue: Boolean) => old || newvalue } (isBGPFilter) (this) } } object RDFLoad { // lazy final val groupedSchemas = { def groupedSchemas = { val m = mutable.Map[String, Schema]() val columns = List[String]("subject", "predicate", "object") for (grouping_column <- columns) { val fields = columns.filterNot(_ == grouping_column).map { Field(_, Types.CharArrayType) }.toArray m(grouping_column) = Schema( BagType( TupleType( Array( Field(grouping_column, Types.CharArrayType), Field("stmts", BagType( TupleType( fields))))))) } m } }
Example 52
Source File: Store.scala From piglet with Apache License 2.0 | 5 votes |
package dbis.piglet.op import java.net.URI override def lineageString: String = { s"""STORE%$file%""" + super.lineageString } override def toString = s"""STORE | in = $inPipeName | func = $func | file = $file | params = ${if(params != null) params.mkString(",") else "null" }""".stripMargin }
Example 53
Source File: SparkStreamingCodeGenStrategy.scala From piglet with Apache License 2.0 | 5 votes |
package dbis.piglet.codegen.spark import java.net.URI import dbis.piglet.codegen.scala_lang.ScalaCodeGenStrategy import dbis.piglet.codegen.{CodeEmitter, CodeGenContext, CodeGenTarget} import dbis.piglet.mm.ProfilerSettings import dbis.piglet.op._ import dbis.piglet.plan.DataflowPlan import dbis.piglet.tools.Conf class SparkStreamingCodeGenStrategy extends ScalaCodeGenStrategy { override val target = CodeGenTarget.SparkStreaming // override val emitters = super.emitters + ( // s"$pkg.Load" -> new StreamLoadEmitter, // s"$pkg.Dump" -> new StreamDumpEmitter, // s"$pkg.Store" -> new StreamStoreEmitter, // s"$pkg.Grouping" -> new StreamGroupingEmitter, // s"$pkg.OrderBy" -> new StreamOrderByEmitter, // s"$pkg.Distinct" -> new StreamDistinctEmitter, // s"$pkg.Window" -> new StreamWindowEmitter, // s"$pkg.SocketRead" -> new StreamSocketReadEmitter // ) override def emitterForNode[O <: PigOperator](op: O): CodeEmitter[O] = { val emitter = op match { case _: Load => StreamLoadEmitter.instance case _: Dump => StreamDumpEmitter.instance case _: Store => StreamStoreEmitter.instance case _: Grouping => StreamGroupingEmitter.instance case _: OrderBy => StreamOrderByEmitter.instance case _: Distinct => StreamDistinctEmitter.instance case _: Window => StreamWindowEmitter.instance case _: SocketRead => StreamSocketReadEmitter.instance case _ => super.emitterForNode(op) } emitter.asInstanceOf[CodeEmitter[O]] } override def emitFooter(ctx: CodeGenContext, plan: DataflowPlan, profiling: Option[URI] = None): String = { var forceTermin = if(plan.operators.isEmpty) false else true plan.sourceNodes.foreach(op => forceTermin &= op.isInstanceOf[Load]) var params = Map("name" -> "Starting Query") if (forceTermin) params += ("forceTermin" -> forceTermin.toString) CodeEmitter.render(""" ssc.start() | ssc.awaitTermination<if (forceTermin)>OrTimeout(10000)<else>()<endif> | } |}""".stripMargin, params) } }
Example 54
Source File: FlinkStreamingCodeGenStrategy.scala From piglet with Apache License 2.0 | 5 votes |
package dbis.piglet.codegen.flink import java.net.URI import dbis.piglet.Piglet.Lineage import dbis.piglet.codegen.flink.emitter._ import dbis.piglet.codegen.{CodeEmitter, CodeGenContext, CodeGenTarget} import dbis.piglet.mm.ProfilerSettings import dbis.piglet.op._ import dbis.piglet.plan.DataflowPlan class FlinkStreamingCodeGenStrategy extends FlinkCodeGenStrategy { override val target = CodeGenTarget.FlinkStreaming override def emitterForNode[O <: PigOperator](op: O): CodeEmitter[O] = { val emitter = op match { case _: Load => StreamLoadEmitter.instance case _: Store => StreamStoreEmitter.instance case _: SocketRead => SocketReadEmitter.instance case _: SocketWrite => SocketWriteEmitter.instance case _: Filter => StreamFilterEmitter.instance case _: Foreach => StreamForeachEmitter.instance case _: Grouping => StreamGroupingEmitter.instance case _: OrderBy => StreamOrderByEmitter.instance case _: Accumulate => StreamAccumulateEmitter.instance case _: Join => StreamJoinEmitter.instance case _: Cross => StreamCrossEmitter.instance case _: Window => StreamWindowEmitter.instance case _: WindowApply => StreamWindowApplyEmitter.instance case _: Distinct => StreamDistinctEmitter.instance case _: Sample => StreamSampleEmitter.instance case _ => super.emitterForNode(op) } emitter.asInstanceOf[CodeEmitter[O]] } override def emitHeader2(ctx: CodeGenContext, scriptName: String, profiling: Option[ProfilerSettings] = None): String = { CodeEmitter.render(""" def main(args: Array[String]) {<\n>""", Map.empty) } override def emitFooter(ctx: CodeGenContext, plan: DataflowPlan, profiling: Option[URI] = None): String = { val params = Map("name" -> "Starting Query") CodeEmitter.render(""" env.execute("<name>") |<if (hook)> | shutdownHook() |<endif> | } |}""".stripMargin, params) } }
Example 55
Source File: FlinkCodeGenStrategy.scala From piglet with Apache License 2.0 | 5 votes |
package dbis.piglet.codegen.flink import java.net.URI import dbis.piglet.codegen.flink.emitter._ import dbis.piglet.codegen.scala_lang.ScalaCodeGenStrategy import dbis.piglet.codegen.{CodeEmitter, CodeGenContext, CodeGenTarget} import dbis.piglet.mm.ProfilerSettings import dbis.piglet.op._ import dbis.piglet.plan.DataflowPlan class FlinkCodeGenStrategy extends ScalaCodeGenStrategy { override val target = CodeGenTarget.FlinkStreaming // override val emitters = super.emitters + ( // s"$pkg.Load" -> FlinkLoadEmitter.instance, // s"$pkg.Dump" -> FlinkDumpEmitter.instance, // s"$pkg.Store" -> FlinkStoreEmitter.instance // ) override def emitterForNode[O <: PigOperator](op: O): CodeEmitter[O] = { val emitter = op match { case _: Load => LoadEmitter.instance case _: Dump => DumpEmitter.instance case _: Store => StoreEmitter.instance case _: Grouping => GroupingEmitter.instance case _: OrderBy => OrderByEmitter.instance case _: Join => JoinEmitter.instance case _: Limit => LimitEmitter.instance case _: StreamOp => StreamOpEmitter.instance case _: Accumulate => AccumulateEmitter.instance case _ => super.emitterForNode(op) } emitter.asInstanceOf[CodeEmitter[O]] } override def emitHeader2(ctx: CodeGenContext, scriptName: String, profiling: Option[ProfilerSettings] = None): String = { CodeEmitter.render(""" def main(args: Array[String]) {<\n>""", Map.empty) } override def emitFooter(ctx: CodeGenContext, plan: DataflowPlan, profiling: Option[URI] = None): String = { val params = Map("name" -> "Starting Query") CodeEmitter.render("""<if (hook)> | shutdownHook() |<endif> | } |}""".stripMargin, params) } }
Example 56
Source File: WSPlayListener.scala From scala-loci with Apache License 2.0 | 5 votes |
package loci package communicator package ws.akka import java.net.URI import java.util.concurrent.ConcurrentLinkedQueue import play.api.mvc.Security.AuthenticatedRequest import play.api.mvc.{RequestHeader, Results, WebSocket} import scala.concurrent.Future import scala.util.{Failure, Success, Try} private object WSPlayListener { locally(WSPlayListener) def apply[P <: WS: WSProtocolFactory](properties: WS.Properties) = new Listener[P] with WebSocketHandler { private def webSocket(authenticated: Either[Option[String], Any]) = WebSocket { request => val uri = new URI(s"dummy://${request.host}") val host = uri.getHost val port = uri.getPort val certificates = request.clientCertificateChain.toSeq.flatten val isAuthenticated = authenticated.isRight || compatibility.either.left(authenticated).nonEmpty || (request.secure && certificates.nonEmpty) val isProtected = request.secure val isEncrypted = request.secure val ws = implicitly[WSProtocolFactory[P]] make ( request.uri, Option(host), if (port < 0) None else Some(port), this, isAuthenticated, isEncrypted, isProtected, Some(Left(request)), authenticated.left.toOption.flatten toRight certificates) Future successful (ws match { case Failure(exception) => connectionEstablished(Failure(exception)) Left(Results.NotFound) case Success(ws) => Right(WSPlayHandler handleWebSocket ( Future successful ws, properties, connectionEstablished)) }) } def apply(authenticatedName: String) = webSocket(Left(Some(authenticatedName))) def apply(authenticatedName: Option[String]) = webSocket(Left(authenticatedName)) def apply(request: RequestHeader) = request match { case request: AuthenticatedRequest[_, _] => request.user match { case user: String => webSocket(Left(Some(user)))(request) case user => webSocket(Right(user))(request) } case _ => webSocket(Left(None))(request) } private val connected = new ConcurrentLinkedQueue[Connected[P]] private def connectionEstablished(connection: Try[Connection[P]]) = { val iterator = connected.iterator while (iterator.hasNext) iterator.next().fire(connection) } protected def startListening(connectionEstablished: Connected[P]): Try[Listening] = { connected.add(connectionEstablished) Success(new Listening { def stopListening(): Unit = connected.remove(connectionEstablished) }) } } }
Example 57
Source File: AddJar.scala From incubator-toree with Apache License 2.0 | 5 votes |
package org.apache.toree.magic.builtin import java.io.{File, PrintStream} import java.net.{URL, URI} import java.nio.file.{Files, Paths} import java.util.zip.ZipFile import org.apache.toree.magic._ import org.apache.toree.magic.builtin.AddJar._ import org.apache.toree.magic.dependencies._ import org.apache.toree.utils.{ArgumentParsingSupport, DownloadSupport, LogLike, FileUtils} import com.typesafe.config.Config import org.apache.hadoop.fs.Path import org.apache.toree.plugins.annotations.Event object AddJar { val HADOOP_FS_SCHEMES = Set("hdfs", "s3", "s3n", "file") private var jarDir:Option[String] = None def getJarDir(config: Config): String = { jarDir.getOrElse({ jarDir = Some( if(config.hasPath("jar_dir") && Files.exists(Paths.get(config.getString("jar_dir")))) { config.getString("jar_dir") } else { FileUtils.createManagedTempDirectory("toree_add_jars").getAbsolutePath } ) jarDir.get }) } } class AddJar extends LineMagic with IncludeInterpreter with IncludeOutputStream with DownloadSupport with ArgumentParsingSupport with IncludeKernel with IncludePluginManager with IncludeConfig with LogLike { // Option to mark re-downloading of jars private val _force = parser.accepts("f", "forces re-download of specified jar") // Option to mark re-downloading of jars private val _magic = parser.accepts("magic", "loads jar as a magic extension") // Lazy because the outputStream is not provided at construction private def printStream = new PrintStream(outputStream) ) } else { downloadFile( new URL(jarRemoteLocation), new File(downloadLocation).toURI.toURL ) } // Report download finished printStream.println(s"Finished download of $jarName") } else { printStream.println(s"Using cached version of $jarName") } // validate jar file if(! isValidJar(fileDownloadLocation)) { throw new IllegalArgumentException(s"Jar '$jarName' is not valid.") } if (_magic) { val plugins = pluginManager.loadPlugins(fileDownloadLocation) pluginManager.initializePlugins(plugins) } else { kernel.addJars(fileDownloadLocation.toURI) } } }
Example 58
Source File: HazelCastConnection.scala From stream-reactor with Apache License 2.0 | 5 votes |
package com.datamountaineer.streamreactor.connect.hazelcast import java.io.{File, FileNotFoundException} import java.net.URI import java.util.{Properties, UUID} import com.datamountaineer.streamreactor.connect.hazelcast.config.{HazelCastConnectionConfig, HazelCastSocketConfig} import com.hazelcast.cache.HazelcastCachingProvider import com.hazelcast.client.HazelcastClient import com.hazelcast.client.config.{ClientConfig, ClientNetworkConfig, SocketOptions} import com.hazelcast.config.{GroupConfig, SSLConfig} import com.hazelcast.core.HazelcastInstance import javax.cache.{CacheManager, Caching} import scala.collection.JavaConverters._ object HazelCastConnection { def buildClient(config: HazelCastConnectionConfig): HazelcastInstance = { val clientConfig = new ClientConfig val networkConfig = clientConfig.getNetworkConfig if (config.sslEnabled) { setSSLOptions(config) networkConfig.setSSLConfig(new SSLConfig().setEnabled(true)) } networkConfig.setAddresses(config.members.toList.asJava) val groupConfig = new GroupConfig(config.group, config.pass) clientConfig.setGroupConfig(groupConfig) buildSocketOptions(networkConfig, config.socketConfig) clientConfig.setInstanceName(config.group + "-kafka-connect-" + UUID.randomUUID().toString) HazelcastClient.newHazelcastClient(clientConfig) } private def buildSocketOptions(clientNetworkConfig: ClientNetworkConfig, socketConfig: HazelCastSocketConfig): SocketOptions = { val socketOptions = clientNetworkConfig.getSocketOptions socketOptions.setKeepAlive(socketConfig.keepAlive) socketOptions.setTcpNoDelay(socketConfig.tcpNoDelay) socketOptions.setReuseAddress(socketConfig.reuseAddress) socketOptions.setLingerSeconds(socketConfig.lingerSeconds) socketOptions.setBufferSize(socketConfig.bufferSize) socketOptions } def getCacheManager(client: HazelcastInstance, name: String) : CacheManager = { val instanceName = client.getName() val cachingProvider = Caching.getCachingProvider() // Create Properties instance pointing to a named HazelcastInstance val properties = new Properties() properties.setProperty(HazelcastCachingProvider.HAZELCAST_INSTANCE_NAME, instanceName) val cacheManagerName = new URI(name ) val cacheManager = cachingProvider.getCacheManager(cacheManagerName, null, properties ) cacheManager } def setSSLOptions(config: HazelCastConnectionConfig) = { config.keyStoreLocation match { case Some(path) => if (!new File(path).exists) { throw new FileNotFoundException(s"Keystore not found in: $path") } System.setProperty("javax.net.ssl.keyStorePassword", config.keyStorePassword.getOrElse("")) System.setProperty("javax.net.ssl.keyStore", path) System.setProperty("javax.net.ssl.keyStoreType", config.keyStoreType.getOrElse("jks")) case None => } config.trustStoreLocation match { case Some(path) => if (!new File(path).exists) { throw new FileNotFoundException(s"Truststore not found in: $path") } System.setProperty("javax.net.ssl.trustStorePassword", config.trustStorePassword.getOrElse("")) System.setProperty("javax.net.ssl.trustStore", path) System.setProperty("javax.net.ssl.trustStoreType", config.trustStoreType.getOrElse("jks")) case None => } } }
Example 59
Source File: DTLSConnectionFn.scala From stream-reactor with Apache License 2.0 | 5 votes |
package com.datamountaineer.streamreactor.connect.coap.connection import java.io.FileInputStream import java.net.{ConnectException, InetAddress, InetSocketAddress, URI} import java.security.cert.Certificate import java.security.{KeyStore, PrivateKey} import com.datamountaineer.streamreactor.connect.coap.configs.{CoapConstants, CoapSetting} import com.typesafe.scalalogging.StrictLogging import org.eclipse.californium.core.CoapClient import org.eclipse.californium.core.coap.CoAP import org.eclipse.californium.core.network.CoapEndpoint import org.eclipse.californium.core.network.config.NetworkConfig import org.eclipse.californium.scandium.DTLSConnector import org.eclipse.californium.scandium.config.DtlsConnectorConfig import org.eclipse.californium.scandium.dtls.cipher.CipherSuite import org.eclipse.californium.scandium.dtls.pskstore.InMemoryPskStore object DTLSConnectionFn extends StrictLogging { def discoverServer(address: String, uri: URI): URI = { val client = new CoapClient(s"${uri.getScheme}://$address:${uri.getPort.toString}/.well-known/core") client.useNONs() val response = client.get() if (response != null) { logger.info(s"Discovered Server ${response.advanced().getSource.toString}.") new URI(uri.getScheme, uri.getUserInfo, response.advanced().getSource.getHostName, response.advanced().getSourcePort, uri.getPath, uri.getQuery, uri.getFragment) } else { logger.error(s"Unable to find any servers on local network with multicast address $address.") throw new ConnectException(s"Unable to find any servers on local network with multicast address $address.") } } }
Example 60
Source File: CopyJarAwsCliAction.scala From berilia with Apache License 2.0 | 5 votes |
package com.criteo.dev.cluster.aws import java.io.File import java.net.{URI, URL} import com.criteo.dev.cluster.config.GlobalConfig import com.criteo.dev.cluster._ import com.criteo.dev.cluster.command.RsyncAction import org.jclouds.compute.domain.NodeMetadata.Status import org.slf4j.LoggerFactory import scala.collection.mutable import scala.concurrent.ExecutionContext.Implicits.global import scala.concurrent.duration.Duration import scala.concurrent.{Await, Future} import sys.process._ @Public object CopyJarAwsCliAction extends CliAction[Unit] { override def command: String = "copy-jar-aws" override def usageArgs: List[Any] = List("instance.id", "source", "destination") override def help: String = "Copies a file from source to destination path to all nodes of a given cluster (if target directory exists)." private val logger = LoggerFactory.getLogger(CopyJarAwsCliAction.getClass) override def applyInternal(args: List[String], conf: GlobalConfig): Unit = { val instanceId = args(0) val cluster = AwsUtilities.getCluster(conf.backCompat, instanceId) if (!cluster.master.getStatus().equals(Status.RUNNING)) { logger.info("No running clusters found matching criteria.") } val source = args(1) val target = args(2) val sourceUri = new URI(source) val targetFile = new File(target) GeneralUtilities.prepareTempDir val sourceFile = sourceUri.getScheme().toLowerCase() match { case "http" => { val path = s"${GeneralUtilities.getTempDir()}/${targetFile.getName}" DevClusterProcess.process(s"curl -o $path $source").!! path } //only localhost supported case "file" => sourceUri.getPath() case _ => throw new IllegalArgumentException("Only http and file supported for sources for now.") } //copy over files in parallel val nodesToCopy = cluster.slaves ++ Set(cluster.master) logger.info(s"Copying to ${nodesToCopy.size} nodes in parallel.") val copyFutures = nodesToCopy.map(u => GeneralUtilities.getFuture { val targetN = NodeFactory.getAwsNode(conf.target.aws, u) val role = if (AwsUtilities.isSlave(u)) "Slave" else "Master" try { RsyncAction( srcPath = sourceFile, targetN = targetN, targetPath = target, sudo = true) s"$role Node ${u.getId()} with ${targetN.ip}: Copy successful." } catch { case e : Exception => s"$role Node ${u.getId()} with ${targetN.ip}: Copy Failed. This is normal if the given directory does not exist on the node." + s" If not expected, check the directory location and try again." } }) val aggCopyFuture = Future.sequence(copyFutures) val result = Await.result(aggCopyFuture, Duration.Inf) result.foreach(r => logger.info(r)) GeneralUtilities.cleanupTempDir } }
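The copy step fans out one Future per node, aggregates with Future.sequence, and blocks for CLI semantics. A self-contained sketch of that fan-out/fan-in shape (node addresses and messages are illustrative):

import scala.concurrent.{Await, Future}
import scala.concurrent.duration.Duration
import scala.concurrent.ExecutionContext.Implicits.global

object ParallelCopyPattern extends App {
  // One Future per node, just like the jar copy above.
  val nodes = Seq("10.0.0.1", "10.0.0.2", "10.0.0.3")
  val copies = nodes.map(ip => Future { s"Node $ip: copy successful." })

  // Future.sequence turns Seq[Future[String]] into Future[Seq[String]],
  // and Await gives the blocking behaviour a CLI command needs.
  val results = Await.result(Future.sequence(copies), Duration.Inf)
  results.foreach(println)
}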
Example 61
Source File: Navigation.scala From metabrowse with Apache License 2.0 | 5 votes |
package metabrowse import scala.util.Try import scala.scalajs.js import java.net.URI import monaco.Range object Navigation { class State(val path: String, val selection: Option[Selection]) extends js.Object { override def toString: String = path + selection.map(_.toString).fold("")("#" + _) } case class Selection( startLine: Int, startColumn: Int, endLine: Int, endColumn: Int ) { def toRange() = new Range(startLine, startColumn, endLine, endColumn) override def toString: String = { def position(lineNumber: Int, column: Int): String = s"L$lineNumber${if (column > 1) s"C${column}" else ""}" val start = position(startLine, startColumn) if (startLine == endLine && startColumn == endColumn) start else if (startLine == endLine - 1 && startColumn == 1 && endColumn == 1) start else start + "-" + position(endLine, endColumn) } } object Selection { val Regex = """L(\d+)(C(\d+))?(-L(\d+)(C(\d+))?)?""".r def fromRange(range: Range): Selection = new Selection( range.startLineNumber.toInt, range.startColumn.toInt, range.endLineNumber.toInt, range.endColumn.toInt ) } def fromHistoryState(browserState: js.Any): Option[Navigation.State] = { Option(browserState) match { case Some(state) => Navigation.parseState(state.asInstanceOf[String]) case None => None } } def parseState(state: String): Option[Navigation.State] = { for (uri <- parseUri(state)) yield { val selection = Option(uri.getFragment).flatMap(parseSelection) new State(uri.getPath, selection) } } def parseUri(uri: String): Option[URI] = if (uri.isEmpty) None else Try(URI.create(uri)).toOption def parseSelection(selection: String): Option[Selection] = { selection match { case Selection.Regex(fromLine, _, fromCol, _, toLine, _, toCol) => val startLine = fromLine.toInt val startColumn = Option(fromCol).map(_.toInt).getOrElse(1) val endLine = Option(toLine).map(_.toInt).getOrElse(startLine) val endColumn = Option(toCol).map(_.toInt).getOrElse { if (endLine == startLine) startColumn else 1 } Some(new Selection(startLine, startColumn, endLine, endColumn)) case _ => None } } }
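A sketch of the selection grammar in action. Navigation targets Scala.js (it imports scala.scalajs.js and monaco), so treat this as an illustration of the expected parse results under a Scala.js build rather than a JVM-runnable test:

object NavigationDemo {
  import Navigation._

  // "L10C2-L12" selects from line 10, column 2 up to line 12, column 1.
  assert(parseSelection("L10C2-L12").contains(Selection(10, 2, 12, 1)))

  // A bare "L10" is a single caret at line 10, column 1.
  assert(parseSelection("L10").contains(Selection(10, 1, 10, 1)))

  // Anything outside the L<n>C<n> grammar yields None.
  assert(parseSelection("nonsense").isEmpty)
}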
Example 62
Source File: ModelConverter.scala From api-first-hand with MIT License | 5 votes |
package de.zalando.swagger import java.io.File import java.net.URI import de.zalando.apifirst.Application.StrictModel import de.zalando.apifirst.Domain.Type import de.zalando.apifirst.Hypermedia.{ State, TransitionProperties } import de.zalando.apifirst.naming.Reference import de.zalando.swagger.strictModel.SwaggerModel trait ParameterNaming { type Types = Seq[Type] type NamedType = (Reference, Type) type NamedTypes = Seq[NamedType] } object ModelConverter extends ParameterNaming { def fromModel(base: URI, model: SwaggerModel, file: Option[File] = None, keyPrefix: String = "x-api-first", autoConvert: Boolean = true): StrictModel = { val converter = new TypeConverter(base, model, keyPrefix) val typeDefs = converter.convert val discriminators = converter.discriminators.toMap val inlineParameters = new ParametersConverter(base, model, keyPrefix, typeDefs, autoConvert).parameters val securityDefinitions = SecurityConverter.convertDefinitions(model.securityDefinitions) val apiCalls = new PathsConverter(base, model, keyPrefix, inlineParameters, securityDefinitions, file.map(_.getName)).convert val packageName = model.vendorExtensions.get(s"$keyPrefix-package") val inheritedPackageName = apiCalls.headOption collect { case h if apiCalls.seq.forall { _.handler.packageName == h.handler.packageName } => h.handler.packageName } val stateTransitionsTable = model.transitions map { case (fromName, toStates) => State(fromName) -> toStates.map { case (toName, props) => State(toName) -> TransitionProperties(Option(props).flatMap(_.get("condition").map(_.toString))) } } StrictModel(apiCalls, typeDefs.toMap, inlineParameters, discriminators, model.basePath, packageName orElse inheritedPackageName, stateTransitionsTable, securityDefinitions) } }
Example 63
Source File: StrictParseExamplesTest.scala From api-first-hand with MIT License | 5 votes |
package de.zalando.swagger import java.io.File import java.net.URI import de.zalando.swagger.strictModel.SwaggerModel import org.scalatest.{ FunSpec, MustMatchers } class StrictParseExamplesTest extends FunSpec with MustMatchers with ExpectedResults { val fixtures = new File(resourcesPath + "examples").listFiles ++ new File(resourcesPath + "schema_examples").listFiles describe("Strict Swagger Parser") { fixtures.filter(_.getName.endsWith(".yaml")).foreach { file => it(s"should parse the yaml swagger file ${file.getName} as specification") { val result = StrictYamlParser.parse(file) result._1 mustBe a[URI] result._2 mustBe a[SwaggerModel] } } } }
Example 64
Source File: FileLink.scala From almond with BSD 3-Clause "New" or "Revised" License | 5 votes |
package almond.display import java.net.URI final class FileLink private( val link: String, val beforeHtml: String, val afterHtml: String, val urlPrefix: String, val displayId: String ) extends UpdatableDisplay { private def copy( link: String = link, beforeHtml: String = beforeHtml, afterHtml: String = afterHtml, urlPrefix: String = urlPrefix ): FileLink = new FileLink(link, beforeHtml, afterHtml, urlPrefix, displayId) def withLink(link: String): FileLink = copy(link = link) def withBeforeHtml(beforeHtml: String): FileLink = copy(beforeHtml = beforeHtml) def withAfterHtml(afterHtml: String): FileLink = copy(afterHtml = afterHtml) def withUrlPrefix(urlPrefix: String): FileLink = copy(urlPrefix = urlPrefix) private def html: String = { val link0 = new URI(urlPrefix + link).toASCIIString beforeHtml + s"""<a href="$link0" target='_blank'>${FileLink.escapeHTML(link)}</a>""" + afterHtml } def data(): Map[String, String] = Map(Html.mimeType -> html) } object FileLink { def apply(link: String): FileLink = new FileLink(link, "", "<br>", "", UpdatableDisplay.generateId()) // https://stackoverflow.com/a/25228492/3714539 private def escapeHTML(s: String): String = { val out = new StringBuilder(java.lang.Math.max(16, s.length)) for (i <- 0 until s.length) { val c = s.charAt(i) if (c > 127 || c == '"' || c == '<' || c == '>' || c == '&') { out.append("&#") out.append(c.toInt) out.append(';') } else out.append(c) } out.toString } }
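A usage sketch of the builder, assuming it runs alongside almond's display package (which supplies Html.mimeType and UpdatableDisplay); the file path is illustrative:

object FileLinkDemo extends App {
  // Builds an HTML link payload for Jupyter-style display data.
  val link = FileLink("reports/output.html")
    .withUrlPrefix("/files/")
    .withBeforeHtml("Result: ")

  // data() maps the HTML MIME type to the rendered anchor tag.
  link.data().foreach { case (mime, html) => println(s"$mime -> $html") }
}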
Example 65
Source File: JSONHTTP.scala From orders-aws with Apache License 2.0 | 5 votes |
package works.weave.socks.aws.orders.dataaccess.web import com.amazonaws.util.IOUtils import com.fasterxml.jackson.core.`type`.TypeReference import java.lang.reflect.ParameterizedType import java.lang.reflect.Type import java.net.URI import org.apache.http.client.methods.HttpGet import org.apache.http.impl.client.HttpClientBuilder import org.slf4j.LoggerFactory import works.weave.socks.aws.orders.ProjectDefaultJacksonMapper object JSONHTTP { val Log = LoggerFactory.getLogger(getClass) val objectMapper = ProjectDefaultJacksonMapper.build() // after (_.bla) def get[T : Manifest : NotNothing](uri : URI) : T = { val client = HttpClientBuilder.create.build val get = new HttpGet(uri) get.addHeader("Accept", "application/json") val response = client.execute(get) val responseString = IOUtils.toString(response.getEntity.getContent) Log.info(s"Got status ${response.getStatusLine.getStatusCode}") require(response.getStatusLine.getStatusCode == 200) Log.info(s"Got response from URI $uri: $responseString") objectMapper.readValue(responseString, typeReference[T]) } def typeReference[T : Manifest] = new TypeReference[T] { override def getType = typeFromManifest(manifest[T]) } def typeFromManifest(m : Manifest[_]) : Type = { if (m.typeArguments.isEmpty) { m.runtimeClass } else new ParameterizedType { def getRawType = m.runtimeClass def getActualTypeArguments = m.typeArguments.map(typeFromManifest).toArray def getOwnerType = null } } // Trick to disable nothign for type param sealed trait NotNothing[-T] object NotNothing { implicit object notNothing extends NotNothing[Any] implicit object `The error is because the missing type parameter was resolved to Nothing` extends NotNothing[Nothing] } }
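A usage sketch; the endpoint and payload shape are hypothetical, and it assumes ProjectDefaultJacksonMapper registers the Jackson Scala module so case classes deserialize:

import java.net.URI

object JsonHttpDemo extends App {
  // The type parameter drives Jackson deserialization through the
  // Manifest-based typeReference helper above.
  case class Health(status: String)

  val health = JSONHTTP.get[Health](new URI("http://localhost:8080/health"))
  println(health.status)
}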
Example 66
Source File: HealthCheckResource.scala From orders-aws with Apache License 2.0 | 5 votes |
package works.weave.socks.aws.orders.presentation.resource

import java.net.URI
import java.util.Calendar

import javax.ws.rs.GET
import javax.ws.rs.Path
import javax.ws.rs.core.Response
import org.slf4j.LoggerFactory
import org.springframework.stereotype.Component
import works.weave.socks.aws.orders.presentation.resource.HealthCheckResource._
import works.weave.socks.spring.aws.DynamoConfiguration

@Component
@Path("health")
class HealthCheckResource(dynamoConnection : DynamoConfiguration) {

  @GET
  def getHealthCheck() : Response = {
    Log.info("health check requested")

    val dateNow = Calendar.getInstance().getTime()

    val ordersHealth = Map(
      "service" -> "orders-aws",
      "status" -> "OK",
      "time" -> dateNow)

    val dynamoDBHealth = scala.collection.mutable.Map(
      "service" -> "orders-aws-dynamodb",
      "status" -> "OK",
      "time" -> dateNow)

    try {
      val table = dynamoConnection.client.describeTable("orders")
    } catch {
      case unknown : Throwable => dynamoDBHealth("status") = "err"
    }

    val map = Map("health" -> Array(ordersHealth, dynamoDBHealth))
    Log.info("health check completed")
    Response.created(new URI("http://tbd")).entity(map).build()
  }
}

object HealthCheckResource {
  val Log = LoggerFactory.getLogger(classOf[HealthCheckResource])
}
Example 67
Source File: Order.scala From orders-aws with Apache License 2.0 | 5 votes |
package works.weave.socks.aws.orders.presentation.value

import java.net.URI
import works.weave.socks.aws.orders.domain.GOrder
import works.weave.socks.aws.orders.domain.GOrderTypes

case class OrderRequest(
  id : String,
  customerId : String,
  customer : URI, // Swagger: OrderCustomer,
  address : URI, // Swagger: OrderAddress,
  card : URI, // Swagger: OrderCard,
  items : URI, // Swagger: List[OrderItems],
  shipment : URI,
  date : String,
  total : Number) extends GOrder[Order.Minimal]

case class Order(
  id : String,
  customerId : String,
  customer : OrderCustomer,
  address : OrderAddress,
  card : OrderCard,
  items : List[OrderItems],
  shipment : Option[OrderShipment],
  date : String,
  total : Number,
  _links : OrderLinks) extends GOrder[Order.Full]

object Order {
  trait Full extends GOrderTypes {
    override type Customer = OrderCustomer
    override type Address = OrderAddress
    override type Card = OrderCard
    override type Items = List[OrderItems]
    override type Shipment = Option[OrderShipment]
  }
  trait Minimal extends GOrderTypes {
    type Customer = URI
    type Address = URI
    type Card = URI
    type Items = URI
    type Shipment = URI
  }
}

case class OrderLinks(
  self : LinksSelf)
Example 68
Source File: AddressRepository.scala From orders-aws with Apache License 2.0 | 5 votes |
package works.weave.socks.aws.orders.domain.repository

import java.net.URI
import works.weave.socks.aws.orders.domain.repository.AddressRepository.Address

trait AddressRepository {
  def findByURI(uri : URI) : Address
}

object AddressRepository {
  case class Address(
    number : String,
    street : String,
    city : String,
    postcode : String,
    country : String)
}
Example 69
Source File: NativeSystemProvider.scala From scala-game-library with MIT License | 5 votes |
package sgl
package native

import sgl.util._

import java.net.URI
import java.awt.Desktop
import scala.concurrent.ExecutionContext
import scala.language.implicitConversions

trait NativeSystemProvider extends SystemProvider with PartsResourcePathProvider {

  object NativeSystem extends System {

    override def exit(): Unit = {
      sys.exit()
    }

    override def currentTimeMillis: Long = java.lang.System.currentTimeMillis
    override def nanoTime: Long = java.lang.System.nanoTime

    override def loadText(path: ResourcePath): Loader[Array[String]] = {
      ???
      //val is = getClass.getClassLoader.getResourceAsStream(path)
      //scala.io.Source.fromInputStream(is).getLines
    }

    override def loadBinary(path: ResourcePath): Loader[Array[Byte]] = {
      ???
    }

    override def openWebpage(uri: URI): Unit = {
      ???
    }
  }
  override val System = NativeSystem

  // TODO: This is not really a root as we start with a first part ("assets"). We
  // should instead add the assets prefix at the time when we convert the parts to
  // a path. For now, it's a fine hack to get something working though.
  override val ResourcesRoot: ResourcePath = PartsResourcePath(Vector("assets"))

  // TODO: Add support for multi dpi in loadImage (so do not always use drawable-mdpi).
  override val MultiDPIResourcesRoot: ResourcePath = PartsResourcePath(Vector("assets", "drawable-mdpi"))
}
Example 70
Source File: Html5SystemProvider.scala From scala-game-library with MIT License | 5 votes |
package sgl
package html5

import java.net.URI

import org.scalajs.dom
import scala.scalajs.js
import js.typedarray.{ArrayBuffer, TypedArrayBuffer}

import sgl.util._

trait Html5SystemProvider extends SystemProvider with PartsResourcePathProvider {

  object Html5System extends System {

    override def exit(): Unit = {}

    override def currentTimeMillis: Long = js.Date.now.toLong

    // Note that there is no way to get nanosecond precision in Javascript, so we
    // have to do with microsecond granularity.
    override def nanoTime: Long = (dom.window.performance.now()*1000L*1000L).toLong

    //probably cleaner to return lazily and block only when iterator is called
    //class LazyTextResource(rawFile: dom.XMLHttpRequest) extends Iterator[String] = {
    //}
    //but the best would be to redefine these loading APIs to be async
    override def loadText(path: ResourcePath): Loader[Array[String]] = {
      val p = new DefaultLoader[Array[String]]()
      val rawFile = new dom.XMLHttpRequest()
      rawFile.open("GET", path.path, true)
      rawFile.onreadystatechange = (event: dom.Event) => {
        if(rawFile.readyState == 4) {
          if(rawFile.status == 200 || rawFile.status == 0) {
            p.success(rawFile.responseText.split("\n").toArray)
          } else {
            p.failure(new RuntimeException("file: " + path + " failed to load"))
          }
        }
      }
      rawFile.send(null)
      p.loader
    }

    override def loadBinary(path: ResourcePath): Loader[Array[Byte]] = {
      val p = new DefaultLoader[Array[Byte]]()
      val fileReq = new dom.XMLHttpRequest()
      fileReq.open("GET", path.path, true)
      fileReq.responseType = "arraybuffer"
      fileReq.onreadystatechange = (event: dom.Event) => {
        if(fileReq.readyState == 4) {
          if(fileReq.status == 200 || fileReq.status == 0) {
            val responseBuffer: ArrayBuffer = fileReq.response.asInstanceOf[ArrayBuffer]
            val bb: java.nio.ByteBuffer = TypedArrayBuffer.wrap(responseBuffer)
            val array: Array[Byte] = new Array(bb.remaining)
            bb.get(array)
            p.success(array)
          } else {
            p.failure(new RuntimeException("file: " + path + " failed to load"))
          }
        }
      }
      fileReq.send(null)
      p.loader
    }

    override def openWebpage(uri: URI): Unit = {
      dom.window.open(uri.toString)
    }
  }
  val System = Html5System

  override val ResourcesRoot = PartsResourcePath(Vector("static"))
  final override val MultiDPIResourcesRoot = PartsResourcePath(Vector())
}
Example 71
Source File: AndroidSystemProvider.scala From scala-game-library with MIT License | 5 votes |
package sgl
package android

import sgl.util._

import _root_.android.app.Activity
import _root_.android.content.Intent
import _root_.android.net.Uri
import _root_.android.content.ActivityNotFoundException

import java.net.URI

import scala.concurrent.ExecutionContext

trait AndroidSystemProvider extends SystemProvider with PartsResourcePathProvider {
  self: AndroidWindowProvider with Activity =>

  object AndroidSystem extends System {

    override def exit(): Unit = {
      self.finish()
    }

    override def currentTimeMillis: Long = java.lang.System.currentTimeMillis
    override def nanoTime: Long = java.lang.System.nanoTime

    override def loadText(path: ResourcePath): Loader[Array[String]] = FutureLoader {
      try {
        val am = self.getAssets()
        val is = am.open(path.path)
        scala.io.Source.fromInputStream(is).getLines.toArray
      } catch {
        case (e: java.io.IOException) => throw new ResourceNotFoundException(path)
      }
    }

    override def loadBinary(path: ResourcePath): Loader[Array[Byte]] = FutureLoader {
      try {
        val am = self.getAssets()
        val is = am.open(path.path)
        val bis = new java.io.BufferedInputStream(is)
        val bytes = new scala.collection.mutable.ListBuffer[Byte]
        var b: Int = 0
        while({ b = bis.read; b != -1}) {
          bytes.append(b.toByte)
        }
        bytes.toArray
      } catch {
        case (e: java.io.IOException) => throw new ResourceNotFoundException(path)
      }
    }

    override def openWebpage(uri: URI): Unit = {
      val browserIntent = new Intent(Intent.ACTION_VIEW, Uri.parse(uri.toString))
      self.startActivity(browserIntent)
    }

    override def openGooglePlayApp(id: String, params: Map[String, String]): Unit = {
      try {
        val base = s"market://details?id=$id"
        val uri = Uri.parse(base + params.map{ case (k, v) => s"&$k=$v" }.mkString)
        val intent = new Intent(Intent.ACTION_VIEW, uri)
        self.startActivity(intent)
      } catch {
        case (ex: ActivityNotFoundException) => {
          // use the default implementation, which opens a webpage.
          super.openGooglePlayApp(id, params)
        }
      }
    }
  }
  val System = AndroidSystem

  override val ResourcesRoot = PartsResourcePath(Vector())
  override val MultiDPIResourcesRoot = PartsResourcePath(Vector())

  // Centralize the execution context used for asynchronous tasks in the Android backend.
  // Could be overridden at wiring time.
  implicit val executionContext: ExecutionContext = ExecutionContext.Implicits.global
}
Example 72
Source File: AWTSystemProvider.scala From scala-game-library with MIT License | 5 votes |
package sgl
package awt

import sgl.util._

import java.net.URI
import java.awt.Desktop
import java.io.File

import scala.concurrent.ExecutionContext

trait AWTSystemProvider extends SystemProvider with PartsResourcePathProvider {

  object AWT5System extends System {

    override def exit(): Unit = {
      sys.exit()
    }

    override def currentTimeMillis: Long = java.lang.System.currentTimeMillis
    override def nanoTime: Long = java.lang.System.nanoTime

    override def loadText(path: ResourcePath): Loader[Array[String]] = {
      FutureLoader {
        val localAsset = if(DynamicResourcesEnabled) findDynamicResource(path) else None
        val is = localAsset.map(a => new java.io.FileInputStream(a))
                           .getOrElse(getClass.getClassLoader.getResourceAsStream(path.path))
        if(is == null) {
          throw new ResourceNotFoundException(path)
        }
        scala.io.Source.fromInputStream(is).getLines.toArray
      }
    }

    override def loadBinary(path: ResourcePath): Loader[Array[Byte]] = {
      FutureLoader {
        val localAsset = if(DynamicResourcesEnabled) findDynamicResource(path) else None
        val is = localAsset.map(a => new java.io.FileInputStream(a))
                           .getOrElse(getClass.getClassLoader.getResourceAsStream(path.path))
        if(is == null) {
          throw new ResourceNotFoundException(path)
        }
        val bis = new java.io.BufferedInputStream(is)
        val bytes = new scala.collection.mutable.ListBuffer[Byte]
        var b: Int = 0
        while({ b = bis.read; b != -1}) {
          bytes.append(b.toByte)
        }
        bytes.toArray
      }
    }

    override def openWebpage(uri: URI): Unit = {
      val desktop = if(Desktop.isDesktopSupported()) Desktop.getDesktop() else null
      if(desktop != null && desktop.isSupported(Desktop.Action.BROWSE)) {
        try {
          desktop.browse(uri)
        } catch {
          case (e: Exception) => e.printStackTrace()
        }
      }
    }
  }
  val System = AWT5System

  override val ResourcesRoot = PartsResourcePath(Vector())
  override val MultiDPIResourcesRoot = PartsResourcePath(Vector())

  val DynamicResourcesEnabled: Boolean = false

  // TODO: provide a command line flag to control this as well, in particular to give
  // the asset directory.
  def findDynamicResource(path: ResourcePath): Option[File] = {
    def findFromDir(d: File): Option[File] = {
      val asset = new File(d.getAbsolutePath + "/assets/" + path.path)
      if(asset.exists) Some(asset) else None
    }
    def findFromWorkingDir: Option[File] =
      findFromDir(new File(java.lang.System.getProperty("user.dir")))

    val protectionDomain = this.getClass.getProtectionDomain()
    val codeSource = protectionDomain.getCodeSource()
    if(codeSource == null) return findFromWorkingDir
    val jar = new File(codeSource.getLocation.toURI.getPath)
    if(!jar.exists) return findFromWorkingDir
    val parent = jar.getParentFile
    if(parent == null) return findFromWorkingDir
    findFromDir(parent).orElse(findFromWorkingDir)
  }

  // Centralize the execution context used for asynchronous tasks in the Desktop backend.
  // Could be overridden at wiring time.
  implicit val executionContext: ExecutionContext = ExecutionContext.Implicits.global
}
Example 73
Source File: Config.scala From zipkin-mesos-framework with Apache License 2.0 | 5 votes |
package net.elodina.mesos.zipkin

import java.io.{File, FileInputStream}
import java.net.URI
import java.util.Properties

import net.elodina.mesos.zipkin.utils.{BindAddress, Period}

object Config {
  val DEFAULT_FILE = new File("zipkin-mesos.properties")

  var debug: Boolean = false
  var genTraces: Boolean = false
  var storage: String = "file:zipkin-mesos.json"

  var master: Option[String] = None
  var principal: Option[String] = None
  var secret: Option[String] = None
  var user: Option[String] = None

  var frameworkName: String = "zipkin"
  var frameworkRole: String = "*"
  var frameworkTimeout: Period = new Period("30d")

  var log: Option[File] = None
  var api: Option[String] = None
  var bindAddress: Option[BindAddress] = None

  def apiPort: Int = {
    val port = new URI(getApi).getPort
    if (port == -1) 80 else port
  }

  def replaceApiPort(port: Int): Unit = {
    val prev: URI = new URI(getApi)
    api = Some("" + new URI(
      prev.getScheme, prev.getUserInfo,
      prev.getHost, port,
      prev.getPath, prev.getQuery, prev.getFragment
    ))
  }

  def getApi: String = {
    api.getOrElse(throw new Error("api not initialized"))
  }

  def getMaster: String = {
    master.getOrElse(throw new Error("master not initialized"))
  }

  def getZk: String = {
    master.getOrElse(throw new Error("zookeeper not initialized"))
  }

  private[zipkin] def loadFromFile(file: File): Unit = {
    val props: Properties = new Properties()
    val stream: FileInputStream = new FileInputStream(file)

    props.load(stream)
    stream.close()

    if (props.containsKey("debug")) debug = java.lang.Boolean.valueOf(props.getProperty("debug"))
    if (props.containsKey("genTraces")) genTraces = java.lang.Boolean.valueOf(props.getProperty("genTraces"))
    if (props.containsKey("storage")) storage = props.getProperty("storage")

    if (props.containsKey("master")) master = Some(props.getProperty("master"))
    if (props.containsKey("user")) user = Some(props.getProperty("user"))
    if (props.containsKey("principal")) principal = Some(props.getProperty("principal"))
    if (props.containsKey("secret")) secret = Some(props.getProperty("secret"))

    if (props.containsKey("framework-name")) frameworkName = props.getProperty("framework-name")
    if (props.containsKey("framework-role")) frameworkRole = props.getProperty("framework-role")
    if (props.containsKey("framework-timeout")) frameworkTimeout = new Period(props.getProperty("framework-timeout"))

    if (props.containsKey("log")) log = Some(new File(props.getProperty("log")))
    if (props.containsKey("api")) api = Some(props.getProperty("api"))
    if (props.containsKey("bind-address")) bindAddress = Some(new BindAddress(props.getProperty("bind-address")))
  }

  override def toString: String = {
    s"""
       |debug: $debug, storage: $storage
       |mesos: master=$master, user=${if (user.isEmpty || user.get.isEmpty) "<default>" else user}
       |principal=${principal.getOrElse("<none>")}, secret=${if (secret.isDefined) "*****" else "<none>"}
       |framework: name=$frameworkName, role=$frameworkRole, timeout=$frameworkTimeout
       |api: $api, bind-address: ${bindAddress.getOrElse("<all>")}, genTraces: $genTraces
    """.stripMargin.trim
  }
}
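A short sketch of how replaceApiPort rebuilds the API URI component by component (the host and ports are made up):

// Hypothetical: swap only the port, keeping scheme, host and path intact.
Config.api = Some("http://zipkin-host:7000/admin")
Config.replaceApiPort(8080)
// Config.getApi == "http://zipkin-host:8080/admin"; Config.apiPort == 8080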
Example 74
Source File: BigFileDatasource.scala From glow with Apache License 2.0 | 5 votes |
package io.projectglow.sql

import java.net.URI
import java.util.ServiceLoader

import scala.collection.JavaConverters._

import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.Path
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.sources._
import org.apache.spark.sql.types.StructType
import org.apache.spark.sql.{DataFrame, SQLContext, SaveMode}

import io.projectglow.common.{GlowLogging, WithUtils}

// NOTE: the enclosing class declaration and earlier members are elided in this excerpt.

  def write(rdd: RDD[Array[Byte]], path: String): Unit = {
    val uri = new URI(path)
    uploaders.find(_.canUpload(rdd.sparkContext.hadoopConfiguration, path)) match {
      case Some(uploader) => uploader.upload(rdd, path)
      case None =>
        logger.info(s"Could not find a parallel uploader for $path, uploading from the driver")
        writeFileFromDriver(new Path(uri), rdd)
    }
  }

  private def writeFileFromDriver(path: Path, byteRdd: RDD[Array[Byte]]): Unit = {
    val sc = byteRdd.sparkContext
    val fs = path.getFileSystem(sc.hadoopConfiguration)
    WithUtils.withCloseable(fs.create(path)) { stream =>
      WithUtils.withCachedRDD(byteRdd) { cachedRdd =>
        cachedRdd.count()
        cachedRdd.toLocalIterator.foreach { chunk =>
          stream.write(chunk)
        }
      }
    }
  }
}
Example 75
Source File: HadoopLineIterator.scala From glow with Apache License 2.0 | 5 votes |
package io.projectglow.sql.util

import java.io.Closeable
import java.net.URI

import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.Path
import org.apache.hadoop.io.Text
import org.apache.hadoop.mapreduce._
import org.apache.hadoop.mapreduce.lib.input.{FileSplit, LineRecordReader}
import org.apache.hadoop.mapreduce.task.TaskAttemptContextImpl
import org.apache.spark.sql.execution.datasources.RecordReaderIterator

import io.projectglow.common.GlowLogging

class HadoopLineIterator(
    path: String,
    start: Long,
    length: Long,
    lineSeparator: Option[Array[Byte]],
    conf: Configuration)
  extends Iterator[Text]
  with Closeable
  with GlowLogging {

  private val iterator = {
    val fileSplit = new FileSplit(
      new Path(new URI(path)),
      start,
      length,
      // TODO: Implement Locality
      Array.empty
    )
    val attemptId = new TaskAttemptID(new TaskID(new JobID(), TaskType.MAP, 0), 0)
    val hadoopAttemptContext = new TaskAttemptContextImpl(conf, attemptId)
    val reader = lineSeparator match {
      case Some(sep) => new LineRecordReader(sep)
      // If the line separator is `None`, it covers `\r`, `\r\n` and `\n`.
      case _ => new LineRecordReader()
    }
    reader.initialize(fileSplit, hadoopAttemptContext)
    new RecordReaderIterator(reader)
  }

  override def hasNext: Boolean = {
    iterator.hasNext
  }

  override def next(): Text = {
    iterator.next()
  }

  override def close(): Unit = {
    iterator.close()
  }
}
Example 76
Source File: SchemaRegistryOps.scala From embedded-kafka-schema-registry with MIT License | 5 votes |
package net.manub.embeddedkafka.schemaregistry.ops

import java.net.{ServerSocket, URI}
import java.util.Properties

import io.confluent.kafka.schemaregistry.rest.{
  SchemaRegistryConfig,
  SchemaRegistryRestApplication
}
import io.confluent.rest.RestConfig
import net.manub.embeddedkafka.EmbeddedServer
import net.manub.embeddedkafka.ops.RunningServersOps
import net.manub.embeddedkafka.schemaregistry.{EmbeddedKafkaConfig, EmbeddedSR}

import scala.jdk.CollectionConverters._

// NOTE: the enclosing trait declaration and earlier members are elided in this excerpt.

  def stopSchemaRegistry(): Unit = runningServers.stopAndRemove(isEmbeddedSR)

  private[embeddedkafka] def isEmbeddedSR(server: EmbeddedServer): Boolean =
    server.isInstanceOf[EmbeddedSR]

  private[embeddedkafka] def schemaRegistryPort(
      restApp: SchemaRegistryRestApplication
  ): Int = {
    val listeners = restApp.getConfiguration.originalProperties
      .getProperty(RestConfig.LISTENERS_CONFIG)
    URI.create(listeners).getPort
  }
}
Example 77
Source File: ResizableImage.scala From MoVE with Mozilla Public License 2.0 | 5 votes |
package de.thm.move.views.shapes

import java.net.URI

import javafx.scene.image.{Image, ImageView}

// NOTE: the class declaration of ResizableImage (extending ImageView) is elided in this excerpt.

  val resizeWidth = img.getWidth > img.getHeight
  if(resizeWidth) setFitWidth(200)
  else setFitHeight(200)

  override def getWidth: Double = {
    if(resizeWidth) getFitWidth // get the fitting width
    else getBoundsInLocal.getWidth // get the calculated width
  }

  override def getHeight: Double = {
    if(!resizeWidth) getFitHeight // get the fitting height
    else getBoundsInLocal.getHeight // get the calculated height
  }

  override def setWidth(w:Double): Unit = {
    if(resizeWidth) setFitWidth(w) // set fitting width
    else () // don't do anything; this side gets calculated according to the height
  }

  override def setHeight(h:Double): Unit = {
    if(!resizeWidth) setFitHeight(h)
    else ()
  }

  override def copy: ResizableImage = {
    val duplicate = new ResizableImage(srcEither, img)
    duplicate.copyPosition(this)
    duplicate
  }
}

object ResizableImage {
  def apply(uri:URI, img:Image) = new ResizableImage(Left(uri), img)
  def apply(bytes:Array[Byte], img:Image) = new ResizableImage(Right(bytes), img)
}
Example 78
Source File: DrawCtrl.scala From MoVE with Mozilla Public License 2.0 | 5 votes |
package de.thm.move.controllers

import java.net.URI

import javafx.beans.property.SimpleBooleanProperty
import javafx.event.ActionEvent
import javafx.scene.control.TextField
import javafx.scene.input.MouseEvent
import javafx.scene.paint.Color
import javafx.scene.text.Font

import de.thm.move.controllers.drawing._
import de.thm.move.controllers.factorys.ShapeFactory
import de.thm.move.implicits.FxHandlerImplicits._
import de.thm.move.models.SelectedShape
import de.thm.move.models.SelectedShape._
import de.thm.move.util.GeometryUtils
import de.thm.move.types._
import de.thm.move.views.anchors.Anchor
import de.thm.move.views.panes.DrawPanel
import de.thm.move.views.shapes._

// NOTE: the enclosing controller class declaration and earlier members are elided in this excerpt.

  def abortDrawingProcess(): Unit = {
    removeTmpShapes(tmpShapeId)
    drawStrategies.values.foreach(_.reset())
  }

  def drawImage(imgUri:URI): Unit = {
    val imgview = ShapeFactory.newImage(imgUri)
    changeLike.addShape(imgview)
    changeLike.addNode(imgview.getAnchors)
  }

  def drawText(x:Double, y:Double, color:Color, font:Font): Unit = {
    val text = new TextField()
    text.setId(tmpShapeId)
    text.setOnAction { _:ActionEvent =>
      //replace TextField with ResizableText
      changeLike.remove(text)
      val txt = new ResizableText(text.getText, x,y, font)
      txt.setFontColor(color)
      changeLike.addShape(txt)
    }
    text.setLayoutX(x)
    text.setLayoutY(y)
    changeLike.addNode(text)
    text.requestFocus()
  }
}
Example 79
Source File: ShapeFactory.scala From MoVE with Mozilla Public License 2.0 | 5 votes |
package de.thm.move.controllers.factorys

import java.net.URI

import javafx.scene.image.Image
import javafx.scene.paint.Color
import javafx.scene.shape.{LineTo, MoveTo}

import de.thm.move.types._
import de.thm.move.models.SelectedShape
import de.thm.move.views.anchors.Anchor
import de.thm.move.views.shapes._

object ShapeFactory {
  def newImage(imgUri:URI):ResizableImage = {
    val img = new Image(imgUri.toString)
    ResizableImage(imgUri, img)
  }

  private def setDefaultColor[T <: ColorizableShape](shape:T)(fillColor:Color, strokeColor:Color, strokeWidth:Int): T = {
    shape.colorizeShape(fillColor, strokeColor)
    shape.setStrokeWidth(strokeWidth)
    shape
  }

  def newRectangle(point:Point, width:Double, height:Double): (Color, Color, Int) => ResizableRectangle = {
    val rectangle = new ResizableRectangle(point, width, height)
    setDefaultColor(rectangle)
  }

  def newLine(start:Point, end:Point, strokeSize:Int): (Color, Color, Int) => ResizableLine = {
    val line = new ResizableLine(start, end, strokeSize)
    setDefaultColor(line)
  }

  def newCircle(point:Point, width:Double, height:Double): (Color, Color, Int) => ResizableCircle = {
    val circle = new ResizableCircle(point, width, height)
    setDefaultColor(circle)
  }

  def newAnchor(point:Point):Anchor = {
    val (x,y) = point
    new Anchor(x,y)
  }

  def newPolygon(points:List[Point]): (Color, Color, Int) => ResizablePolygon = {
    val polygon = ResizablePolygon(points)
    setDefaultColor(polygon)
  }

  def newPath(points:List[Point]): (Color,Color, Int) => ResizablePath = {
    val (hdX,hdY) = points.head
    val startpoint = new MoveTo(hdX,hdY)
    val pathElements = for ( (x,y) <- points.tail ) yield {
      new LineTo(x,y)
    }
    val path = new ResizablePath(startpoint, pathElements)
    (setDefaultColor(path) _)
  }

  def createTemporaryShape(shape:SelectedShape.SelectedShape, p:Point)(stroke:Color): ResizableShape = {
    (shape match {
      case SelectedShape.Rectangle => newRectangle(p, 2,2)
      case SelectedShape.Circle => newCircle(p,2,2)
      case SelectedShape.Line => newLine(p,p, 2)
    })(null, stroke, 2)
  }
}
Example 80
Source File: ResourceUtils.scala From MoVE with Mozilla Public License 2.0 | 5 votes |
package de.thm.move.util

import java.net.URI
import java.nio.file.{Files, Path, Paths}
import java.util.Base64

import javafx.scene.paint.Color

import de.thm.move.Global._

object ResourceUtils {

  def getFilename(uri:URI):String = {
    val uriStr = uri.toString
    uriStr.substring(uriStr.lastIndexOf("/")+1, uriStr.length)
  }

  def getFilename(p:Path):String = {
    p.getFileName.toString
  }

  def asColor(key:String): Option[Color] =
    config.getString(key).map(Color.web)

  def copy(src:URI, target:URI): Unit = {
    val targetPath = Paths.get(target).getParent
    val srcPath = Paths.get(src)
    val filename = srcPath.getFileName
    Files.copy(srcPath, targetPath.resolve(filename))
  }
}
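A hedged sketch of the filename helpers above (the paths are made up):

import java.net.URI
import java.nio.file.Paths

ResourceUtils.getFilename(new URI("file:///home/user/images/logo.png")) // "logo.png"
ResourceUtils.getFilename(Paths.get("/home/user/images/logo.png"))      // "logo.png"
// copy(src, target) drops the source file into the target's parent directory.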
Example 81
Source File: CodeGeneratorTest.scala From MoVE with Mozilla Public License 2.0 | 5 votes |
package de.thm.move.models

import java.io.PrintWriter
import java.net.URI
import java.nio.charset.Charset
import java.nio.file.{Paths, Files}
import java.util.Base64

import javafx.scene.Node
import javafx.scene.paint.{Paint, Color}
import javafx.scene.shape.{LineTo, MoveTo}
import javafx.scene.text.TextAlignment

import de.thm.move.MoveSpec
import de.thm.move.models.ModelicaCodeGenerator.FormatSrc
import de.thm.move.models.ModelicaCodeGenerator.FormatSrc.FormatSrc
import de.thm.move.types._
import de.thm.move.util.ResourceUtils
import de.thm.move.util.GeometryUtils
import de.thm.move.views.shapes._

class CodeGeneratorTest extends MoveSpec {

  val dummyURL = Paths.get(System.getProperty("user.home")).toUri

  private def eqTest(toTest:String, expected:String): Unit = {
    if(!toTest.contains(expected)) {
      println(toTest)
      println("Expected: "+expected)
    }
    assert(toTest.contains(expected), s"Expected [$toTest] containing [$expected]")
  }

  "ModelicaCodeGenerator" should "generate Rectangles" in {
    val generator = new ModelicaCodeGenerator(FormatSrc.Pretty, 1, 500,500)
    val rect = new ResizableRectangle((0,0), 100,100)
    rect.colorizeShape(Color.BLACK, Color.BLACK)
    rect.setRotate(90.0)

    val str = generator.generateShape(rect, "test", dummyURL)(1)
    eqTest(str, "origin = {50,450}")
    eqTest(str, "extent = {{-50,50}, {50,-50}}")

    val generator2 = new ModelicaCodeGenerator(FormatSrc.Pretty, 4, 500,500)
    val str2 = generator2.generateShape(rect, "test", dummyURL)(1)
    eqTest(str2, "origin = {12,112}")
    eqTest(str2, "extent = {{-12,12}, {12,-12}}")
  }

  it should "generate Circles" in {
    val generator = new ModelicaCodeGenerator(FormatSrc.Pretty, 1, 500,500)
    val circle = new ResizableCircle((100,100), 50,50)
    circle.colorizeShape(Color.BLACK, Color.BLACK)
    circle.setRotate(90.0)

    val str = generator.generateShape(circle, "test", dummyURL)(1)
    eqTest(str, "origin = {100,400}")
    eqTest(str, "extent = {{-50,50}, {50,-50}}")
  }
}
Example 82
Source File: InMemoryJavaSource.scala From aloha with MIT License | 5 votes |
package com.eharmony.aloha.models.h2o.compiler

import java.net.URI
import javax.tools.JavaFileObject.Kind
import javax.tools.SimpleJavaFileObject

import com.eharmony.aloha.reflect.{RefInfo, RefInfoOps}

import scala.util.matching.Regex

private[h2o] case class InMemoryJavaSource[B](code: String, className: String)(implicit baseClassInfo: RefInfo[B])
  extends SimpleJavaFileObject(InMemoryJavaSource.classNameToUri(className), Kind.SOURCE) {
  override def getCharContent(ignoreEncodingErrors: Boolean): CharSequence = code
}

private[h2o] object InMemoryJavaSource {
  val pkgRe = """package\s+([a-z_][a-zA-Z0-9_]*(\.[a-z_][a-zA-Z0-9_]*)*)\s*;""".r

  def fromString[B](code: String)(implicit baseClass: RefInfo[B]): Option[InMemoryJavaSource[B]] = {
    val classNameRe = classNameRegex[B](code)
    val className = determineClassName(code, classNameRe) map determineCanonicalClassName(code)
    className map (cn => new InMemoryJavaSource[B](code, cn))
  }

  def classNameRegex[B](code: String)(implicit baseClass: RefInfo[B]): Regex = {
    val ext = if (RefInfoOps.isJavaInterface[B]) "implements" else "extends"
    val re = RefInfoOps.classRegex[B].toString()
    ("""public\s+class\s+([A-Za-z_][0-9A-Za-z_]*)\s+""" + ext + """\s+""" + re + """[\s<\{]""").r
  }

  def determineClassName(code: String, classNameRegex: Regex): Option[String] =
    classNameRegex.findFirstMatchIn(code) map { _.group(1) }

  def determineCanonicalClassName(code: String)(className: String) =
    (pkgRe.findFirstIn(code) map { case pkgRe(p, _) => s"$p." } getOrElse "") + className

  def classNameToUri(className: String): URI =
    URI.create("string:///" + className.replace(".", "/") + Kind.SOURCE.extension)
}
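A hedged sketch of wrapping generated Java source (the Java snippet and base type are assumptions, fromString is private[h2o], and an implicit RefInfo for the base type is assumed in scope, so this only illustrates the semantics):

// Hypothetical: wrap generated Java code whose public class implements the base type.
val code =
  """package demo;
    |public class MyModel implements java.util.function.Supplier<String> {
    |  public String get() { return "hello"; }
    |}""".stripMargin
val src = InMemoryJavaSource.fromString[java.util.function.Supplier[String]](code)
// Some(...) when the class-name regex matches; the URI becomes string:///demo/MyModel.java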
Example 83
Source File: ForgeInstallSpec.scala From PackUpdate with Apache License 2.0 | 5 votes |
package at.chaosfield.packupdate.json

import java.io.{File, FileNotFoundException}
import java.net.URI

import scala.xml.XML

import at.chaosfield.packupdate.common.{FileManager, Log, MavenPath}
import org.json4s.jackson.JsonMethods

case class ForgeInstallSpec(
  install: InstallInformation,
  versionInfo: VersionInformation,
  spec: Int = 0
)

case class InstallInformation(
  profileName: String,
  target: String,
  path: MavenPath,
  version: String,
  filePath: String,
  welcome: String,
  minecraft: String,
  mirrorList: URI,
  logo: String,
  modList: String
)

case class VersionInformation(
  id: String,
  `type`: String,
  minecraftArguments: String,
  mainClass: String,
  inheritsFrom: String,
  jar: String,
  libraries: Array[LibraryInformation]
)

case class LibraryInformation(
  name: MavenPath,
  url: Option[URI],
  checksums: Array[String],
  serverreq: Boolean = false,
  clientreq: Boolean = false
) {

  def getPom(mavenPath: MavenPath, log: Log): xml.Elem = {
    var lastException: Option[Exception] = None
    val tryUrls = url.toList ++ LibraryInformation.RepoList
    for (url <- tryUrls) {
      try {
        val pomUrl = url.resolve(mavenPath.getPom.getFilePath).toURL
        val data = FileManager.readStreamToString(FileManager.retrieveUrl(pomUrl, log)._1)
        return XML.loadString(data)
      } catch {
        case e: FileNotFoundException =>
          lastException = Some(e)
          log.debug(s"File not found at $url, trying next...")
      }
    }
    throw lastException.get
  }
}

object LibraryInformation {
  final val RepoList =
    List("https://repo.maven.apache.org/maven2/", "https://libraries.minecraft.net/").map(new URI(_))
}
Example 84
Source File: VanillaVersionSpec.scala From PackUpdate with Apache License 2.0 | 5 votes |
package at.chaosfield.packupdate.json

import java.net.URI

// https://launchermeta.mojang.com/mc/game/version_manifest.json
case class VanillaVersionSpec(
  versions: Array[VanillaVersion]
)

case class VanillaVersion(
  id: String,
  url: URI
)

case class VanillaVersionManifest(
  downloads: VanillaDownloadInfo
)

case class VanillaDownloadInfo(
  server: VanillaDownloadDetails
)

case class VanillaDownloadDetails(
  sha1: String,
  size: Int,
  url: URI
)
Example 85
Source File: LocalDatabase.scala From PackUpdate with Apache License 2.0 | 5 votes |
package at.chaosfield.packupdate.json

import java.io.File
import java.net.URI

import at.chaosfield.packupdate.common._

case class LocalDatabase(
  installedComponents: Array[InstalledComponent]
)

case class InstalledComponent(
  name: String,
  version: String,
  componentType: ComponentType,
  downloadUrl: Option[URI],
  downloadHash: Option[FileHash],
  files: Array[InstalledFile],
  flags: Array[ComponentFlag]
) {
  def toComponent: Component =
    new Component(name, version, downloadUrl, componentType, downloadHash, flags)

  def display = s"$name $version"

  def hasFlag(flag: ComponentFlag) = flags.contains(flag)

  def validateIntegrity(config: MainConfig, log: Log): Boolean = {
    files.forall(instFile => {
      val f = instFile.discoverActual(config.minecraftDir)
      val hash = instFile.hash
      f match {
        case Some((file, enabled)) =>
          // This used to be a single line; the variables are separated to increase readability.
          val isMod = componentType == ComponentType.Mod
          val isOptional = hasFlag(ComponentFlag.Optional)
          val wrongState = enabled == hasFlag(ComponentFlag.Disabled)
          if ((isMod && !isOptional && wrongState) || (!isMod && wrongState)) {
            log.debug(s"The file $name:$file is in wrong enabled state, or disabling is not allowed for this component")
            false
          } else {
            val disableIntegrity = flags.contains(ComponentFlag.NoIntegrity)
            val isConfig = componentType == ComponentType.Config
            val forceOverwrite = flags.contains(ComponentFlag.ForceOverwrite)
            if (!disableIntegrity && (!isConfig || forceOverwrite)) {
              val h = FileHash.forFile(file)
              if (h != hash) {
                log.debug(s"sha256($name:$file) => $h, should be $hash")
              }
              h == hash
            } else {
              true
            }
          }
        case None =>
          log.debug(s"$name:${instFile.fileName} does not exist")
          false
      }
    })
  }
}

object InstalledComponent {
  def fromRemote(c: Component, installedFiles: Array[InstalledFile]): InstalledComponent = {
    InstalledComponent(
      c.name,
      c.version,
      c.componentType,
      c.downloadUrl,
      c.hash,
      installedFiles,
      c.flags
    )
  }
}

case class InstalledFile(
  fileName: String,
  hash: FileHash
) {
  def discoverActual(base: File): Option[(File, Boolean)] = {
    val enabled = new File(base, enabledFile())
    val disabled = new File(base, enabledFile() + ".disabled")
    List((enabled, true), (disabled, false)).find(_._1.exists())
  }

  def enabledFile(): String = {
    if (fileName.endsWith(".disabled")) {
      fileName.substring(0, fileName.length - 9)
    } else {
      fileName
    }
  }
}
Example 86
Source File: GithubRelease.scala From PackUpdate with Apache License 2.0 | 5 votes |
package at.chaosfield.packupdate.json

import java.net.URI
import java.util.Date

case class GithubRelease(
  id: Int,
  url: URI,
  assetsUrl: URI,
  htmlUrl: URI,
  nodeId: String,
  tagName: String,
  targetCommitish: String,
  name: String,
  draft: Boolean,
  author: GithubUser,
  prerelease: Boolean,
  createdAt: Date,
  publishedAt: Date,
  assets: Array[GithubAsset],
  tarballUrl: URI,
  zipballUrl: URI,
  body: String
)

case class GithubUser(
  id: Int,
  login: String,
  nodeId: String,
  avatarUrl: String,
  gravatarId: String,
  url: URI,
  htmlUrl: URI,
  reposUrl: URI,
  `type`: String,
  siteAdmin: Boolean
)

case class GithubAsset(
  id: Int,
  nodeId: String,
  url: URI,
  name: String,
  label: Option[String],
  uploader: GithubUser,
  size: Int,
  downloadCount: Int,
  createdAt: Date,
  updatedAt: Date,
  browserDownloadUrl: URI
)
Example 87
Source File: Component.scala From PackUpdate with Apache License 2.0 | 5 votes |
package at.chaosfield.packupdate.common

import java.net.URI

class Component(val name: String, val version: String, val _downloadUrl: Option[URI],
                val componentType: ComponentType, val hash: Option[FileHash],
                val flags: Array[ComponentFlag]) {

  def toCSV = {
    Array(
      name,
      version,
      downloadUrl.map(_.toString).getOrElse(""),
      componentType.stringValue,
      hash.getOrElse(""),
      flags.map(_.internalName).mkString(";")
    ).mkString(",")
  }

  def neededOnSide(packSide: PackSide): Boolean = {
    packSide match {
      case PackSide.Client => !flags.contains(ComponentFlag.ServerOnly)
      case PackSide.Server => !flags.contains(ComponentFlag.ClientOnly)
    }
  }

  def downloadUrl: Option[URI] =
    if (componentType == ComponentType.Forge) {
      _downloadUrl match {
        case Some(uri: URI) => Some(uri)
        case None =>
          Some(new URI(s"https://files.minecraftforge.net/maven/net/minecraftforge/forge/$version/forge-$version-installer.jar"))
      }
    } else {
      _downloadUrl
    }

  def display = s"$name $version"

  def hasFlag(flag: ComponentFlag): Boolean = flags.contains(flag)
}

object Component {
  def fromCSV(data: Array[String]) = {
    new Component(
      data(0), // name
      data(1), // version
      if (data(2).isEmpty) None else Some(new URI(data(2))), // downloadUrl
      ComponentType.fromString(data(3)).getOrElse(ComponentType.Unknown), // componentType
      data.lift(4).filter(_ != "").map(new FileHash(_)), // hash
      data.lift(5).map(_.split(';')).getOrElse(Array.empty[String]).flatMap(ComponentFlag.fromString) // flags
    )
  }
}
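A hedged round-trip sketch for the CSV encoding above (the component-type string and URL are assumptions, not values confirmed by the source):

// Hypothetical CSV row: name,version,downloadUrl,type,hash,flags
val row = "SomeMod,1.2.3,https://example.com/somemod.jar,mod,,".split(",", -1)
val component = Component.fromCSV(row)
// component.downloadUrl == Some(new URI("https://example.com/somemod.jar"))
// component.toCSV re-serializes the same fields, comma-separated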
Example 88
Source File: SqlDatabase.scala From scala-clippy with Apache License 2.0 | 5 votes |
package util

import java.net.URI

import com.typesafe.config.ConfigValueFactory._
import com.typesafe.config.{Config, ConfigFactory}
import com.typesafe.scalalogging.StrictLogging
import org.flywaydb.core.Flyway
import slick.driver.JdbcProfile
import slick.jdbc.JdbcBackend._

case class SqlDatabase(
  db: slick.jdbc.JdbcBackend#Database,
  driver: JdbcProfile,
  connectionString: JdbcConnectionString
) {
  def updateSchema() {
    val flyway = new Flyway()
    flyway.setDataSource(connectionString.url, connectionString.username, connectionString.password)
    flyway.migrate()
  }

  def close() {
    db.close()
  }
}

case class JdbcConnectionString(url: String, username: String = "", password: String = "")

object SqlDatabase extends StrictLogging {
  def create(config: DatabaseConfig): SqlDatabase = {
    val envDatabaseUrl = System.getenv("DATABASE_URL")

    if (config.dbPostgresServerName.length > 0)
      createPostgresFromConfig(config)
    else if (envDatabaseUrl != null)
      createPostgresFromEnv(envDatabaseUrl)
    else
      createEmbedded(config)
  }

  def createEmbedded(connectionString: String): SqlDatabase = {
    val db = Database.forURL(connectionString)
    SqlDatabase(db, slick.driver.H2Driver, JdbcConnectionString(connectionString))
  }

  private def createPostgresFromEnv(envDatabaseUrl: String) = {
    import DatabaseConfig._
    val dbUri = new URI(envDatabaseUrl)
    val username = dbUri.getUserInfo.split(":")(0)
    val password = dbUri.getUserInfo.split(":")(1)
    val intermediaryConfig = new DatabaseConfig {
      override def rootConfig: Config =
        ConfigFactory
          .empty()
          .withValue(PostgresDSClass, fromAnyRef("org.postgresql.ds.PGSimpleDataSource"))
          .withValue(PostgresServerNameKey, fromAnyRef(dbUri.getHost))
          .withValue(PostgresPortKey, fromAnyRef(dbUri.getPort))
          .withValue(PostgresDbNameKey, fromAnyRef(dbUri.getPath.tail))
          .withValue(PostgresUsernameKey, fromAnyRef(username))
          .withValue(PostgresPasswordKey, fromAnyRef(password))
          .withFallback(ConfigFactory.load())
    }
    createPostgresFromConfig(intermediaryConfig)
  }

  private def postgresUrl(host: String, port: String, dbName: String) =
    s"jdbc:postgresql://$host:$port/$dbName"

  private def postgresConnectionString(config: DatabaseConfig) = {
    val host = config.dbPostgresServerName
    val port = config.dbPostgresPort
    val dbName = config.dbPostgresDbName
    val username = config.dbPostgresUsername
    val password = config.dbPostgresPassword
    JdbcConnectionString(postgresUrl(host, port, dbName), username, password)
  }

  private def createPostgresFromConfig(config: DatabaseConfig) = {
    val db = Database.forConfig("db.postgres", config.rootConfig)
    SqlDatabase(db, slick.driver.PostgresDriver, postgresConnectionString(config))
  }

  private def createEmbedded(config: DatabaseConfig): SqlDatabase = {
    val db = Database.forConfig("db.h2")
    SqlDatabase(db, slick.driver.H2Driver, JdbcConnectionString(embeddedConnectionStringFromConfig(config)))
  }

  private def embeddedConnectionStringFromConfig(config: DatabaseConfig): String = {
    val url = config.dbH2Url
    val fullPath = url.split(":")(3)
    logger.info(s"Using an embedded database, with data files located at: $fullPath")
    url
  }
}
Example 89
Source File: SparkeyUri.scala From scio with Apache License 2.0 | 5 votes |
package com.spotify.scio.extra.sparkey

import java.io.File
import java.net.URI
import java.nio.file.{Files, Paths}

import com.spotify.scio.coders.Coder
import com.spotify.scio.util.{RemoteFileUtil, ScioUtil}
import com.spotify.sparkey.extra.ThreadLocalSparkeyReader
import com.spotify.sparkey.{Sparkey, SparkeyReader}
import org.apache.beam.sdk.options.PipelineOptions

import scala.jdk.CollectionConverters._

trait SparkeyUri extends Serializable {
  val basePath: String
  def getReader: SparkeyReader
  private[sparkey] def exists: Boolean
  override def toString: String = basePath
}

private[sparkey] object SparkeyUri {
  def apply(basePath: String, opts: PipelineOptions): SparkeyUri =
    if (ScioUtil.isLocalUri(new URI(basePath))) {
      LocalSparkeyUri(basePath)
    } else {
      RemoteSparkeyUri(basePath, opts)
    }
  def extensions: Seq[String] = Seq(".spi", ".spl")
  implicit def coderSparkeyURI: Coder[SparkeyUri] = Coder.kryo[SparkeyUri]
}

private case class LocalSparkeyUri(basePath: String) extends SparkeyUri {
  override def getReader: SparkeyReader =
    new ThreadLocalSparkeyReader(new File(basePath))
  override private[sparkey] def exists: Boolean =
    SparkeyUri.extensions.map(e => new File(basePath + e)).exists(_.exists)
}

private object RemoteSparkeyUri {
  def apply(basePath: String, options: PipelineOptions): RemoteSparkeyUri =
    RemoteSparkeyUri(basePath, RemoteFileUtil.create(options))
}

private case class RemoteSparkeyUri(basePath: String, rfu: RemoteFileUtil) extends SparkeyUri {
  override def getReader: SparkeyReader = {
    val uris = SparkeyUri.extensions.map(e => new URI(basePath + e))
    val paths = rfu.download(uris.asJava).asScala
    new ThreadLocalSparkeyReader(paths.head.toFile)
  }
  override private[sparkey] def exists: Boolean =
    SparkeyUri.extensions.exists(e => rfu.remoteExists(new URI(basePath + e)))
}

private[sparkey] class SparkeyWriter(val uri: SparkeyUri, maxMemoryUsage: Long = -1) {
  private val localFile = uri match {
    case u: LocalSparkeyUri => u.basePath
    case _: RemoteSparkeyUri => Files.createTempDirectory("sparkey-").resolve("data").toString
  }

  private lazy val delegate = {
    val file = new File(localFile)
    Files.createDirectories(file.getParentFile.toPath)
    Sparkey.createNew(file)
  }

  def put(key: String, value: String): Unit = delegate.put(key, value)

  def put(key: Array[Byte], value: Array[Byte]): Unit = delegate.put(key, value)

  def close(): Unit = {
    delegate.flush()
    if (maxMemoryUsage > 0) {
      delegate.setMaxMemory(maxMemoryUsage)
    }
    delegate.writeHash()
    delegate.close()
    uri match {
      case u: RemoteSparkeyUri =>
        // Copy .spi and .spl to GCS
        SparkeyUri.extensions.foreach { e =>
          val src = Paths.get(localFile + e)
          val dst = new URI(u.basePath + e)
          u.rfu.upload(src, dst)
        }
      case _ => ()
    }
  }
}
Example 90
Source File: AnnoyUri.scala From scio with Apache License 2.0 | 5 votes |
package com.spotify.scio.extra.annoy

import java.io.File
import java.net.URI
import java.nio.file.{Files, Paths}

import annoy4s._
import com.spotify.scio.coders.Coder
import com.spotify.scio.util.{RemoteFileUtil, ScioUtil}
import org.apache.beam.sdk.options.PipelineOptions

trait AnnoyUri extends Serializable {
  val path: String
  private[annoy] def getReader(metric: AnnoyMetric, dim: Int): AnnoyReader
  private[annoy] def saveAndClose(annoyIndex: AnnoyWriter): Unit
  private[annoy] def exists: Boolean
  override def toString: String = path
}

private[annoy] object AnnoyUri {
  def apply(path: String, opts: PipelineOptions): AnnoyUri =
    if (ScioUtil.isLocalUri(new URI(path))) {
      new LocalAnnoyUri(path)
    } else {
      new RemoteAnnoyUri(path, opts)
    }

  implicit val annoyUriCoder: Coder[AnnoyUri] = Coder.kryo[AnnoyUri]
}

private class LocalAnnoyUri(val path: String) extends AnnoyUri {
  override private[annoy] def getReader(metric: AnnoyMetric, dim: Int): AnnoyReader =
    new AnnoyReader(path, metric, dim)

  override private[annoy] def saveAndClose(w: AnnoyWriter): Unit =
    try {
      w.build()
      w.save(path.toString)
    } finally {
      w.free()
    }

  override private[annoy] def exists: Boolean = new File(path).exists()
}

private class RemoteAnnoyUri(val path: String, options: PipelineOptions) extends AnnoyUri {
  private[this] val rfu: RemoteFileUtil = RemoteFileUtil.create(options)

  override private[annoy] def getReader(metric: AnnoyMetric, dim: Int): AnnoyReader = {
    val localPath = rfu.download(new URI(path))
    new AnnoyReader(localPath.toString, metric, dim)
  }

  override private[annoy] def saveAndClose(w: AnnoyWriter): Unit = {
    val tempFile = Files.createTempDirectory("annoy-").resolve("data")
    try {
      w.build()
      w.save(tempFile.toString)
    } finally {
      w.free()
    }
    rfu.upload(Paths.get(tempFile.toString), new URI(path))
    Files.delete(tempFile)
  }

  override private[annoy] def exists: Boolean = rfu.remoteExists(new URI(path))
}

private[annoy] class AnnoyWriter(metric: AnnoyMetric, dim: Int, nTrees: Int) {

  private[this] val annoy4sIndex = metric match {
    case com.spotify.scio.extra.annoy.Angular   => Annoy.annoyLib.createAngular(dim)
    case com.spotify.scio.extra.annoy.Euclidean => Annoy.annoyLib.createEuclidean(dim)
  }

  def addItem(item: Int, w: Array[Float]): Unit = {
    Annoy.annoyLib.addItem(annoy4sIndex, item, w)
    ()
  }

  def save(filename: String): Unit = {
    Annoy.annoyLib.save(annoy4sIndex, filename)
    ()
  }

  def build(): Unit = Annoy.annoyLib.build(annoy4sIndex, nTrees)
  def free(): Unit = Annoy.annoyLib.deleteIndex(annoy4sIndex)
  def size: Int = Annoy.annoyLib.getNItems(annoy4sIndex)
  def verbose(b: Boolean): Unit = Annoy.annoyLib.verbose(annoy4sIndex, b)
}
Example 91
Source File: DistCache.scala From scio with Apache License 2.0 | 5 votes |
package com.spotify.scio.values

import java.io.File
import java.net.URI

import com.spotify.scio.util.{RemoteFileUtil, ScioUtil}
import org.apache.beam.sdk.options.PipelineOptions

import scala.jdk.CollectionConverters._

// NOTE: the enclosing companion object and earlier members are elided in this excerpt.

  def apply[F](initFn: () => F): DistCache[F] = new MockDistCacheFunc[F](initFn)
}

private[scio] class DistCacheSingle[F](
  val uri: URI,
  val initFn: File => F,
  options: PipelineOptions
) extends FileDistCache[F](options) {
  verifyUri(uri)
  override protected def init: F = initFn(prepareFiles(Seq(uri)).head)
}

private[scio] class DistCacheMulti[F](
  val uris: Seq[URI],
  val initFn: Seq[File] => F,
  options: PipelineOptions
) extends FileDistCache[F](options) {
  uris.foreach(verifyUri)
  override protected def init: F = initFn(prepareFiles(uris))
}
Example 92
Source File: ScioUtil.scala From scio with Apache License 2.0 | 5 votes |
package com.spotify.scio.util

import java.net.URI
import java.util.UUID

import com.fasterxml.jackson.databind.ObjectMapper
import com.fasterxml.jackson.module.scala.DefaultScalaModule
import com.spotify.scio.ScioContext
import org.apache.beam.sdk.extensions.gcp.options.GcpOptions
import org.apache.beam.sdk.extensions.gcp.util.Transport
import org.apache.beam.sdk.{PipelineResult, PipelineRunner}
import org.slf4j.LoggerFactory

import scala.reflect.ClassTag
import scala.util.{Failure, Success, Try}

private[scio] object ScioUtil {
  @transient private lazy val log = LoggerFactory.getLogger(this.getClass)
  @transient lazy val jsonFactory = Transport.getJsonFactory

  def isLocalUri(uri: URI): Boolean =
    uri.getScheme == null || uri.getScheme == "file"

  def isRemoteUri(uri: URI): Boolean = !isLocalUri(uri)

  def isLocalRunner(runner: Class[_ <: PipelineRunner[_ <: PipelineResult]]): Boolean = {
    require(runner != null, "Pipeline runner not set!")
    // FIXME: cover Flink, Spark, etc. in local mode
    runner.getName == "org.apache.beam.runners.direct.DirectRunner"
  }

  def isRemoteRunner(runner: Class[_ <: PipelineRunner[_ <: PipelineResult]]): Boolean =
    !isLocalRunner(runner)

  def classOf[T: ClassTag]: Class[T] =
    implicitly[ClassTag[T]].runtimeClass.asInstanceOf[Class[T]]

  def getScalaJsonMapper: ObjectMapper =
    new ObjectMapper().registerModule(DefaultScalaModule)

  def addPartSuffix(path: String, ext: String = ""): String =
    if (path.endsWith("/")) s"${path}part-*$ext" else s"$path/part-*$ext"

  def getTempFile(context: ScioContext, fileOrPath: String = null): String = {
    val fop = Option(fileOrPath).getOrElse("scio-materialize-" + UUID.randomUUID().toString)
    val uri = URI.create(fop)
    if ((ScioUtil.isLocalUri(uri) && uri.toString.startsWith("/")) || uri.isAbsolute) {
      fop
    } else {
      val filename = fop
      val tmpDir = if (context.options.getTempLocation != null) {
        context.options.getTempLocation
      } else {
        val m =
          "Specify a temporary location via --tempLocation or PipelineOptions.setTempLocation."
        Try(context.optionsAs[GcpOptions].getGcpTempLocation) match {
          case Success(l) =>
            log.warn(
              "Using GCP temporary location as a temporary location to materialize data. " + m
            )
            l
          case Failure(_) =>
            throw new IllegalArgumentException("No temporary location was specified. " + m)
        }
      }
      tmpDir + (if (tmpDir.endsWith("/")) "" else "/") + filename
    }
  }

  def pathWithShards(path: String): String =
    path.replaceAll("\\/+$", "") + "/part"
}
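A hedged sketch of the URI helpers above (ScioUtil is private[scio], so this only illustrates the semantics):

import java.net.URI

ScioUtil.isLocalUri(new URI("/tmp/data"))            // true: no scheme counts as local
ScioUtil.isLocalUri(new URI("gs://bucket/data"))     // false
ScioUtil.addPartSuffix("gs://bucket/output", ".txt") // "gs://bucket/output/part-*.txt"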
Example 93
Source File: ScioContextIT.scala From scio with Apache License 2.0 | 5 votes |
package com.spotify.scio

import java.net.URI

import com.spotify.scio.testing.util.ItUtils
import com.spotify.scio.util.ScioUtil
import org.apache.beam.runners.core.construction.{PipelineTranslation, SdkComponents}
import org.apache.beam.runners.dataflow.{DataflowPipelineTranslator, DataflowRunner}
import org.apache.beam.sdk.extensions.gcp.options.GcpOptions
import org.apache.beam.sdk.io.FileSystems
import org.apache.beam.sdk.options.{PipelineOptions, PipelineOptionsFactory}
import org.scalatest.flatspec.AnyFlatSpec
import org.scalatest.matchers.should.Matchers
import org.apache.beam.runners.dataflow.options.DataflowPipelineOptions

import scala.jdk.CollectionConverters._

import com.spotify.scio.runners.dataflow.DataflowContext
import org.apache.beam.runners.dataflow.options.DataflowPipelineWorkerPoolOptions
import org.apache.beam.runners.dataflow.util.PackageUtil.StagedFile
import org.apache.beam.model.pipeline.v1.RunnerApi
import org.apache.beam.runners.core.construction.Environments
import org.apache.beam.sdk.options.PortablePipelineOptions

class ScioContextIT extends AnyFlatSpec with Matchers {

  "ScioContext" should "have temp location for DataflowRunner" in {
    val opts = PipelineOptionsFactory.create()
    opts.setRunner(classOf[DataflowRunner])
    opts.as(classOf[GcpOptions]).setProject(ItUtils.project)
    verify(opts)
  }

  it should "support user defined temp location for DataflowRunner" in {
    val opts = PipelineOptionsFactory.create()
    opts.setRunner(classOf[DataflowRunner])
    opts.as(classOf[GcpOptions]).setProject(ItUtils.project)
    opts.setTempLocation(ItUtils.gcpTempLocation("scio-context-it"))
    verify(opts)
  }

  private def verify(options: PipelineOptions): Unit = {
    val sc = ScioContext(options)
    val gcpTempLocation = sc.optionsAs[GcpOptions].getGcpTempLocation
    val tempLocation = sc.options.getTempLocation

    tempLocation should not be null
    gcpTempLocation should not be null
    tempLocation shouldBe gcpTempLocation
    ScioUtil.isRemoteUri(new URI(gcpTempLocation)) shouldBe true
    ()
  }

  it should "register remote file systems in the test context" in {
    val sc = ScioContext.forTest()
    noException shouldBe thrownBy {
      FileSystems.matchSingleFileSpec("gs://data-integration-test-eu/shakespeare.json")
    }
    sc.run()
  }

  it should "#1734: generate a reasonably sized job graph" in {
    val opts = PipelineOptionsFactory.create()
    opts.setRunner(classOf[DataflowRunner])
    opts.as(classOf[GcpOptions]).setProject(ItUtils.project)
    opts.as(classOf[DataflowPipelineOptions]).setRegion(ItUtils.Region)
    val sc = ScioContext(opts)
    sc.parallelize(1 to 100)
    val runner = DataflowRunner.fromOptions(sc.options)
    val sdkComponents = SdkComponents.create()
    sdkComponents.registerEnvironment(
      Environments
        .createOrGetDefaultEnvironment(opts.as(classOf[PortablePipelineOptions]))
        .toBuilder()
        .addAllCapabilities(Environments.getJavaCapabilities())
        .build()
    )
    val pipelineProto = PipelineTranslation.toProto(sc.pipeline, sdkComponents, true)
    val jobSpecification =
      runner.getTranslator.translate(
        sc.pipeline,
        pipelineProto,
        sdkComponents,
        runner,
        Nil.asJava
      )
    val newJob = jobSpecification.getJob()
    val graph = DataflowPipelineTranslator.jobToString(newJob)

    import com.fasterxml.jackson.databind.ObjectMapper
    val objectMapper = new ObjectMapper()
    val rootNode = objectMapper.readTree(graph)
    val path = "/steps/0/properties/output_info/0/encoding/component_encodings/0/@type"
    val coder = rootNode.at(path).asText
    coder should not(equal("org.apache.beam.sdk.coders.CustomCoder"))
  }
}
Example 94
Source File: HttpClientProxy.scala From spark-sql-server with Apache License 2.0 | 5 votes |
package org.apache.livy.client.http

import java.net.URI
import java.util.Properties

import org.apache.livy.client.common.HttpMessages.SessionInfo
import org.apache.livyclient.common.CreateClientRequestWithProxyUser

// Workaround: For starting a Livy session with `proxyUser`
object HttpClientProxy {
  def start(uri: URI, proxyUser: String, params: Map[String, String]): Int = {
    val httpConf = new HttpConf(new Properties())
    val sessionConf = new java.util.HashMap[String, String]()
    params.foreach { case (key, value) => sessionConf.put(key, value) }
    val create = new CreateClientRequestWithProxyUser(proxyUser, sessionConf)
    new LivyConnection(uri, httpConf).post(create, classOf[SessionInfo], "/").id
  }
}
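A hedged call-site sketch (the Livy URL, user, and Spark setting are made up):

// Hypothetical: start a session impersonating a proxy user.
val sessionId = HttpClientProxy.start(
  new URI("http://livy-server:8998"),
  proxyUser = "analyst",
  params = Map("spark.executor.memory" -> "2g"))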
Example 95
Source File: VertxServerRequest.scala From tapir with Apache License 2.0 | 5 votes |
package sttp.tapir.server.vertx.decoders

import java.net.{InetSocketAddress, URI}

import io.vertx.scala.core.net.SocketAddress
import io.vertx.scala.ext.web.RoutingContext
import sttp.model.Method
import sttp.tapir.model.{ConnectionInfo, ServerRequest}

private[vertx] class VertxServerRequest(rc: RoutingContext) extends ServerRequest {
  private lazy val req = rc.request
  private lazy val _headers = req.headers

  lazy val connectionInfo: ConnectionInfo = {
    val conn = req.connection
    ConnectionInfo(
      Option(conn.localAddress).map(asInetSocketAddress),
      Option(conn.remoteAddress).map(asInetSocketAddress),
      Option(conn.isSsl)
    )
  }

  override def method: Method = Method.apply(req.rawMethod)
  override def protocol: String = req.scheme.get
  override def uri: URI = new URI(req.uri)
  override def headers: Seq[(String, String)] =
    _headers.names.map { key => (key, _headers.get(key).get) }.toSeq
  override def header(name: String): Option[String] = _headers.get(name)

  private def asInetSocketAddress(address: SocketAddress): InetSocketAddress =
    InetSocketAddress.createUnresolved(address.host, address.port)
}
Example 96
Source File: LinkCapturerSpec.scala From paradox with Apache License 2.0 | 5 votes |
package com.lightbend.paradox.markdown

import java.net.URI

import com.lightbend.paradox.tree.Tree.Location

class LinkCapturerSpec extends MarkdownBaseSpec {

  private def capturerFor(pagePath: String, markdown: String): LinkCapturer = {
    val location = Location.forest(pages((pagePath, markdown))).get
    val context = writerContext(location)
    val linkCapturer = new LinkCapturer
    val serializer = linkCapturer.serializer(context)
    location.tree.label.markdown.accept(serializer)
    linkCapturer
  }

  private def linksFor(pagePath: String, markdown: String) =
    for {
      link <- capturerFor(pagePath, markdown).allLinks
      fragment <- link.fragments
    } yield (link.link.toString, fragment.fragment)

  "The LinkCapturer" should "classify relative links in same directory as relative" in {
    linksFor("foo/test.md", "[link](bar.html)") should ===(List(("foo/bar.html", None)))
  }

  it should "classify relative links in a child directory as relative" in {
    linksFor("foo/test.md", "[link](child/bar.html)") should ===(List(("foo/child/bar.html", None)))
  }

  it should "classify relative links in a parent directory as relative" in {
    linksFor("foo/test.md", "[link](../bar.html)") should ===(List(("bar.html", None)))
  }

  it should "classify relative links in a child of a parent directory as relative" in {
    linksFor("foo/test.md", "[link](../child/bar.html)") should ===(List(("child/bar.html", None)))
  }

  it should "ignore relative links outside of the docs directory when no base path is specified" in {
    linksFor("foo/test.md", "[link](../../bar.html)") shouldBe empty
  }

  it should "ignore absolute path links when no base path is specified" in {
    linksFor("foo/test.md", "[link](/bar.html)") shouldBe empty
  }

  it should "treat absolute path links as relative when a base path is specified" in {
    linksFor("/docs/foo/test.md", "[link](/bar.html)") should ===(List(("/bar.html", None)))
  }

  it should "accept relative links outside of the docs directory when a base path is specified" in {
    linksFor("/docs/foo/test.md", "[link](../../bar.html)") should ===(List(("/bar.html", None)))
  }

  it should "accept relative links in a child directory outside of the docs directory when a base path is specified" in {
    linksFor("/docs/foo/test.md", "[link](../../apidocs/bar.html)") should ===(List(("/apidocs/bar.html", None)))
  }

  it should "include the base path in links found in the docs tree when a base path is specified" in {
    linksFor("/docs/foo/test.md", "[link](bar.html)") should ===(List(("/docs/foo/bar.html", None)))
  }

  it should "not ignore invalid relative links (so they can be reported as missing later)" in {
    linksFor("/docs/foo/test.md", "[link](../../../bar.html)") should ===(List(("/../bar.html", None)))
  }

  it should "capture fragments" in {
    linksFor("foo/test.md", "[link](bar.html#frag)") should ===(List(("foo/bar.html", Some("frag"))))
  }

  it should "treat links with a hostname as absolute" in {
    linksFor("foo/test.md", "https://lightbend.com") should ===(List(("https://lightbend.com", None)))
  }

  it should "ignore ref links (because they are validated by the compiler)" in {
    linksFor("foo/test.md", "@ref[link](bar.md)") shouldBe empty
  }

  it should "append index.html to directory links" in {
    linksFor("foo/test.md", "[link](bar/)") should ===(List(("foo/bar/index.html", None)))
  }
}
Example 97
Source File: Url.scala From paradox with Apache License 2.0 | 5 votes |
package com.lightbend.paradox.markdown

import java.net.{ URI, URISyntaxException }

// NOTE: the `Url` wrapper class definition is elided in this excerpt; its companion follows.
object Url {

  case class Error(reason: String) extends RuntimeException(reason)

  def apply(base: String): Url = {
    parse(base, s"template resulted in an invalid URL [$base]")
  }

  def parse(base: String, msg: String): Url = {
    try Url(new URI(base)) catch {
      case e: URISyntaxException => throw Url.Error(msg)
    }
  }
}

case class PropertyUrl(property: String, variables: String => Option[String]) {
  def base = variables(property) match {
    case Some(baseUrl) => baseUrl
    case None          => throw Url.Error(s"property [$property] is not defined")
  }

  def resolve(): Url = {
    Url.parse(base, s"property [$property] contains an invalid URL [$base]")
  }

  def format(args: String*) = Url(base.format(args: _*))

  def collect(f: PartialFunction[String, String]): Url = {
    PropertyUrl(property, variables(_).collect(f)).resolve
  }
}
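A hedged sketch of PropertyUrl resolution (the property name and template are made up):

// Hypothetical variables lookup backing a PropertyUrl.
val vars = Map("scaladoc.base_url" -> "https://example.org/api/%s")
val url = PropertyUrl("scaladoc.base_url", vars.get)
url.format("1.0") // Url for "https://example.org/api/1.0"
// A missing property or a malformed URL surfaces as Url.Error.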
Example 98
Source File: WriteTransformer.scala From seahorse-workflow-executor with Apache License 2.0 | 5 votes |
package io.deepsense.deeplang.doperations import java.io.{File, IOException} import scala.reflect.runtime.{universe => ru} import io.deepsense.commons.utils.Version import io.deepsense.commons.utils.FileOperations.deleteRecursivelyIfExists import io.deepsense.deeplang.DOperation.Id import io.deepsense.deeplang.documentation.OperationDocumentation import io.deepsense.deeplang.doperables.Transformer import io.deepsense.deeplang.doperations.exceptions.DeepSenseIOException import io.deepsense.deeplang.params.{BooleanParam, Params, StringParam} import io.deepsense.deeplang.{DOperation1To0, ExecutionContext} import java.net.URI import org.apache.hadoop.fs.{FileSystem, Path} case class WriteTransformer() extends DOperation1To0[Transformer] with Params with OperationDocumentation { override val id: Id = "58368deb-68d0-4657-ae3f-145160cb1e2b" override val name: String = "Write Transformer" override val description: String = "Writes a Transformer to a directory" override val since: Version = Version(1, 1, 0) val shouldOverwrite = BooleanParam( name = "overwrite", description = Some("Should an existing transformer with the same name be overwritten?") ) setDefault(shouldOverwrite, true) def getShouldOverwrite: Boolean = $(shouldOverwrite) def setShouldOverwrite(value: Boolean): this.type = set(shouldOverwrite, value) val outputPath = StringParam( name = "output path", description = Some("The output path for writing the Transformer.")) def getOutputPath: String = $(outputPath) def setOutputPath(value: String): this.type = set(outputPath, value) val params: Array[io.deepsense.deeplang.params.Param[_]] = Array(outputPath, shouldOverwrite) override protected def execute(transformer: Transformer)(context: ExecutionContext): Unit = { val outputDictPath = getOutputPath try { if (getShouldOverwrite) { removeDirectory(context, outputDictPath) } transformer.save(context, outputDictPath) } catch { case e: IOException => logger.error(s"WriteTransformer error. Could not write transformer to the directory", e) throw DeepSenseIOException(e) } } private def removeDirectory(context: ExecutionContext, path: String): Unit = { if (path.startsWith("hdfs://")) { val configuration = context.sparkContext.hadoopConfiguration val hdfs = FileSystem.get(new URI(extractHdfsAddress(path)), configuration) hdfs.delete(new Path(path), true) } else { deleteRecursivelyIfExists(new File(path)) } } private def extractHdfsAddress(path: String): String = { // first group: "hdfs://ip.addr.of.hdfs", second group: "/some/path/on/hdfs" val regex = "(hdfs:\\/\\/[^\\/]*)(.*)".r val regex(hdfsAddress, _) = path hdfsAddress } @transient override lazy val tTagTI_0: ru.TypeTag[Transformer] = ru.typeTag[Transformer] } object WriteTransformer { def apply(outputPath: String): WriteTransformer = { new WriteTransformer().setOutputPath(outputPath) } }
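The removeDirectory helper above keys on the URI scheme: hdfs:// paths go through the Hadoop FileSystem API, anything else through java.io. A standalone sketch (with a made-up address) of the regex step that isolates the HDFS authority:

object HdfsAddressSketch extends App {
  // first group: "hdfs://host[:port]", second group: the path on HDFS
  val regex = "(hdfs:\\/\\/[^\\/]*)(.*)".r
  val regex(address, path) = "hdfs://10.0.0.1:8020/models/transformer-1"
  println(address) // hdfs://10.0.0.1:8020
  println(path)    // /models/transformer-1
}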
Example 99
Source File: PluginXmlDetector.scala From sbt-idea-plugin with Apache License 2.0 | 5 votes |
package org.jetbrains.sbtidea.download import java.net.URI import java.nio.file.{FileSystems, Files, Path} import java.util.Collections import java.util.function.Predicate private class PluginXmlDetector extends Predicate[Path] { import org.jetbrains.sbtidea.packaging.artifact._ private val MAP = Collections.emptyMap[String, Any]() var result: String = _ override def test(t: Path): Boolean = { if (!t.toString.endsWith(".jar")) return false val uri = URI.create(s"jar:${t.toUri}") try { using(FileSystems.newFileSystem(uri, MAP)) { fs => val maybePluginXml = fs.getPath("META-INF", "plugin.xml") if (Files.exists(maybePluginXml)) { result = new String(Files.readAllBytes(maybePluginXml)) true } else { false } } } catch { case e: java.util.zip.ZipError => throw new RuntimeException(s"Corrupt zip file: $t", e) } } }
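The test method above leans on java.nio's ability to mount a jar as a FileSystem through a "jar:" URI. A self-contained sketch of that trick, assuming a plugin.jar exists on disk:

import java.net.URI
import java.nio.file.{FileSystems, Files, Paths}
import java.util.Collections

object JarFsSketch extends App {
  val jar = Paths.get("plugin.jar") // assumed to exist
  val uri = URI.create(s"jar:${jar.toUri}")
  // mount the jar and read an entry through ordinary Path operations
  val fs = FileSystems.newFileSystem(uri, Collections.emptyMap[String, Any]())
  try {
    val pluginXml = fs.getPath("META-INF", "plugin.xml")
    if (Files.exists(pluginXml)) println(new String(Files.readAllBytes(pluginXml)))
  } finally fs.close()
}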
Example 100
Source File: PluginMock.scala From sbt-idea-plugin with Apache License 2.0 | 5 votes |
package org.jetbrains.sbtidea.download.plugin import java.net.URI import java.nio.file.{FileSystems, Files, Path} import org.jetbrains.sbtidea.TmpDirUtils import org.jetbrains.sbtidea.packaging.artifact import org.jetbrains.sbtidea.Keys._ import org.jetbrains.sbtidea.download.plugin.PluginDescriptor.Dependency import scala.collection.JavaConverters._ trait PluginMock extends TmpDirUtils { implicit class PluginMetaDataExt(metadata: PluginDescriptor) { def toPluginId: IntellijPlugin.Id = IntellijPlugin.Id(metadata.id, Some(metadata.version), None) } protected def createPluginJarMock(metaData: PluginDescriptor): Path = { val tmpDir = newTmpDir val targetPath = tmpDir.resolve(s"${metaData.name}.jar") val targetUri = URI.create("jar:" + targetPath.toUri) val opts = Map("create" -> "true").asJava artifact.using(FileSystems.newFileSystem(targetUri, opts)) { fs => Files.createDirectory(fs.getPath("/", "META-INF")) Files.write( fs.getPath("/", "META-INF", "plugin.xml"), createPluginXmlContent(metaData).getBytes ) } targetPath } protected def createPluginZipMock(metaData: PluginDescriptor): Path = { val tmpDir = newTmpDir val targetPath = tmpDir.resolve(s"${metaData.name}.zip") val targetUri = URI.create("jar:" + targetPath.toUri) val opts = Map("create" -> "true").asJava val mainPluginJar = createPluginJarMock(metaData) artifact.using(FileSystems.newFileSystem(targetUri, opts)) { fs => val libRoot = fs.getPath("/", metaData.name, "lib") Files.createDirectories(libRoot) Files.copy( mainPluginJar, libRoot.resolve(mainPluginJar.getFileName.toString) ) } targetPath } protected def createPluginXmlContent(metaData: PluginDescriptor): String = { val depStr = metaData.dependsOn.map { case Dependency(id, true) => s"""<depends optional="true">$id</depends>""" case Dependency(id, false) => s"<depends>$id</depends>" } s""" |<idea-plugin> | <name>${metaData.name}</name> | <id>${metaData.id}</id> | <version>${metaData.version}</version> | <idea-version since-build="${metaData.sinceBuild}" until-build="${metaData.untilBuild}"/> | ${depStr.mkString("\n")} |</idea-plugin> |""".stripMargin } }
Example 101
Source File: IdeaMock.scala From sbt-idea-plugin with Apache License 2.0 | 5 votes |
package org.jetbrains.sbtidea.download.idea import java.net.{URI, URL} import java.nio.file.{Files, Path, Paths} import java.util.zip.{ZipEntry, ZipInputStream} import org.jetbrains.sbtidea.download.BuildInfo import org.jetbrains.sbtidea.packaging.artifact import org.jetbrains.sbtidea.{Keys, TmpDirUtils} import org.jetbrains.sbtidea.Keys._ import org.jetbrains.sbtidea.download.jbr.JbrDependency trait IdeaMock extends TmpDirUtils { protected val IDEA_VERSION = "192.5728.12" protected val IDEA_EDITION = "IU" protected val IDEA_DIST = s"idea$IDEA_EDITION-$IDEA_VERSION.zip" protected val IDEA_DIST_PATH = s"/org/jetbrains/sbtidea/download/$IDEA_DIST" protected val IDEA_BUILDINFO: BuildInfo = BuildInfo(IDEA_VERSION, Keys.IntelliJPlatform.IdeaUltimate, Some(JbrDependency.VERSION_AUTO)) protected val IDEA_DEP: IdeaDependency = IdeaDependency(IDEA_BUILDINFO) protected val IDEA_ART: IdeaDist = IdeaDistImpl(IDEA_DEP, new URL("file:")) protected val bundledPlugins: List[Keys.IntellijPlugin] = "org.jetbrains.plugins.yaml".toPlugin :: "com.intellij.properties".toPlugin :: Nil protected def installIdeaMock: Path = { val tmpDir = newTmpDir val installDir = Files.createDirectory(tmpDir.resolve(IDEA_VERSION)) val stream = getClass.getResourceAsStream(IDEA_DIST_PATH) artifact.using(new ZipInputStream(stream)) { zip => var entry: ZipEntry = zip.getNextEntry while (entry != null) { val toPath = installDir.resolve(entry.getName) if (entry.isDirectory) Files.createDirectory(toPath) else Files.copy(zip, toPath) entry = zip.getNextEntry } } installDir } protected def getDistCopy: Path = Files.copy(getIdeaDistMockPath, newTmpDir.resolve(IDEA_DIST)) protected def getIdeaDistMockURI: URI = getClass.getResource(IDEA_DIST_PATH).toURI protected def getIdeaDistMockPath: Path = Paths.get(getIdeaDistMockURI) }
Example 102
Source File: JavaExtraFormatsSpec.scala From sjson-new with Apache License 2.0 | 5 votes |
package sjsonnew package support.spray import spray.json.{ JsValue, JsNumber, JsString, JsNull, JsTrue, JsFalse, JsObject } import org.specs2.mutable._ import java.util.{ UUID, Optional } import java.net.{ URI, URL } import java.io.File class JavaExtraFormatsSpec extends Specification with BasicJsonProtocol { case class Person(name: Optional[String], value: Optional[Int]) implicit object PersonFormat extends JsonFormat[Person] { def write[J](x: Person, builder: Builder[J]): Unit = { builder.beginObject() builder.addField("name", x.name) builder.addField("value", x.value) builder.endObject() } def read[J](jsOpt: Option[J], unbuilder: Unbuilder[J]): Person = jsOpt match { case Some(js) => unbuilder.beginObject(js) val name = unbuilder.readField[Optional[String]]("name") val value = unbuilder.readField[Optional[Int]]("value") unbuilder.endObject() Person(name, value) case None => deserializationError("Expected JsObject but found None") } } "The uuidStringIso" should { val uuid = UUID.fromString("abc220ea-2a01-11e6-b67b-9e71128cae77") "convert a UUID to JsString" in { Converter.toJsonUnsafe(uuid) mustEqual JsString("abc220ea-2a01-11e6-b67b-9e71128cae77") } "convert the JsString back to the UUID" in { Converter.fromJsonUnsafe[UUID](JsString("abc220ea-2a01-11e6-b67b-9e71128cae77")) mustEqual uuid } } "The uriStringIso" should { val uri = new URI("http://localhost") "convert a URI to JsString" in { Converter.toJsonUnsafe(uri) mustEqual JsString("http://localhost") } "convert the JsString back to the URI" in { Converter.fromJsonUnsafe[URI](JsString("http://localhost")) mustEqual uri } } "The urlStringIso" should { val url = new URL("http://localhost") "convert a URL to JsString" in { Converter.toJsonUnsafe(url) mustEqual JsString("http://localhost") } "convert the JsString back to the URI" in { Converter.fromJsonUnsafe[URL](JsString("http://localhost")) mustEqual url } } "The fileStringIso" should { val f = new File("/tmp") val f2 = new File(new File("src"), "main") "convert a File to JsString" in { Converter.toJsonUnsafe(f) mustEqual JsString("file:///tmp/") } "convert a relative path to JsString" in { // https://tools.ietf.org/html/rfc3986#section-4.2 Converter.toJsonUnsafe(f2) mustEqual JsString("src/main") } "convert the JsString back to the File" in { Converter.fromJsonUnsafe[File](JsString("file:///tmp/")) mustEqual f } "convert the JsString back to the relative path" in { Converter.fromJsonUnsafe[File](JsString("src/main")) mustEqual f2 } } "The optionalFormat" should { "convert Optional.empty to JsNull" in { Converter.toJsonUnsafe(Optional.empty[Int]) mustEqual JsNull } "convert JsNull to None" in { Converter.fromJsonUnsafe[Optional[Int]](JsNull) mustEqual Optional.empty[Int] } "convert Some(Hello) to JsString(Hello)" in { Converter.toJsonUnsafe(Optional.of("Hello")) mustEqual JsString("Hello") } "convert JsString(Hello) to Some(Hello)" in { Converter.fromJsonUnsafe[Optional[String]](JsString("Hello")) mustEqual Optional.of("Hello") } "omit None fields" in { Converter.toJsonUnsafe(Person(Optional.empty[String], Optional.empty[Int])) mustEqual JsObject() } } }
Example 103
Source File: JavaExtraFormats.scala From sjson-new with Apache License 2.0 | 5 votes |
package sjsonnew import java.util.{ UUID, Optional } import java.net.{ URI, URL } import java.io.File import java.math.{ BigInteger, BigDecimal => JBigDecimal } trait JavaExtraFormats { this: PrimitiveFormats with AdditionalFormats with IsoFormats => private[this] type JF[A] = JsonFormat[A] // simple alias for reduced verbosity implicit val javaBigIntegerFormat: JF[BigInteger] = projectFormat[BigInteger, BigInt](BigInt.apply, _.bigInteger) implicit val javaBigDecimalFormat: JF[JBigDecimal] = projectFormat[JBigDecimal, BigDecimal](BigDecimal.apply, _.bigDecimal) implicit val uuidStringIso: IsoString[UUID] = IsoString.iso[UUID]( _.toString, UUID.fromString) implicit val uriStringIso: IsoString[URI] = IsoString.iso[URI]( _.toASCIIString, new URI(_)) implicit val urlStringIso: IsoString[URL] = IsoString.iso[URL]( _.toURI.toASCIIString, (s: String) => (new URI(s)).toURL) private[this] final val FileScheme = "file" implicit val fileStringIso: IsoString[File] = IsoString.iso[File]( (f: File) => { if (f.isAbsolute) { f.toPath.toUri.toASCIIString } else { new URI(null, normalizeName(f.getPath), null).toASCIIString } }, (s: String) => uriToFile(new URI(s))) private[this] def normalizeName(name: String) = { val sep = File.separatorChar if (sep == '/') name else name.replace(sep, '/') } private[this] def uriToFile(uri: URI): File = { val part = uri.getSchemeSpecificPart // scheme might be omitted for relative URI reference. assert( Option(uri.getScheme) match { case None | Some(FileScheme) => true case _ => false }, s"Expected protocol to be '$FileScheme' or empty in URI $uri" ) Option(uri.getAuthority) match { case None if part startsWith "/" => new File(uri) case _ => if (!(part startsWith "/") && (part contains ":")) new File("//" + part) else new File(part) } } implicit def optionalFormat[A :JF]: JF[Optional[A]] = new OptionalFormat[A] final class OptionalFormat[A :JF] extends JF[Optional[A]] { lazy val elemFormat = implicitly[JF[A]] def write[J](o: Optional[A], builder: Builder[J]): Unit = if (o.isPresent) elemFormat.write(o.get, builder) else builder.writeNull override def addField[J](name: String, o: Optional[A], builder: Builder[J]): Unit = if (o.isPresent) { builder.addFieldName(name) write(o, builder) } else () def read[J](jsOpt: Option[J], unbuilder: Unbuilder[J]): Optional[A] = jsOpt match { case Some(js) => if (unbuilder.isJnull(js)) Optional.empty[A] else Optional.ofNullable(elemFormat.read(jsOpt, unbuilder)) case None => Optional.empty[A] } } }
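A sketch of the File-to-String mapping fileStringIso implements above, on a Unix-like filesystem: absolute files become file: URIs, relative files stay plain relative references (RFC 3986, section 4.2).

import java.io.File
import java.net.URI

object FileIsoSketch extends App {
  // absolute path: full file: URI (a trailing slash is appended for directories)
  println(new File("/tmp").toPath.toUri.toASCIIString) // file:///tmp or file:///tmp/
  // relative path: scheme-less URI, so the string stays as-is
  println(new URI(null, "src/main", null).toASCIIString) // src/main
  // and back again
  println(new File(new URI("file:///tmp"))) // /tmp
}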
Example 104
Source File: CatalogUtils.scala From spark-atlas-connector with Apache License 2.0 | 5 votes |
package com.hortonworks.spark.atlas.utils import java.net.URI import org.apache.spark.sql.catalyst.TableIdentifier import org.apache.spark.sql.catalyst.catalog.{CatalogDatabase, CatalogStorageFormat, CatalogTable, CatalogTableType} import org.apache.spark.sql.types.StructType object CatalogUtils { def createDB(name: String, location: String): CatalogDatabase = { CatalogDatabase(name, "", new URI(location), Map.empty) } def createStorageFormat( locationUri: Option[URI] = None, inputFormat: Option[String] = None, outputFormat: Option[String] = None, serd: Option[String] = None, compressed: Boolean = false, properties: Map[String, String] = Map.empty): CatalogStorageFormat = { CatalogStorageFormat(locationUri, inputFormat, outputFormat, serd, compressed, properties) } def createTable( db: String, table: String, schema: StructType, storage: CatalogStorageFormat, isHiveTable: Boolean = false): CatalogTable = { CatalogTable( TableIdentifier(table, Some(db)), CatalogTableType.MANAGED, storage, schema, provider = if (isHiveTable) Some("hive") else None) } }
Example 105
Source File: TestUtils.scala From spark-atlas-connector with Apache License 2.0 | 5 votes |
package com.hortonworks.spark.atlas import java.net.URI import org.apache.spark.sql.catalyst.TableIdentifier import org.apache.spark.sql.catalyst.catalog.{CatalogDatabase, CatalogStorageFormat, CatalogTable, CatalogTableType} import org.apache.spark.sql.types.StructType import com.hortonworks.spark.atlas.utils.SparkUtils import org.apache.atlas.model.instance.AtlasObjectId object TestUtils { def createDB(name: String, location: String): CatalogDatabase = { CatalogDatabase(name, "", new URI(location), Map.empty) } def createStorageFormat( locationUri: Option[URI] = None, inputFormat: Option[String] = None, outputFormat: Option[String] = None, serd: Option[String] = None, compressed: Boolean = false, properties: Map[String, String] = Map.empty): CatalogStorageFormat = { CatalogStorageFormat(locationUri, inputFormat, outputFormat, serd, compressed, properties) } def createTable( db: String, table: String, schema: StructType, storage: CatalogStorageFormat, isHiveTable: Boolean = false): CatalogTable = { CatalogTable( TableIdentifier(table, Some(db)), CatalogTableType.MANAGED, storage, schema, provider = if (isHiveTable) Some("hive") else None, bucketSpec = None, owner = SparkUtils.currUser()) } def assertSubsetOf[T](set: Set[T], subset: Set[T]): Unit = { assert(subset.subsetOf(set), s"$subset is not a subset of $set") } def findEntity( entities: Seq[SACAtlasReferenceable], objId: AtlasObjectId): Option[SACAtlasReferenceable] = { entities.find(p => p.asObjectId == objId) } def findEntities( entities: Seq[SACAtlasReferenceable], objIds: Seq[AtlasObjectId]): Seq[SACAtlasReferenceable] = { entities.filter(p => objIds.contains(p.asObjectId)) } }
Example 106
Source File: ZioSqsMockServer.scala From zio-sqs with Apache License 2.0 | 5 votes |
package zio.sqs import java.net.URI import org.elasticmq.rest.sqs.{ SQSRestServer, SQSRestServerBuilder } import software.amazon.awssdk.auth.credentials.{ AwsBasicCredentials, StaticCredentialsProvider } import software.amazon.awssdk.regions.Region import software.amazon.awssdk.services.sqs.SqsAsyncClient import zio.{ Task, UIO, ZIO, ZManaged } object ZioSqsMockServer { private val staticCredentialsProvider: StaticCredentialsProvider = StaticCredentialsProvider.create(AwsBasicCredentials.create("key", "key")) private val uri = new URI("http://localhost:9324") private val region: Region = Region.AP_NORTHEAST_2 val serverResource: Task[ZManaged[Any, Throwable, SQSRestServer]] = ZIO.effect( ZManaged.make( Task(SQSRestServerBuilder.start()) )(server => UIO.effectTotal(server.stopAndWait())) ) val clientResource: Task[ZManaged[Any, Throwable, SqsAsyncClient]] = ZIO.effect( ZManaged.make( Task { SqsAsyncClient .builder() .region(region) .credentialsProvider( staticCredentialsProvider ) .endpointOverride(uri) .build() } )(client => UIO.effectTotal(client.close())) ) }
Example 107
Source File: Util.scala From incubator-retired-gearpump with Apache License 2.0 | 5 votes |
package org.apache.gearpump.util import java.io.{BufferedReader, File, FileInputStream, InputStreamReader} import java.net.{ServerSocket, URI} import scala.concurrent.forkjoin.ThreadLocalRandom import scala.sys.process.Process import scala.util.{Failure, Success, Try} import com.typesafe.config.{Config, ConfigFactory} import org.apache.gearpump.cluster.AppJar import org.apache.gearpump.jarstore.JarStoreClient import org.apache.gearpump.transport.HostPort object Util { val LOG = LogUtil.getLogger(getClass) private val defaultUri = new URI("file:///") private val appNamePattern = "^[a-zA-Z_][a-zA-Z0-9_]+$".r.pattern def validApplicationName(appName: String): Boolean = { appNamePattern.matcher(appName).matches() } def getCurrentClassPath: Array[String] = { val classpath = System.getProperty("java.class.path") val classpathList = classpath.split(File.pathSeparator) classpathList } def version: String = { val home = System.getProperty(Constants.GEARPUMP_HOME) val version = Try { val versionFile = new FileInputStream(new File(home, "VERSION")) val reader = new BufferedReader(new InputStreamReader(versionFile)) val version = reader.readLine().replace("version:=", "") versionFile.close() version } version match { case Success(version) => version case Failure(ex) => LOG.error("failed to read VERSION file, " + ex.getMessage) "Unknown-Version" } } def startProcess(options: Array[String], classPath: Array[String], mainClass: String, arguments: Array[String]): RichProcess = { val java = System.getProperty("java.home") + "/bin/java" val command = List(java) ++ options ++ List("-cp", classPath.mkString(File.pathSeparator), mainClass) ++ arguments LOG.info(s"Starting executor process java $mainClass ${arguments.mkString(" ")} " + s"\n ${options.mkString(" ")}") val logger = new ProcessLogRedirector() val process = Process(command).run(logger) new RichProcess(process, logger) } def resolveJvmSetting(conf: Config): AppJvmSettings = { import org.apache.gearpump.util.Constants._ val appMasterVMArgs = Try(conf.getString(GEARPUMP_APPMASTER_ARGS).split("\\s+") .filter(_.nonEmpty)).toOption val executorVMArgs = Try(conf.getString(GEARPUMP_EXECUTOR_ARGS).split("\\s+") .filter(_.nonEmpty)).toOption val appMasterClassPath = Try( conf.getString(GEARPUMP_APPMASTER_EXTRA_CLASSPATH) .split("[;:]").filter(_.nonEmpty)).toOption val executorClassPath = Try( conf.getString(GEARPUMP_EXECUTOR_EXTRA_CLASSPATH) .split(File.pathSeparator).filter(_.nonEmpty)).toOption AppJvmSettings( JvmSetting(appMasterVMArgs.getOrElse(Array.empty[String]), appMasterClassPath.getOrElse(Array.empty[String])), JvmSetting(executorVMArgs .getOrElse(Array.empty[String]), executorClassPath.getOrElse(Array.empty[String]))) } def asSubDirOfGearpumpHome(dir: String): File = { new File(System.getProperty(Constants.GEARPUMP_HOME), dir) } }
Example 108
Source File: Export.scala From seqspark with Apache License 2.0 | 5 votes |
package org.dizhang.seqspark.worker import java.net.URI import java.nio.file.{Files, Path, Paths} import org.dizhang.seqspark.ds.Genotype import org.dizhang.seqspark.ds.VCF._ import org.dizhang.seqspark.util.SeqContext import org.dizhang.seqspark.util.UserConfig.hdfs import org.apache.hadoop import org.slf4j.LoggerFactory import scala.collection.JavaConverters._ object Export { private val logger = LoggerFactory.getLogger(getClass) def apply[A: Genotype](data: Data[A])(implicit ssc: SeqContext): Unit = { val geno = implicitly[Genotype[A]] val conf = ssc.userConfig.output.genotype if (conf.export) { val path = if (conf.path.isEmpty) ssc.userConfig.input.genotype.path + "." + ssc.userConfig.project else conf.path logger.info(s"going to export data to $path") if (path.startsWith("file:")) { val p = Paths.get(URI.create(path)) if (Files.exists(p)) { Files.walk(p) .iterator() .asScala .toList .sorted(Ordering[Path].reverse) .foreach(f => Files.delete(f)) } } else { val hdPath = new hadoop.fs.Path(path) if (hdfs.exists(hdPath)) { hdfs.delete(hdPath, true) } } data.samples(conf.samples).saveAsTextFile(path) } if (conf.save || conf.cache) { data.saveAsObjectFile(conf.path) } } }
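For the local branch above, java.net.URI is the bridge from the "file:"-prefixed path string into a java.nio Path; a tiny sketch with a made-up output path:

import java.net.URI
import java.nio.file.{Path, Paths}

object LocalPathSketch extends App {
  val p: Path = Paths.get(URI.create("file:///tmp/seqspark-out"))
  println(p) // /tmp/seqspark-out
}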
Example 109
Source File: AddJar.scala From incubator-toree with Apache License 2.0 | 5 votes |
package org.apache.toree.magic.builtin

import java.io.{File, PrintStream}
import java.net.{URL, URI}
import java.nio.file.{Files, Paths}
import java.util.zip.ZipFile
import org.apache.toree.magic._
import org.apache.toree.magic.builtin.AddJar._
import org.apache.toree.magic.dependencies._
import org.apache.toree.utils.{ArgumentParsingSupport, DownloadSupport, LogLike, FileUtils}
import com.typesafe.config.Config
import org.apache.hadoop.fs.Path
import org.apache.toree.plugins.annotations.Event

object AddJar {
  val HADOOP_FS_SCHEMES = Set("hdfs", "s3", "s3n", "file")

  private var jarDir: Option[String] = None

  def getJarDir(config: Config): String = {
    jarDir.getOrElse({
      jarDir = Some(
        if (config.hasPath("jar_dir") &&
            Files.exists(Paths.get(config.getString("jar_dir")))) {
          config.getString("jar_dir")
        } else {
          FileUtils.createManagedTempDirectory("toree_add_jars").getAbsolutePath
        }
      )
      jarDir.get
    })
  }
}

class AddJar extends LineMagic with IncludeInterpreter with IncludeOutputStream
  with DownloadSupport with ArgumentParsingSupport with IncludeKernel
  with IncludePluginManager with IncludeConfig with LogLike {

  // Option to mark re-downloading of jars
  private val _force = parser.accepts("f", "forces re-download of specified jar")

  // Option to load the jar as a magic extension
  private val _magic = parser.accepts("magic", "loads jar as a magic extension")

  // Lazy because the outputStream is not provided at construction
  private def printStream = new PrintStream(outputStream)

  // NOTE: the start of the `execute` method -- its signature, the argument
  // parsing that computes jarName / jarRemoteLocation / downloadLocation /
  // fileDownloadLocation, and the Hadoop FileSystem branch that handles
  // HADOOP_FS_SCHEMES -- was elided by the example extractor; only the tail
  // of the method survives below.
      )
    } else {
      downloadFile(
        new URL(jarRemoteLocation),
        new File(downloadLocation).toURI.toURL
      )
    }
    // Report download finished
    printStream.println(s"Finished download of $jarName")
  } else {
    printStream.println(s"Using cached version of $jarName")
  }

  // validate jar file
  if (! isValidJar(fileDownloadLocation)) {
    throw new IllegalArgumentException(s"Jar '$jarName' is not valid.")
  }

  if (_magic) {
    val plugins = pluginManager.loadPlugins(fileDownloadLocation)
    pluginManager.initializePlugins(plugins)
  } else {
    kernel.addJars(fileDownloadLocation.toURI)
  }
  } // end of execute (opening elided)
}
Example 110
Source File: P2PRpc.scala From bitcoin-s with MIT License | 5 votes |
package org.bitcoins.rpc.client.common import java.net.URI import org.bitcoins.commons.jsonmodels.bitcoind.RpcOpts.{ AddNodeArgument, SetBanCommand } import org.bitcoins.commons.jsonmodels.bitcoind._ import org.bitcoins.commons.serializers.JsonSerializers._ import org.bitcoins.core.protocol.blockchain.Block import play.api.libs.json.{JsBoolean, JsNumber, JsString} import scala.concurrent.Future trait P2PRpc { self: Client => def addNode(address: URI, command: AddNodeArgument): Future[Unit] = { bitcoindCall[Unit]( "addnode", List(JsString(address.getAuthority), JsString(command.toString))) } def clearBanned(): Future[Unit] = { bitcoindCall[Unit]("clearbanned") } def disconnectNode(address: URI): Future[Unit] = { bitcoindCall[Unit]("disconnectnode", List(JsString(address.getAuthority))) } def getAddedNodeInfo: Future[Vector[Node]] = getAddedNodeInfo(None) private def getAddedNodeInfo(node: Option[URI]): Future[Vector[Node]] = { val params = if (node.isEmpty) { List.empty } else { List(JsString(node.get.getAuthority)) } bitcoindCall[Vector[Node]]("getaddednodeinfo", params) } def getAddedNodeInfo(node: URI): Future[Vector[Node]] = getAddedNodeInfo(Some(node)) def getConnectionCount: Future[Int] = { bitcoindCall[Int]("getconnectioncount") } def getNetTotals: Future[GetNetTotalsResult] = { bitcoindCall[GetNetTotalsResult]("getnettotals") } def getNetworkInfo: Future[GetNetworkInfoResult] = { bitcoindCall[GetNetworkInfoResult]("getnetworkinfo") } def getPeerInfo: Future[Vector[Peer]] = { bitcoindCall[Vector[Peer]]("getpeerinfo") } def listBanned: Future[Vector[NodeBan]] = { bitcoindCall[Vector[NodeBan]]("listbanned") } def setBan( address: URI, command: SetBanCommand, banTime: Int = 86400, absolute: Boolean = false): Future[Unit] = { bitcoindCall[Unit]("setban", List(JsString(address.getAuthority), JsString(command.toString), JsNumber(banTime), JsBoolean(absolute))) } def setNetworkActive(activate: Boolean): Future[Unit] = { bitcoindCall[Unit]("setnetworkactive", List(JsBoolean(activate))) } def submitBlock(block: Block): Future[Unit] = { bitcoindCall[Unit]("submitblock", List(JsString(block.hex))) } }
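Note that the RPC calls above send address.getAuthority rather than address.toString, which reduces a URI to the host:port form bitcoind expects for its node arguments. A quick sketch with a made-up address:

import java.net.URI

object AuthoritySketch extends App {
  val addr = new URI("tcp://10.0.0.5:8333")
  println(addr.getAuthority) // 10.0.0.5:8333
  println(addr.getHost)      // 10.0.0.5
  println(addr.getPort)      // 8333
}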
Example 111
Source File: Spark2JobApiIT.scala From incubator-livy with Apache License 2.0 | 5 votes |
package org.apache.livy.test import java.io.File import java.net.URI import java.util.concurrent.{TimeUnit, Future => JFuture} import javax.servlet.http.HttpServletResponse import com.fasterxml.jackson.databind.ObjectMapper import com.fasterxml.jackson.module.scala.DefaultScalaModule import org.scalatest.BeforeAndAfterAll import org.apache.http.client.methods.HttpGet import org.apache.livy._ import org.apache.livy.client.common.HttpMessages._ import org.apache.livy.sessions.SessionKindModule import org.apache.livy.test.framework.BaseIntegrationTestSuite import org.apache.livy.test.jobs.spark2._ class Spark2JobApiIT extends BaseIntegrationTestSuite with BeforeAndAfterAll with Logging { private var client: LivyClient = _ private var sessionId: Int = _ private val mapper = new ObjectMapper() .registerModule(DefaultScalaModule) .registerModule(new SessionKindModule()) override def afterAll(): Unit = { super.afterAll() if (client != null) { client.stop(true) } livyClient.connectSession(sessionId).stop() } test("create a new session and upload test jar") { val prevSessionCount = sessionList().total val tempClient = createClient(livyEndpoint) try { // Figure out the session ID by poking at the REST endpoint. We should probably expose this // in the Java API. val list = sessionList() assert(list.total === prevSessionCount + 1) val tempSessionId = list.sessions(0).id livyClient.connectSession(tempSessionId).verifySessionIdle() waitFor(tempClient.uploadJar(new File(testLib))) client = tempClient sessionId = tempSessionId } finally { if (client == null) { try { if (tempClient != null) { tempClient.stop(true) } } catch { case e: Exception => warn("Error stopping client.", e) } } } } test("run spark2 job") { assume(client != null, "Client not active.") val result = waitFor(client.submit(new SparkSessionTest())) assert(result === 3) } test("run spark2 dataset job") { assume(client != null, "Client not active.") val result = waitFor(client.submit(new DatasetTest())) assert(result === 2) } private def waitFor[T](future: JFuture[T]): T = { future.get(60, TimeUnit.SECONDS) } private def sessionList(): SessionList = { val httpGet = new HttpGet(s"$livyEndpoint/sessions/") val r = livyClient.httpClient.execute(httpGet) val statusCode = r.getStatusLine().getStatusCode() val responseBody = r.getEntity().getContent val sessionList = mapper.readValue(responseBody, classOf[SessionList]) r.close() assert(statusCode == HttpServletResponse.SC_OK) sessionList } private def createClient(uri: String): LivyClient = { new LivyClientBuilder().setURI(new URI(uri)).build() } }
Example 112
Source File: SessionSpec.scala From incubator-livy with Apache License 2.0 | 5 votes |
package org.apache.livy.sessions import java.net.URI import org.scalatest.FunSuite import org.apache.livy.{LivyBaseUnitTestSuite, LivyConf} class SessionSpec extends FunSuite with LivyBaseUnitTestSuite { test("use default fs in paths") { val conf = new LivyConf(false) conf.hadoopConf.set("fs.defaultFS", "dummy:///") val uris = Seq("http://example.com/foo", "hdfs:/bar", "/baz", "/foo#bar") val expected = Seq(uris(0), uris(1), "dummy:///baz", "dummy:///foo#bar") assert(Session.resolveURIs(uris, conf) === expected) intercept[IllegalArgumentException] { Session.resolveURI(new URI("relative_path"), conf) } } test("local fs whitelist") { val conf = new LivyConf(false) conf.set(LivyConf.LOCAL_FS_WHITELIST, "/allowed/,/also_allowed") Seq("/allowed/file", "/also_allowed/file").foreach { path => assert(Session.resolveURI(new URI(path), conf) === new URI("file://" + path)) } Seq("/not_allowed", "/allowed_not_really").foreach { path => intercept[IllegalArgumentException] { Session.resolveURI(new URI(path), conf) } } } test("conf validation and preparation") { val conf = new LivyConf(false) conf.hadoopConf.set("fs.defaultFS", "dummy:///") conf.set(LivyConf.LOCAL_FS_WHITELIST, "/allowed") // Test baseline. assert(Session.prepareConf(Map(), Nil, Nil, Nil, Nil, conf) === Map("spark.master" -> "local")) // Test validations. intercept[IllegalArgumentException] { Session.prepareConf(Map("spark.do_not_set" -> "1"), Nil, Nil, Nil, Nil, conf) } conf.sparkFileLists.foreach { key => intercept[IllegalArgumentException] { Session.prepareConf(Map(key -> "file:/not_allowed"), Nil, Nil, Nil, Nil, conf) } } intercept[IllegalArgumentException] { Session.prepareConf(Map(), Seq("file:/not_allowed"), Nil, Nil, Nil, conf) } intercept[IllegalArgumentException] { Session.prepareConf(Map(), Nil, Seq("file:/not_allowed"), Nil, Nil, conf) } intercept[IllegalArgumentException] { Session.prepareConf(Map(), Nil, Nil, Seq("file:/not_allowed"), Nil, conf) } intercept[IllegalArgumentException] { Session.prepareConf(Map(), Nil, Nil, Nil, Seq("file:/not_allowed"), conf) } // Test that file lists are merged and resolved. val base = "/file1.txt" val other = Seq("/file2.txt") val expected = Some(Seq("dummy://" + other(0), "dummy://" + base).mkString(",")) val userLists = Seq(LivyConf.SPARK_JARS, LivyConf.SPARK_FILES, LivyConf.SPARK_ARCHIVES, LivyConf.SPARK_PY_FILES) val baseConf = userLists.map { key => (key -> base) }.toMap val result = Session.prepareConf(baseConf, other, other, other, other, conf) userLists.foreach { key => assert(result.get(key) === expected) } } }
Example 113
Source File: ReplDriverSuite.scala From incubator-livy with Apache License 2.0 | 5 votes |
package org.apache.livy.repl import java.net.URI import java.util.concurrent.TimeUnit import scala.concurrent.duration._ import scala.language.postfixOps import org.apache.spark.launcher.SparkLauncher import org.json4s._ import org.json4s.jackson.JsonMethods._ import org.scalatest.FunSuite import org.scalatest.concurrent.Eventually._ import org.apache.livy._ import org.apache.livy.rsc.{PingJob, RSCClient, RSCConf} import org.apache.livy.sessions.Spark class ReplDriverSuite extends FunSuite with LivyBaseUnitTestSuite { private implicit val formats = DefaultFormats test("start a repl session using the rsc") { val client = new LivyClientBuilder() .setConf(SparkLauncher.DRIVER_MEMORY, "512m") .setConf(SparkLauncher.DRIVER_EXTRA_CLASSPATH, sys.props("java.class.path")) .setConf(SparkLauncher.EXECUTOR_EXTRA_CLASSPATH, sys.props("java.class.path")) .setConf(RSCConf.Entry.LIVY_JARS.key(), "") .setURI(new URI("rsc:/")) .setConf(RSCConf.Entry.DRIVER_CLASS.key(), classOf[ReplDriver].getName()) .setConf(RSCConf.Entry.SESSION_KIND.key(), Spark.toString) .build() .asInstanceOf[RSCClient] try { // This is sort of what InteractiveSession.scala does to detect an idle session. client.submit(new PingJob()).get(60, TimeUnit.SECONDS) val statementId = client.submitReplCode("1 + 1", "spark").get eventually(timeout(30 seconds), interval(100 millis)) { val rawResult = client.getReplJobResults(statementId, 1).get(10, TimeUnit.SECONDS).statements(0) val result = rawResult.output assert((parse(result) \ Session.STATUS).extract[String] === Session.OK) } } finally { client.stop(true) } } }
Example 114
Source File: ServiceRegistrationModule.scala From lagom with Apache License 2.0 | 5 votes |
package com.lightbend.lagom.internal.server import java.net.URI import java.util.function.{ Function => JFunction } import akka.actor.CoordinatedShutdown import akka.Done import akka.NotUsed import com.lightbend.lagom.internal.javadsl.registry.ServiceRegistry import com.lightbend.lagom.internal.javadsl.registry.ServiceRegistryService import com.lightbend.lagom.internal.javadsl.server.ResolvedServices import com.lightbend.lagom.devmode.internal.registry.serviceDnsRecords import com.typesafe.config.Config import javax.inject.Inject import javax.inject.Provider import javax.inject.Singleton import play.api.inject.Binding import play.api.inject.Module import play.api.Configuration import play.api.Environment import play.api.Logger import scala.compat.java8.FutureConverters.CompletionStageOps import scala.concurrent.ExecutionContext import scala.concurrent.Future import scala.collection.JavaConverters._ import scala.collection.immutable class ServiceRegistrationModule extends Module { override def bindings(environment: Environment, configuration: Configuration): Seq[Binding[_]] = Seq( bind[ServiceRegistrationModule.RegisterWithServiceRegistry].toSelf.eagerly(), bind[ServiceRegistrationModule.ServiceConfig].toProvider[ServiceRegistrationModule.ServiceConfigProvider] ) } object ServiceRegistrationModule { class ServiceConfigProvider @Inject() (config: Config) extends Provider[ServiceConfig] { override lazy val get = ServiceConfig(serviceDnsRecords(config)) } case class ServiceConfig(uris: immutable.Seq[URI]) @Singleton private class RegisterWithServiceRegistry @Inject() ( coordinatedShutdown: CoordinatedShutdown, resolvedServices: ResolvedServices, config: ServiceConfig, registry: ServiceRegistry )(implicit ec: ExecutionContext) { private lazy val logger: Logger = Logger(this.getClass()) private val locatableServices = resolvedServices.services.filter(_.descriptor.locatableService) coordinatedShutdown.addTask( CoordinatedShutdown.PhaseBeforeServiceUnbind, "unregister-services-from-service-locator-javadsl" ) { () => Future .sequence(locatableServices.map { service => registry.unregister(service.descriptor.name).invoke().toScala }) .map(_ => Done) } locatableServices.foreach { service => val c = ServiceRegistryService.of(config.uris.asJava, service.descriptor.acls) registry .register(service.descriptor.name) .invoke(c) .exceptionally(new JFunction[Throwable, NotUsed] { def apply(t: Throwable) = { logger .error(s"Service name=[${service.descriptor.name}] couldn't register itself to the service locator.", t) NotUsed.getInstance() } }) } } }
Example 115
Source File: JavaServiceRegistryClient.scala From lagom with Apache License 2.0 | 5 votes |
package com.lightbend.lagom.internal.javadsl.registry import java.net.URI import java.util.Optional import javax.inject.Inject import javax.inject.Singleton import com.lightbend.lagom.devmode.internal.registry.AbstractLoggingServiceRegistryClient import com.lightbend.lagom.javadsl.api.transport.NotFound import scala.collection.immutable import scala.compat.java8.FutureConverters._ import scala.compat.java8.OptionConverters import scala.concurrent.ExecutionContext import scala.concurrent.Future @Singleton private[lagom] class JavaServiceRegistryClient @Inject() ( registry: ServiceRegistry, implicit val ec: ExecutionContext ) extends AbstractLoggingServiceRegistryClient { protected override def internalLocateAll(serviceName: String, portName: Option[String]): Future[immutable.Seq[URI]] = registry .lookup(serviceName, OptionConverters.toJava(portName)) .invoke() .toScala .map(immutable.Seq[URI](_)) .recover { case _: NotFound => Nil } }
Example 116
Source File: ServiceRegistryServiceLocator.scala From lagom with Apache License 2.0 | 5 votes |
package com.lightbend.lagom.internal.javadsl.registry import java.net.URI import java.util.concurrent.CompletionStage import java.util.Optional import java.util.{ List => JList } import javax.inject.Inject import javax.inject.Singleton import com.lightbend.lagom.devmode.internal.registry.ServiceRegistryClient import com.lightbend.lagom.javadsl.api.Descriptor.Call import com.lightbend.lagom.javadsl.client.CircuitBreakersPanel import com.lightbend.lagom.javadsl.client.CircuitBreakingServiceLocator import scala.collection.JavaConverters._ import scala.compat.java8.FutureConverters._ import scala.compat.java8.OptionConverters._ import scala.concurrent.ExecutionContext @Singleton private[lagom] class ServiceRegistryServiceLocator @Inject() ( circuitBreakers: CircuitBreakersPanel, client: ServiceRegistryClient, implicit val ec: ExecutionContext ) extends CircuitBreakingServiceLocator(circuitBreakers) { override def locateAll(name: String, serviceCall: Call[_, _]): CompletionStage[JList[URI]] = // a ServiceLocator doesn't know what a `portName` is so we default to `None` and the // implementation will return any registry without a port name. This means that in order // for this queries to work any service registered using `http` as portName will also have // to be registered without name. client.locateAll(name, None).map(_.asJava).toJava override def locate(name: String, serviceCall: Call[_, _]): CompletionStage[Optional[URI]] = // a ServiceLocator doesn't know what a `portName` is so we default to `None` and the // implementation will return any registry without a port name. This means that in order // for this queries to work any service registered using `http` as portName will also have // to be registered without name. client.locateAll(name, None).map(_.headOption.asJava).toJava }
Example 117
Source File: NoServiceLocator.scala From lagom with Apache License 2.0 | 5 votes |
package com.lightbend.lagom.registry.impl import java.net.URI import java.util.Optional import java.util.concurrent.CompletionStage import java.util.function.{ Function => JFunction } import com.lightbend.lagom.javadsl.api.Descriptor.Call import com.lightbend.lagom.javadsl.api.ServiceLocator class NoServiceLocator extends ServiceLocator { import java.util.concurrent.CompletableFuture override def locate(name: String, serviceCall: Call[_, _]): CompletionStage[Optional[URI]] = CompletableFuture.completedFuture(Optional.empty()) override def doWithService[T]( name: String, serviceCall: Call[_, _], block: JFunction[URI, CompletionStage[T]] ): CompletionStage[Optional[T]] = CompletableFuture.completedFuture(Optional.empty()) }
Example 118
Source File: ServiceRegistryInteropSpec.scala From lagom with Apache License 2.0 | 5 votes |
package com.lightbend.lagom.registry.impl import java.net.URI import java.util.Collections import java.util.Optional import akka.actor.ActorSystem import akka.testkit.TestKit import akka.util.ByteString import com.lightbend.lagom.devmode.internal.scaladsl.registry.RegisteredService import com.lightbend.lagom.devmode.internal.scaladsl.registry.ServiceRegistryService import com.lightbend.lagom.internal.javadsl.registry.{ RegisteredService => jRegisteredService } import com.lightbend.lagom.internal.javadsl.registry.{ ServiceRegistryService => jServiceRegistryService } import com.lightbend.lagom.devmode.internal.scaladsl.registry.{ RegisteredService => sRegisteredService } import com.lightbend.lagom.devmode.internal.scaladsl.registry.{ ServiceRegistryService => sServiceRegistryService } import com.lightbend.lagom.javadsl.api.ServiceAcl import com.lightbend.lagom.javadsl.api.deser.MessageSerializer import com.lightbend.lagom.javadsl.api.deser.StrictMessageSerializer import com.lightbend.lagom.javadsl.api.transport.MessageProtocol import com.lightbend.lagom.javadsl.api.transport.Method import com.lightbend.lagom.javadsl.jackson.JacksonSerializerFactory import org.scalatest.BeforeAndAfterAll import org.scalatest.concurrent.Futures import play.api.libs.json.Format import play.api.libs.json.Json import org.scalatest.flatspec.AnyFlatSpec import org.scalatest.matchers.should.Matchers class ServiceRegistryInteropSpec extends AnyFlatSpec with Matchers with Futures with BeforeAndAfterAll { val system = ActorSystem() val jacksonSerializerFactory = new JacksonSerializerFactory(system) protected override def afterAll(): Unit = { TestKit.shutdownActorSystem(actorSystem = system, verifySystemShutdown = true) } behavior.of("ServiceRegistry serializers") it should "should interop between java and scala (RegisteredService)" in { val msg = jRegisteredService.of("inventory", URI.create("https://localhost:123/asdf"), Optional.of("https")) roundTrip(msg) should be(msg) } it should "should interop between java and scala when optional fields are empty (RegisteredService)" in { val msg = jRegisteredService.of("inventory", URI.create("https://localhost:123/asdf"), Optional.empty[String]) roundTrip(msg) should be(msg) } it should "should interop between java and scala (ServiceRegistryService)" in { val msg = jServiceRegistryService.of( URI.create("https://localhost:123/asdf"), Collections.singletonList(ServiceAcl.methodAndPath(Method.GET, "/items")) ) roundTrip(msg) should be(msg) } it should "should interop between java and scala when optional fields are empty (ServiceRegistryService)" in { val msg = jServiceRegistryService.of(URI.create("https://localhost:123/asdf"), Collections.emptyList[ServiceAcl]) roundTrip(msg) should be(msg) } private def roundTrip(input: jServiceRegistryService): jServiceRegistryService = { roundTrip( input, jacksonSerializerFactory.messageSerializerFor[jServiceRegistryService](classOf[jServiceRegistryService]), com.lightbend.lagom.scaladsl.playjson.JsonSerializer[ServiceRegistryService].format )(sServiceRegistryService.format) } private def roundTrip(input: jRegisteredService): jRegisteredService = { roundTrip( input, jacksonSerializerFactory.messageSerializerFor[jRegisteredService](classOf[jRegisteredService]), com.lightbend.lagom.scaladsl.playjson.JsonSerializer[RegisteredService].format )(sRegisteredService.format) } private def roundTrip[J, S]( input: J, jacksonSerializer: StrictMessageSerializer[J], playJsonFormatter: Format[S] )(implicit format: Format[S]): J = { val byteString: ByteString = jacksonSerializer.serializerForRequest().serialize(input) val scalaValue: S = playJsonFormatter.reads(Json.parse(byteString.toArray)).get val str: String = playJsonFormatter.writes(scalaValue).toString() val jacksonDeserializer: MessageSerializer.NegotiatedDeserializer[J, ByteString] = jacksonSerializer.deserializer( new MessageProtocol(Optional.of("application/json"), Optional.empty[String], Optional.empty[String]) ) jacksonDeserializer.deserialize(ByteString(str)) } }
Example 119
Source File: InternalRouterSpec.scala From lagom with Apache License 2.0 | 5 votes |
package com.lightbend.lagom.registry.impl import java.net.URI import java.util import java.util.Collections import com.lightbend.lagom.internal.javadsl.registry.ServiceRegistryService import com.lightbend.lagom.javadsl.api.ServiceAcl import com.lightbend.lagom.javadsl.api.transport.Method import com.lightbend.lagom.registry.impl.ServiceRegistryActor.Found import com.lightbend.lagom.registry.impl.ServiceRegistryActor.Route import org.scalatest.flatspec.AnyFlatSpec import org.scalatest.matchers.should.Matchers class InternalRouterSpec extends AnyFlatSpec with Matchers { behavior.of("InternalRouter") it should "find the appropriate URI given the portName" in { val httpUri = new URI("http://localhost.com/pathABC") val httpsUri = new URI("https://localhost.com:123/pathABC") val simpleName = "my-service" val acl = ServiceAcl.methodAndPath(Method.GET, "/pathABC") val srs = ServiceRegistryService.of(util.Arrays.asList(httpUri, httpsUri), Collections.singletonList(acl)) val registry = new InternalRegistry(Map.empty) registry.register(simpleName, srs) val router = new InternalRouter router.rebuild(registry) router.routeFor(Route("GET", "/pathABC", None)) should be(Found(httpUri)) router.routeFor(Route("GET", "/pathABC", Some("http"))) should be(Found(httpUri)) router.routeFor(Route("GET", "/pathABC", Some("https"))) should be(Found(httpsUri)) } }
Example 120
Source File: LagomDevModeServiceRegistry.scala From lagom with Apache License 2.0 | 5 votes |
package com.lightbend.lagom.devmode.internal.scaladsl.registry import java.net.URI import akka.NotUsed import akka.util.ByteString import com.lightbend.lagom.devmode.internal.registry.ServiceRegistryClient import com.lightbend.lagom.scaladsl.api.deser.MessageSerializer.NegotiatedDeserializer import com.lightbend.lagom.scaladsl.api.deser.MessageSerializer.NegotiatedSerializer import com.lightbend.lagom.scaladsl.api.deser.MessageSerializer import com.lightbend.lagom.scaladsl.api.deser.StrictMessageSerializer import com.lightbend.lagom.scaladsl.api.transport.MessageProtocol import com.lightbend.lagom.scaladsl.api.transport.Method import com.lightbend.lagom.scaladsl.api.Descriptor import com.lightbend.lagom.scaladsl.api.Service import com.lightbend.lagom.scaladsl.api.ServiceAcl import com.lightbend.lagom.scaladsl.api.ServiceCall import play.api.libs.functional.syntax._ import play.api.libs.json._ import scala.collection.immutable import scala.collection.immutable.Seq trait ServiceRegistry extends Service { def register(name: String): ServiceCall[ServiceRegistryService, NotUsed] def unregister(name: String): ServiceCall[NotUsed, NotUsed] def lookup(name: String, portName: Option[String]): ServiceCall[NotUsed, URI] def registeredServices: ServiceCall[NotUsed, immutable.Seq[RegisteredService]] import Service._ import ServiceRegistry._ def descriptor: Descriptor = { named(ServiceRegistryClient.ServiceName) .withCalls( restCall(Method.PUT, "/services/:id", register _), restCall(Method.DELETE, "/services/:id", this.unregister _), restCall(Method.GET, "/services/:id?portName", lookup _), pathCall("/services", registeredServices) ) .withLocatableService(false) } } object ServiceRegistry { implicit val uriMessageSerializer: MessageSerializer[URI, ByteString] = new StrictMessageSerializer[URI] { private val serializer = new NegotiatedSerializer[URI, ByteString] { override def serialize(message: URI): ByteString = ByteString.fromString(message.toString, "utf-8") override val protocol: MessageProtocol = MessageProtocol.empty.withContentType("text/plain").withCharset("utf-8") } override def serializerForRequest = serializer override def serializerForResponse(acceptedMessageProtocols: Seq[MessageProtocol]) = serializer override def deserializer(protocol: MessageProtocol): NegotiatedDeserializer[URI, ByteString] = new NegotiatedDeserializer[URI, ByteString] { override def deserialize(wire: ByteString) = URI.create(wire.decodeString(protocol.charset.getOrElse("utf-8"))) } } } case class RegisteredService(name: String, url: URI, portName: Option[String]) object RegisteredService { import UriFormat.uriFormat implicit val format: Format[RegisteredService] = Json.format[RegisteredService] } case class ServiceRegistryService(uris: immutable.Seq[URI], acls: immutable.Seq[ServiceAcl]) object ServiceRegistryService { def apply(uri: URI, acls: immutable.Seq[ServiceAcl]): ServiceRegistryService = ServiceRegistryService(Seq(uri), acls) import UriFormat.uriFormat implicit val methodFormat: Format[Method] = (__ \ "name").format[String].inmap(new Method(_), _.name) implicit val serviceAclFormat: Format[ServiceAcl] = (__ \ "method") .formatNullable[Method] .and((__ \ "pathRegex").formatNullable[String]) .apply(ServiceAcl.apply, acl => (acl.method, acl.pathRegex)) implicit val format: Format[ServiceRegistryService] = Json.format[ServiceRegistryService] } object UriFormat { implicit val uriFormat: Format[URI] = implicitly[Format[String]].inmap(URI.create, _.toString) }
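A round-trip sketch mirroring the serialize/deserialize logic of the plain-text URI MessageSerializer defined above (illustrative only, not a Lagom test; it exercises the same ByteString and URI calls without the MessageProtocol plumbing):

import java.net.URI
import akka.util.ByteString

object UriSerializerSketch extends App {
  val uri = new URI("http://localhost:8000/services")
  val bytes = ByteString.fromString(uri.toString, "utf-8") // serialize step
  val back = URI.create(bytes.decodeString("utf-8"))       // deserialize step
  assert(back == uri)
  println(back)
}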
Example 121
Source File: ServiceRegistryServiceLocator.scala From lagom with Apache License 2.0 | 5 votes |
package com.lightbend.lagom.devmode.internal.scaladsl.registry import java.net.URI import com.lightbend.lagom.devmode.internal.registry.ServiceRegistryClient import com.lightbend.lagom.scaladsl.api.Descriptor.Call import com.lightbend.lagom.scaladsl.client.CircuitBreakersPanel import com.lightbend.lagom.scaladsl.client.CircuitBreakingServiceLocator import scala.concurrent.ExecutionContext import scala.concurrent.Future private[lagom] class ServiceRegistryServiceLocator( circuitBreakers: CircuitBreakersPanel, client: ServiceRegistryClient, implicit val ec: ExecutionContext ) extends CircuitBreakingServiceLocator(circuitBreakers) { override def locateAll(name: String, serviceCall: Call[_, _]): Future[List[URI]] = // a ServiceLocator doesn't know what a `portName` is so we default to `None` and the // implementation will return any registry without a port name. This means that in order // for this queries to work any service registered using `http` as portName will also have // to be registered without name. client.locateAll(name, None).map(_.toList) override def locate(name: String, serviceCall: Call[_, _]): Future[Option[URI]] = locateAll(name, serviceCall).map(_.headOption) }
Example 122
Source File: ScalaServiceRegistryClient.scala From lagom with Apache License 2.0 | 5 votes |
package com.lightbend.lagom.devmode.internal.scaladsl.registry import java.net.URI import com.lightbend.lagom.devmode.internal.registry.AbstractLoggingServiceRegistryClient import com.lightbend.lagom.scaladsl.api.transport.NotFound import scala.collection.immutable import scala.concurrent.ExecutionContext import scala.concurrent.Future private[lagom] class ScalaServiceRegistryClient(registry: ServiceRegistry)(implicit ec: ExecutionContext) extends AbstractLoggingServiceRegistryClient { protected override def internalLocateAll(serviceName: String, portName: Option[String]): Future[immutable.Seq[URI]] = registry .lookup(serviceName, portName) .invoke() .map(immutable.Seq[URI](_)) .recover { case _: NotFound => Nil } }
Example 123
Source File: AbstractLoggingServiceRegistryClient.scala From lagom with Apache License 2.0 | 5 votes |
package com.lightbend.lagom.devmode.internal.registry import java.net.URI import org.slf4j.Logger import org.slf4j.LoggerFactory import scala.collection.immutable import scala.concurrent.ExecutionContext import scala.concurrent.Future import scala.util.Failure import scala.util.Success private[lagom] abstract class AbstractLoggingServiceRegistryClient(implicit ec: ExecutionContext) extends ServiceRegistryClient { protected val log: Logger = LoggerFactory.getLogger(getClass) override def locateAll(serviceName: String, portName: Option[String]): Future[immutable.Seq[URI]] = { require( serviceName != ServiceRegistryClient.ServiceName, "The service registry client cannot locate the service registry service itself" ) log.debug("Locating service name=[{}] ...", serviceName) val location: Future[immutable.Seq[URI]] = internalLocateAll(serviceName, portName) location.onComplete { case Success(Nil) => log.warn("serviceName=[{}] was not found. Hint: Maybe it was not started?", serviceName) case Success(uris) => log.debug("serviceName=[{}] can be reached at uris=[{}]", serviceName: Any, uris: Any) case Failure(e) => log.warn("Service registry replied with an error when looking up serviceName=[{}]", serviceName: Any, e: Any) } location } protected def internalLocateAll(serviceName: String, portName: Option[String]): Future[immutable.Seq[URI]] }
Example 124
Source File: LagomDevModeServiceDiscovery.scala From lagom with Apache License 2.0 | 5 votes |
package com.lightbend.lagom.devmode.internal.registry import java.net.InetAddress import java.net.URI import akka.actor.ActorSystem import akka.discovery.ServiceDiscovery._ import akka.discovery.Discovery import akka.discovery.Lookup import akka.discovery.ServiceDiscovery import scala.concurrent.ExecutionContext import scala.concurrent.Future import scala.concurrent.Promise import scala.concurrent.duration.FiniteDuration private[lagom] class LagomDevModeServiceDiscovery(system: ActorSystem) extends ServiceDiscovery { private val clientPromise = Promise[ServiceRegistryClient] private implicit val ec: ExecutionContext = system.dispatcher def setServiceRegistryClient(client: ServiceRegistryClient): Unit = clientPromise.success(client) override def lookup(lookup: Lookup, resolveTimeout: FiniteDuration): Future[Resolved] = for { client <- clientPromise.future uris <- client.locateAll(lookup.serviceName, lookup.portName) } yield Resolved(lookup.serviceName, uris.map(toResolvedTarget)) private def toResolvedTarget(uri: URI) = ResolvedTarget( uri.getHost, optionalPort(uri.getPort), // we don't have the InetAddress, but instead of using None // we default to localhost as such we can use it for Akka Cluster Bootstrap eventually address = Some(InetAddress.getLocalHost) ) private def optionalPort(port: Int): Option[Int] = if (port < 0) None else Some(port) } private[lagom] object LagomDevModeServiceDiscovery { def apply(system: ActorSystem): LagomDevModeServiceDiscovery = Discovery(system) .loadServiceDiscovery("lagom-dev-mode") .asInstanceOf[LagomDevModeServiceDiscovery] }
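URI.getPort returns -1 when no explicit port is present, which is why toResolvedTarget above maps negative values to None; a tiny sketch:

import java.net.URI

object PortSketch extends App {
  def optionalPort(port: Int): Option[Int] = if (port < 0) None else Some(port)
  println(optionalPort(new URI("http://localhost:8080").getPort)) // Some(8080)
  println(optionalPort(new URI("http://localhost").getPort))      // None
}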
Example 125
Source File: LagomDevModeServiceDiscoverySpec.scala From lagom with Apache License 2.0 | 5 votes |
package com.lightbend.lagom.devmode.internal.registry import java.net.InetAddress import java.net.URI import akka.actor.ActorSystem import akka.discovery.ServiceDiscovery.Resolved import akka.discovery.ServiceDiscovery.ResolvedTarget import akka.testkit.TestKit import org.scalatest.BeforeAndAfterAll import org.scalatest.concurrent.ScalaFutures._ import scala.collection.immutable import scala.concurrent.Future import scala.concurrent.duration._ import org.scalatest.matchers.should.Matchers import org.scalatest.wordspec.AnyWordSpecLike class LagomDevModeServiceDiscoverySpec extends TestKit(ActorSystem("LagomDevModeSimpleServiceDiscoverySpec")) with AnyWordSpecLike with Matchers with BeforeAndAfterAll { private val client = new StaticServiceRegistryClient( Map( "test-service" -> List(URI.create("http://localhost:8080")), "test-service-without-port" -> List(URI.create("http://localhost")) ) ) protected override def afterAll(): Unit = { shutdown(verifySystemShutdown = true) } private val discovery = LagomDevModeServiceDiscovery(system) discovery.setServiceRegistryClient(client) "DevModeSimpleServiceDiscoverySpec" should { "resolve services in the registry" in { val expected = Resolved("test-service", List(ResolvedTarget("localhost", Some(8080), Some(InetAddress.getLocalHost)))) discovery.lookup("test-service", 100.milliseconds).futureValue shouldBe expected } "allow missing ports" in { val expected = Resolved("test-service-without-port", List(ResolvedTarget("localhost", None, Some(InetAddress.getLocalHost)))) discovery.lookup("test-service-without-port", 100.milliseconds).futureValue shouldBe expected } } } private class StaticServiceRegistryClient(registrations: Map[String, List[URI]]) extends ServiceRegistryClient { override def locateAll(serviceName: String, portName: Option[String]): Future[immutable.Seq[URI]] = Future.successful(registrations.getOrElse(serviceName, Nil)) }
Example 126
Source File: AbstractLoggingServiceRegistryClientSpec.scala From lagom with Apache License 2.0 | 5 votes |
package com.lightbend.lagom.devmode.internal.registry import java.net.URI import scala.concurrent.Future import org.scalatest.matchers.should.Matchers import org.scalatest.wordspec.AsyncWordSpec class AbstractLoggingServiceRegistryClientSpec extends AsyncWordSpec with Matchers { private val client = new AbstractLoggingServiceRegistryClient { override def internalLocateAll(serviceName: String, portName: Option[String]): Future[List[URI]] = serviceName match { case "failing-service" => Future.failed(new IllegalArgumentException("Ignore: expected error")) case "empty-service" => Future.successful(List()) case "successful-service" => Future.successful(List(URI.create("http://localhost:8080"))) } } "AbstractLoggingServiceRegistryClient" when { "internal lookup fails" in { client .locateAll("failing-service", None) .failed .map(_ shouldBe an[IllegalArgumentException]) } "internal lookup has no result" in { client .locateAll("empty-service", None) .map(_ shouldEqual Nil) } "internal lookup has a successful result" in { client .locateAll("successful-service", None) .map(_ shouldEqual List(URI.create("http://localhost:8080"))) } } }
Example 127
Source File: AkkaDiscoveryServiceLocator.scala From lagom with Apache License 2.0 | 5 votes |
package com.lightbend.lagom.scaladsl.akka.discovery

import java.net.URI

import akka.actor.ActorSystem
import akka.discovery.Discovery
import com.lightbend.lagom.internal.client.AkkaDiscoveryHelper
import com.lightbend.lagom.scaladsl.api.Descriptor
import com.lightbend.lagom.scaladsl.api.ServiceLocator
import com.lightbend.lagom.scaladsl.client.CircuitBreakersPanel
import com.lightbend.lagom.scaladsl.client.CircuitBreakingServiceLocator

import scala.concurrent.ExecutionContext
import scala.concurrent.Future

class AkkaDiscoveryServiceLocator(circuitBreakers: CircuitBreakersPanel, actorSystem: ActorSystem)(
    implicit ec: ExecutionContext
) extends CircuitBreakingServiceLocator(circuitBreakers) {
  private val helper: AkkaDiscoveryHelper = new AkkaDiscoveryHelper(
    actorSystem.settings.config.getConfig("lagom.akka.discovery"),
    Discovery(actorSystem).discovery
  )

  override def locate(name: String, serviceCall: Descriptor.Call[_, _]): Future[Option[URI]] =
    helper.locate(name)

  override def locateAll(name: String, serviceCall: Descriptor.Call[_, _]): Future[List[URI]] =
    helper.locateAll(name).map(_.toList)
}
Example 128
Source File: AkkaDiscoveryHelper.scala From lagom with Apache License 2.0 | 5 votes |
package com.lightbend.lagom.internal.client

import java.net.URI
import java.net.URISyntaxException
import java.util.concurrent.ThreadLocalRandom
import java.util.concurrent.TimeUnit

import akka.discovery.ServiceDiscovery
import akka.discovery.ServiceDiscovery.ResolvedTarget
import com.typesafe.config.Config
import org.slf4j.LoggerFactory

import scala.concurrent.ExecutionContext
import scala.concurrent.Future
import scala.concurrent.duration._

private[lagom] class AkkaDiscoveryHelper(config: Config, serviceDiscovery: ServiceDiscovery)(
    implicit ec: ExecutionContext
) {
  private val logger = LoggerFactory.getLogger(this.getClass)

  private val serviceNameMapper = new ServiceNameMapper(config)
  private val lookupTimeout     = config.getDuration("lookup-timeout", TimeUnit.MILLISECONDS).millis

  def locateAll(name: String): Future[Seq[URI]] = {
    val serviceLookup = serviceNameMapper.mapLookupQuery(name)
    serviceDiscovery
      .lookup(serviceLookup.lookup, lookupTimeout)
      .map { resolved =>
        logger.debug("Retrieved addresses: {}", resolved.addresses)
        resolved.addresses.map(target => toURI(target, serviceLookup))
      }
  }

  def locate(name: String): Future[Option[URI]] = locateAll(name).map(selectRandomURI)

  private def toURI(resolvedTarget: ResolvedTarget, lookup: ServiceLookup): URI = {
    val port = resolvedTarget.port.getOrElse(-1)

    val scheme = lookup.scheme.orNull
    try {
      new URI(
        scheme,              // scheme
        null,                // userInfo
        resolvedTarget.host, // host
        port,                // port
        null,                // path
        null,                // query
        null                 // fragment
      )
    } catch {
      case e: URISyntaxException => throw new RuntimeException(e)
    }
  }

  private def selectRandomURI(uris: Seq[URI]) = uris match {
    case Nil      => None
    case Seq(one) => Some(one)
    case many     => Some(many(ThreadLocalRandom.current().nextInt(many.size)))
  }
}
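A hand-wiring sketch for the helper above, illustrative only: `lookup-timeout` is the one key this excerpt reads, while ServiceNameMapper may require further keys not shown here, and the class is private[lagom], so this only compiles inside that package:

import akka.actor.ActorSystem
import akka.discovery.Discovery
import com.typesafe.config.ConfigFactory
import scala.concurrent.ExecutionContext.Implicits.global

val system = ActorSystem("app")
// `lookup-timeout` is read via config.getDuration above; 5s is an illustrative value.
val helper = new AkkaDiscoveryHelper(
  ConfigFactory.parseString("lookup-timeout = 5s"),
  Discovery(system).discovery
)
helper.locate("my-service").foreach(println) // prints Some(uri) or None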
Example 129
Source File: ConfigurationServiceLocatorSpec.scala From lagom with Apache License 2.0 | 5 votes |
package com.lightbend.lagom.javadsl.api

import java.net.URI
import java.util.concurrent.TimeUnit

import com.typesafe.config.ConfigFactory
import org.scalatest.matchers.should.Matchers
import org.scalatest.wordspec.AnyWordSpec

import scala.compat.java8.OptionConverters._

class ConfigurationServiceLocatorSpec extends AnyWordSpec with Matchers {
  val serviceLocator = new ConfigurationServiceLocator(
    ConfigFactory.parseString(
      """
        |lagom.services {
        |  foo = "http://localhost:10001"
        |  bar = "http://localhost:10002"
        |}
      """.stripMargin
    )
  )

  def locate(serviceName: String) =
    serviceLocator.locate(serviceName).toCompletableFuture.get(10, TimeUnit.SECONDS).asScala

  "ConfigurationServiceLocator" should {
    "return a found service" in {
      locate("foo") should contain(URI.create("http://localhost:10001"))
      locate("bar") should contain(URI.create("http://localhost:10002"))
    }
    "return none for not found service" in {
      locate("none") shouldBe None
    }
  }
}
Example 130
Source File: ConfigurationServiceLocatorSpec.scala From lagom with Apache License 2.0 | 5 votes |
package com.lightbend.lagom.javadsl.client

import java.net.URI
import java.util.concurrent.CompletionStage
import java.util.concurrent.TimeUnit
import java.util.function.Supplier

import com.typesafe.config.ConfigFactory
import org.scalatest.matchers.should.Matchers
import org.scalatest.wordspec.AnyWordSpec

import scala.compat.java8.OptionConverters._

class ConfigurationServiceLocatorSpec extends AnyWordSpec with Matchers {
  val serviceLocator = new ConfigurationServiceLocator(
    ConfigFactory.parseString(
      """
        |lagom.services {
        |  foo = "http://localhost:10001"
        |  bar = "http://localhost:10002"
        |}
      """.stripMargin
    ),
    new CircuitBreakersPanel {
      override def withCircuitBreaker[T](id: String, body: Supplier[CompletionStage[T]]): CompletionStage[T] =
        body.get()
    }
  )

  def locate(serviceName: String) =
    serviceLocator.locate(serviceName).toCompletableFuture.get(10, TimeUnit.SECONDS).asScala

  "ConfigurationServiceLocator" should {
    "return a found service" in {
      locate("foo") should contain(URI.create("http://localhost:10001"))
      locate("bar") should contain(URI.create("http://localhost:10002"))
    }
    "return none for not found service" in {
      locate("none") shouldBe None
    }
  }
}
Example 131
Source File: Producer.scala From lagom with Apache License 2.0 | 5 votes |
package com.lightbend.lagom.internal.broker.kafka

import java.net.URI

import akka.actor.ActorSystem
import akka.persistence.query.{Offset => AkkaOffset}
import akka.stream.Materializer
import akka.stream.scaladsl._
import com.lightbend.lagom.internal.projection.ProjectionRegistry
import com.lightbend.lagom.internal.projection.ProjectionRegistryActor.WorkerCoordinates
import com.lightbend.lagom.spi.persistence.OffsetStore
import org.apache.kafka.common.serialization.Serializer

import scala.collection.immutable
import scala.concurrent.ExecutionContext
import scala.concurrent.Future

private[lagom] object Producer {
  def startTaggedOffsetProducer[Message](
      system: ActorSystem,
      tags: immutable.Seq[String],
      kafkaConfig: KafkaConfig,
      locateService: String => Future[Seq[URI]],
      topicId: String,
      eventStreamFactory: (String, AkkaOffset) => Source[(Message, AkkaOffset), _],
      partitionKeyStrategy: Option[Message => String],
      serializer: Serializer[Message],
      offsetStore: OffsetStore,
      projectionRegistry: ProjectionRegistry
  )(implicit mat: Materializer, ec: ExecutionContext): Unit = {
    val projectionName = s"kafkaProducer-$topicId"

    val producerConfig = ProducerConfig(system.settings.config)
    val topicProducerProps = (coordinates: WorkerCoordinates) =>
      TopicProducerActor.props(
        coordinates,
        kafkaConfig,
        producerConfig,
        locateService,
        topicId,
        eventStreamFactory,
        partitionKeyStrategy,
        serializer,
        offsetStore
      )

    val entityIds = tags.toSet

    projectionRegistry.registerProjection(
      projectionName,
      entityIds,
      topicProducerProps,
      producerConfig.role
    )
  }
}
Example 132
Source File: CassandraPersistenceModule.scala From lagom with Apache License 2.0 | 5 votes |
package com.lightbend.lagom.javadsl.persistence.cassandra

import java.net.URI

import akka.actor.ActorSystem
import com.lightbend.lagom.internal.javadsl.persistence.cassandra._
import com.lightbend.lagom.internal.persistence.cassandra.CassandraOffsetStore
import com.lightbend.lagom.internal.persistence.cassandra.CassandraReadSideSettings
import com.lightbend.lagom.internal.persistence.cassandra.ServiceLocatorAdapter
import com.lightbend.lagom.internal.persistence.cassandra.ServiceLocatorHolder
import com.lightbend.lagom.javadsl.api.ServiceLocator
import com.lightbend.lagom.javadsl.persistence.PersistentEntityRegistry
import com.lightbend.lagom.spi.persistence.OffsetStore
import javax.annotation.PostConstruct
import javax.inject.Inject
import play.api.Configuration
import play.api.Environment
import play.api.inject._

import scala.concurrent.Future
import scala.util.Try

class CassandraPersistenceModule extends Module {
  override def bindings(environment: Environment, configuration: Configuration): Seq[Binding[_]] = Seq(
    bind[CassandraPersistenceModule.InitServiceLocatorHolder].toSelf.eagerly(),
    bind[PersistentEntityRegistry].to[CassandraPersistentEntityRegistry],
    bind[CassandraSession].toSelf,
    bind[CassandraReadSide].to[CassandraReadSideImpl],
    bind[CassandraReadSideSettings].toSelf,
    bind[CassandraOffsetStore].to[JavadslCassandraOffsetStore],
    bind[OffsetStore].to(bind[CassandraOffsetStore])
  )
}

private[lagom] object CassandraPersistenceModule {
  class InitServiceLocatorHolder @Inject() (system: ActorSystem, injector: Injector) {
    // Guice doesn't support this, but other DI frameworks do.
    @PostConstruct
    def init(): Unit = {
      Try(injector.instanceOf[ServiceLocator]).foreach { locator =>
        ServiceLocatorHolder(system).setServiceLocator(new ServiceLocatorAdapter {
          override def locateAll(name: String): Future[List[URI]] = {
            import system.dispatcher

            import scala.collection.JavaConverters._
            import scala.compat.java8.FutureConverters._
            locator.locateAll(name).toScala.map(_.asScala.toList)
          }
        })
      }
    }
  }
}
Example 133
Source File: CassandraPersistenceComponents.scala From lagom with Apache License 2.0 | 5 votes |
package com.lightbend.lagom.scaladsl.persistence.cassandra

import java.net.URI

import com.lightbend.lagom.internal.persistence.cassandra.CassandraOffsetStore
import com.lightbend.lagom.internal.persistence.cassandra.CassandraReadSideSettings
import com.lightbend.lagom.internal.persistence.cassandra.ServiceLocatorAdapter
import com.lightbend.lagom.internal.persistence.cassandra.ServiceLocatorHolder
import com.lightbend.lagom.internal.scaladsl.persistence.cassandra.CassandraPersistentEntityRegistry
import com.lightbend.lagom.internal.scaladsl.persistence.cassandra.CassandraReadSideImpl
import com.lightbend.lagom.internal.scaladsl.persistence.cassandra.ScaladslCassandraOffsetStore
import com.lightbend.lagom.scaladsl.api.ServiceLocator
import com.lightbend.lagom.scaladsl.persistence.PersistenceComponents
import com.lightbend.lagom.scaladsl.persistence.PersistentEntityRegistry
import com.lightbend.lagom.scaladsl.persistence.ReadSidePersistenceComponents
import com.lightbend.lagom.scaladsl.persistence.WriteSidePersistenceComponents
import com.lightbend.lagom.spi.persistence.OffsetStore

import scala.concurrent.Future

trait ReadSideCassandraPersistenceComponents extends ReadSidePersistenceComponents {
  lazy val cassandraSession: CassandraSession = new CassandraSession(actorSystem)
  lazy val testCasReadSideSettings: CassandraReadSideSettings = new CassandraReadSideSettings(actorSystem)

  private[lagom] lazy val cassandraOffsetStore: CassandraOffsetStore =
    new ScaladslCassandraOffsetStore(actorSystem, cassandraSession, testCasReadSideSettings, readSideConfig)(
      executionContext
    )
  lazy val offsetStore: OffsetStore = cassandraOffsetStore

  lazy val cassandraReadSide: CassandraReadSide =
    new CassandraReadSideImpl(actorSystem, cassandraSession, cassandraOffsetStore)
}
Example 134
Source File: ServiceLocatorSessionProvider.scala From lagom with Apache License 2.0 | 5 votes |
package com.lightbend.lagom.internal.persistence.cassandra

import java.net.InetSocketAddress
import java.net.URI

import akka.actor.ActorSystem
import akka.persistence.cassandra.ConfigSessionProvider
import com.typesafe.config.Config
import play.api.Logger

import scala.collection.immutable
import scala.concurrent.ExecutionContext
import scala.concurrent.Future
import scala.concurrent.Promise
import scala.concurrent.duration._
import scala.util.control.NoStackTrace

private[lagom] final class ServiceLocatorSessionProvider(system: ActorSystem, config: Config)
    extends ConfigSessionProvider(system, config) {
  private val log = Logger(getClass)

  override def lookupContactPoints(
      clusterId: String
  )(implicit ec: ExecutionContext): Future[immutable.Seq[InetSocketAddress]] = {
    ServiceLocatorHolder(system).serviceLocatorEventually.flatMap { serviceLocatorAdapter =>
      serviceLocatorAdapter.locateAll(clusterId).map {
        case Nil => throw new NoContactPointsException(s"No contact points for [$clusterId]")
        case uris =>
          log.debug(s"Found Cassandra contact points: $uris")

          // all URIs must be valid
          uris.foreach { uri =>
            require(uri.getHost != null, s"missing host in $uri for Cassandra contact points $clusterId")
            require(uri.getPort != -1, s"missing port in $uri for Cassandra contact points $clusterId")
          }

          uris.map { uri =>
            new InetSocketAddress(uri.getHost, uri.getPort)
          }
      }
    }
  }
}

private[lagom] final class NoContactPointsException(msg: String) extends RuntimeException(msg) with NoStackTrace
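For reference, wiring the provider follows the holder/adapter pattern exercised by the spec in the next example; the URI below is a hypothetical contact point, and it must carry an explicit host and port or the require(...) checks above will fail:

import java.net.URI
import scala.concurrent.Future

// `system` is the ActorSystem the provider was created with.
ServiceLocatorHolder(system).setServiceLocator(new ServiceLocatorAdapter {
  override def locateAll(name: String): Future[List[URI]] =
    Future.successful(List(URI.create("tcp://cassandra-1:9042"))) // hypothetical address
})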
Example 135
Source File: ServiceLocatorSessionProviderSpec.scala From lagom with Apache License 2.0 | 5 votes |
package com.lightbend.lagom.internal.persistence.cassandra

import java.net.InetSocketAddress
import java.net.URI

import akka.actor.ActorSystem
import akka.testkit.TestKit
import com.typesafe.config.Config
import com.typesafe.config.ConfigFactory
import org.scalatest.BeforeAndAfterAll
import org.scalatest.matchers.must.Matchers
import org.scalatest.wordspec.AnyWordSpec

import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.duration._
import scala.concurrent.Await
import scala.concurrent.Future

class ServiceLocatorSessionProviderSpec extends AnyWordSpec with Matchers with BeforeAndAfterAll {
  val system         = ActorSystem("test")
  val config: Config = ConfigFactory.load()
  val uri            = new URI("http://localhost:8080")

  protected override def afterAll(): Unit = {
    TestKit.shutdownActorSystem(actorSystem = system, verifySystemShutdown = true)
  }

  val locator = new ServiceLocatorAdapter {
    override def locateAll(name: String): Future[List[URI]] = {
      name match {
        case "existing" => Future.successful(List(uri))
        case "absent"   => Future.successful(Nil)
      }
    }
  }

  val providerConfig: Config = config.getConfig("lagom.persistence.read-side.cassandra")
  val provider               = new ServiceLocatorSessionProvider(system, providerConfig)
  ServiceLocatorHolder(system).setServiceLocator(locator)

  "ServiceLocatorSessionProvider" should {
    "Get the address when the contact points exist" in {
      val future = provider.lookupContactPoints("existing")

      Await.result(future, 3.seconds) mustBe Seq(new InetSocketAddress(uri.getHost, uri.getPort))
    }

    "Fail the future when the contact points do not exist" in {
      val future = provider.lookupContactPoints("absent")

      intercept[NoContactPointsException] {
        Await.result(future, 3.seconds)
      }
    }
  }
}
Example 136
Source File: TestServiceLocator.scala From lagom with Apache License 2.0 | 5 votes |
package com.lightbend.lagom.internal.testkit

import java.net.URI
import java.util.Optional
import java.util.concurrent.CompletionStage

import com.lightbend.lagom.javadsl.api.Descriptor
import com.lightbend.lagom.javadsl.client.CircuitBreakersPanel
import com.lightbend.lagom.javadsl.client.CircuitBreakingServiceLocator
import javax.inject.Inject
import javax.inject.Singleton

import scala.compat.java8.FutureConverters._
import scala.concurrent.ExecutionContext
import scala.concurrent.Future

@Singleton
private[lagom] class TestServiceLocator @Inject() (
    circuitBreakers: CircuitBreakersPanel,
    port: TestServiceLocatorPort,
    implicit val ec: ExecutionContext
) extends CircuitBreakingServiceLocator(circuitBreakers) {
  private val futureUri = port.port.map(p => URI.create("http://localhost:" + p))

  override def locate(name: String, call: Descriptor.Call[_, _]): CompletionStage[Optional[URI]] =
    futureUri.map(uri => Optional.of(uri)).toJava
}

private[lagom] final case class TestServiceLocatorPort(port: Future[Int])
Example 137
Source File: HadoopFileLinesReader.scala From sparkoscope with Apache License 2.0 | 5 votes |
package org.apache.spark.sql.execution.datasources

import java.io.Closeable
import java.net.URI

import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.Path
import org.apache.hadoop.io.Text
import org.apache.hadoop.mapreduce._
import org.apache.hadoop.mapreduce.lib.input.{FileSplit, LineRecordReader}
import org.apache.hadoop.mapreduce.task.TaskAttemptContextImpl

class HadoopFileLinesReader(
    file: PartitionedFile, conf: Configuration) extends Iterator[Text] with Closeable {
  private val iterator = {
    val fileSplit = new FileSplit(
      new Path(new URI(file.filePath)),
      file.start,
      file.length,
      // TODO: Implement Locality
      Array.empty)
    val attemptId = new TaskAttemptID(new TaskID(new JobID(), TaskType.MAP, 0), 0)
    val hadoopAttemptContext = new TaskAttemptContextImpl(conf, attemptId)
    val reader = new LineRecordReader()
    reader.initialize(fileSplit, hadoopAttemptContext)
    new RecordReaderIterator(reader)
  }

  override def hasNext: Boolean = iterator.hasNext

  override def next(): Text = iterator.next()

  override def close(): Unit = iterator.close()
}
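A minimal usage sketch, assuming the Spark 2.x shape of PartitionedFile (partitionValues, filePath, start, length); the path is illustrative:

import org.apache.hadoop.conf.Configuration
import org.apache.spark.sql.catalyst.InternalRow

val file = PartitionedFile(InternalRow.empty, "file:///tmp/data.txt", 0, 1024)
val reader = new HadoopFileLinesReader(file, new Configuration())
try {
  reader.foreach(line => println(line.toString)) // the reader is an Iterator[Text]
} finally {
  reader.close()
}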
Example 138
Source File: resources.scala From sparkoscope with Apache License 2.0 | 5 votes |
package org.apache.spark.sql.execution.command

import java.io.File
import java.net.URI

import org.apache.hadoop.fs.Path

import org.apache.spark.sql.{Row, SparkSession}
import org.apache.spark.sql.catalyst.expressions.{Attribute, AttributeReference}
import org.apache.spark.sql.types.{IntegerType, StringType, StructField, StructType}

case class ListJarsCommand(jars: Seq[String] = Seq.empty[String]) extends RunnableCommand {
  override val output: Seq[Attribute] = {
    AttributeReference("Results", StringType, nullable = false)() :: Nil
  }

  override def run(sparkSession: SparkSession): Seq[Row] = {
    val jarList = sparkSession.sparkContext.listJars()
    if (jars.nonEmpty) {
      for {
        jarName <- jars.map(f => new Path(f).getName)
        jarPath <- jarList
        if jarPath.contains(jarName)
      } yield Row(jarPath)
    } else {
      jarList.map(Row(_))
    }
  }
}
Example 139
Source File: FileStreamSourceSuite.scala From sparkoscope with Apache License 2.0 | 5 votes |
package org.apache.spark.sql.execution.streaming

import java.io.File
import java.net.URI

import scala.util.Random

import org.apache.hadoop.fs.{FileStatus, Path, RawLocalFileSystem}

import org.apache.spark.SparkFunSuite
import org.apache.spark.sql.execution.streaming.ExistsThrowsExceptionFileSystem._
import org.apache.spark.sql.test.SharedSQLContext
import org.apache.spark.sql.types.StructType

class FileStreamSourceSuite extends SparkFunSuite with SharedSQLContext {

  import FileStreamSource._

  test("SeenFilesMap") {
    val map = new SeenFilesMap(maxAgeMs = 10)

    map.add("a", 5)
    assert(map.size == 1)
    map.purge()
    assert(map.size == 1)

    // Add a new entry and purge should be a no-op, since the gap is exactly 10 ms.
    map.add("b", 15)
    assert(map.size == 2)
    map.purge()
    assert(map.size == 2)

    // Add a new entry that's more than 10 ms newer than the first entry. We should be able to purge now.
    map.add("c", 16)
    assert(map.size == 3)
    map.purge()
    assert(map.size == 2)

    // Overriding an existing entry shouldn't change the size.
    map.add("c", 25)
    assert(map.size == 2)

    // Not a new file because we have seen c before
    assert(!map.isNewFile("c", 20))

    // Not a new file because the timestamp is too old
    assert(!map.isNewFile("d", 5))

    // Finally a new file: never seen and not too old
    assert(map.isNewFile("e", 20))
  }

  test("SeenFilesMap should only consider a file old if it is earlier than last purge time") {
    val map = new SeenFilesMap(maxAgeMs = 10)

    map.add("a", 20)
    assert(map.size == 1)

    // Timestamp 5 should still be considered a new file because the purge time should be 0
    assert(map.isNewFile("b", 9))
    assert(map.isNewFile("b", 10))

    // Once purged, the purge time should be 10, so b is an old file if its timestamp is less than 10.
    map.purge()
    assert(!map.isNewFile("b", 9))
    assert(map.isNewFile("b", 10))
  }

  testWithUninterruptibleThread("do not recheck that files exist during getBatch") {
    withTempDir { temp =>
      spark.conf.set(
        s"fs.$scheme.impl",
        classOf[ExistsThrowsExceptionFileSystem].getName)
      // add the metadata entries as a pre-req
      val dir = new File(temp, "dir") // use a non-existent directory to test whether the log creates it
      val metadataLog =
        new FileStreamSourceLog(FileStreamSourceLog.VERSION, spark, dir.getAbsolutePath)
      assert(metadataLog.add(0, Array(FileEntry(s"$scheme:///file1", 100L, 0))))

      val newSource = new FileStreamSource(spark, s"$scheme:///", "parquet", StructType(Nil), Nil,
        dir.getAbsolutePath, Map.empty)
      // this method should throw an exception if `fs.exists` is called during resolveRelation
      newSource.getBatch(None, FileStreamSourceOffset(1))
    }
  }
}

// The file system under test. Its class declaration was dropped from this
// excerpt, so the wrapper below is reconstructed: per the imports it extends
// RawLocalFileSystem, and per the test comment above, calling `exists` is
// assumed to fail while `listStatus` succeeds without the file existing.
class ExistsThrowsExceptionFileSystem extends RawLocalFileSystem {
  override def getScheme: String = scheme

  override def exists(f: Path): Boolean =
    throw new IllegalArgumentException("Exists shouldn't have been called!")

  override def listStatus(file: Path): Array[FileStatus] = {
    val emptyFile = new FileStatus()
    emptyFile.setPath(file)
    Array(emptyFile)
  }
}

object ExistsThrowsExceptionFileSystem {
  val scheme = s"FileStreamSourceSuite${math.abs(Random.nextInt)}fs"
}
Example 140
Source File: ApplicationDescription.scala From sparkoscope with Apache License 2.0 | 5 votes |
package org.apache.spark.deploy

import java.net.URI

private[spark] case class ApplicationDescription(
    name: String,
    maxCores: Option[Int],
    memoryPerExecutorMB: Int,
    command: Command,
    appUiUrl: String,
    eventLogDir: Option[URI] = None,
    // short name of compression codec used when writing event logs, if any (e.g. lzf)
    eventLogCodec: Option[String] = None,
    coresPerExecutor: Option[Int] = None,
    // number of executors this application wants to start with,
    // only used if dynamic allocation is enabled
    initialExecutorLimit: Option[Int] = None,
    user: String = System.getProperty("user.name", "<unknown>")) {

  override def toString: String = "ApplicationDescription(" + name + ")"
}
Example 141
Source File: ClientArguments.scala From sparkoscope with Apache License 2.0 | 5 votes |
package org.apache.spark.deploy

import java.net.{URI, URISyntaxException}

import scala.annotation.tailrec
import scala.collection.mutable.ListBuffer

import org.apache.log4j.Level

import org.apache.spark.util.{IntParam, MemoryParam, Utils}

// The class declaration and its argument-parsing members were trimmed from this
// excerpt; the wrapper below is assumed so the remaining method compiles.
private[deploy] class ClientArguments(args: Array[String]) {
  import ClientArguments._

  // ... argument parsing omitted in this excerpt ...

  private def printUsageAndExit(exitCode: Int) {
    // TODO: It wouldn't be too hard to allow users to submit their app and dependency jars
    //       separately similar to in the YARN client.
    val usage =
      s"""
      |Usage: DriverClient [options] launch <active-master> <jar-url> <main-class> [driver options]
      |Usage: DriverClient kill <active-master> <driver-id>
      |
      |Options:
      |   -c CORES, --cores CORES        Number of cores to request (default: $DEFAULT_CORES)
      |   -m MEMORY, --memory MEMORY     Megabytes of memory to request (default: $DEFAULT_MEMORY)
      |   -s, --supervise                Whether to restart the driver on failure
      |                                  (default: $DEFAULT_SUPERVISE)
      |   -v, --verbose                  Print more debugging output
     """.stripMargin
    // scalastyle:off println
    System.err.println(usage)
    // scalastyle:on println
    System.exit(exitCode)
  }
}

private[deploy] object ClientArguments {
  val DEFAULT_CORES = 1
  val DEFAULT_MEMORY = Utils.DEFAULT_DRIVER_MEM_MB // MB
  val DEFAULT_SUPERVISE = false

  def isValidJarUrl(s: String): Boolean = {
    try {
      val uri = new URI(s)
      uri.getScheme != null && uri.getPath != null && uri.getPath.endsWith(".jar")
    } catch {
      case _: URISyntaxException => false
    }
  }
}
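The validation in isValidJarUrl requires both a URI scheme and a path ending in ".jar", so:

ClientArguments.isValidJarUrl("hdfs://namenode:8020/jars/app.jar") // true
ClientArguments.isValidJarUrl("/local/path/app.jar")               // false: no scheme
ClientArguments.isValidJarUrl("http://host/app.zip")               // false: not a .jar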
Example 142
Source File: ApplicationDescription.scala From SparkCore with Apache License 2.0 | 5 votes |
package org.apache.spark.deploy

import java.net.URI

private[spark] class ApplicationDescription(
    val name: String,
    val maxCores: Option[Int],
    val memoryPerSlave: Int,
    val command: Command,
    var appUiUrl: String,
    val eventLogDir: Option[URI] = None,
    // short name of compression codec used when writing event logs, if any (e.g. lzf)
    val eventLogCodec: Option[String] = None)
  extends Serializable {

  val user = System.getProperty("user.name", "<unknown>")

  def copy(
      name: String = name,
      maxCores: Option[Int] = maxCores,
      memoryPerSlave: Int = memoryPerSlave,
      command: Command = command,
      appUiUrl: String = appUiUrl,
      eventLogDir: Option[URI] = eventLogDir,
      eventLogCodec: Option[String] = eventLogCodec): ApplicationDescription =
    new ApplicationDescription(
      name, maxCores, memoryPerSlave, command, appUiUrl, eventLogDir, eventLogCodec)

  override def toString: String = "ApplicationDescription(" + name + ")"
}
Example 143
Source File: ClientArguments.scala From SparkCore with Apache License 2.0 | 5 votes |
package org.apache.spark.deploy

import java.net.{URI, URISyntaxException}

import scala.collection.mutable.ListBuffer

import org.apache.log4j.Level

import org.apache.spark.util.{IntParam, MemoryParam}

// As in the previous example, the enclosing class declaration was trimmed from
// this excerpt; the wrapper below is assumed so the remaining method compiles.
private[spark] class ClientArguments(args: Array[String]) {
  import ClientArguments._

  // ... argument parsing omitted in this excerpt ...

  def printUsageAndExit(exitCode: Int) {
    // TODO: It wouldn't be too hard to allow users to submit their app and dependency jars
    //       separately similar to in the YARN client.
    val usage =
      s"""
      |Usage: DriverClient [options] launch <active-master> <jar-url> <main-class> [driver options]
      |Usage: DriverClient kill <active-master> <driver-id>
      |
      |Options:
      |   -c CORES, --cores CORES        Number of cores to request (default: $DEFAULT_CORES)
      |   -m MEMORY, --memory MEMORY     Megabytes of memory to request (default: $DEFAULT_MEMORY)
      |   -s, --supervise                Whether to restart the driver on failure
      |                                  (default: $DEFAULT_SUPERVISE)
      |   -v, --verbose                  Print more debugging output
     """.stripMargin
    System.err.println(usage)
    System.exit(exitCode)
  }
}

object ClientArguments {
  private[spark] val DEFAULT_CORES = 1
  private[spark] val DEFAULT_MEMORY = 512 // MB
  private[spark] val DEFAULT_SUPERVISE = false

  def isValidJarUrl(s: String): Boolean = {
    try {
      val uri = new URI(s)
      uri.getScheme != null && uri.getPath != null && uri.getPath.endsWith(".jar")
    } catch {
      case _: URISyntaxException => false
    }
  }
}
Example 144
Source File: LinkLabel.scala From scalismo-ui with GNU General Public License v3.0 | 5 votes |
package scalismo.ui.view.util

import java.awt.event.{MouseAdapter, MouseEvent}
import java.awt.{Color, Cursor, Desktop}
import java.net.URI

import javax.swing.Icon

import scala.swing.Swing.EmptyIcon
import scala.swing.{Alignment, Label}

object LinkLabel {
  lazy val desktop: Option[Desktop] = {
    if (!Desktop.isDesktopSupported) None
    else {
      val desktop = Option(Desktop.getDesktop)
      if (desktop.nonEmpty && desktop.forall(_.isSupported(Desktop.Action.BROWSE))) desktop else None
    }
  }
}

class LinkLabel(text: String,
                uri: URI,
                icon: Icon = EmptyIcon,
                alignment: Alignment.Value = Alignment.Center,
                preventLinkStyle: Boolean = false,
                preventTooltip: Boolean = false)
    extends Label(text, icon, alignment) {

  if (!preventTooltip) {
    tooltip = uri.toString
  }

  // this will only kick in if the desktop can actually open links
  LinkLabel.desktop.foreach { d =>
    if (!preventLinkStyle) {
      foreground = Color.BLUE.darker()
      // val attributes = font.getAttributes
      // attributes.asInstanceOf[java.util.Map[Object, Object]].put(TextAttribute.UNDERLINE, TextAttribute.UNDERLINE_ON)
      // font = font.deriveFont(attributes)
    }

    cursor = Cursor.getPredefinedCursor(Cursor.HAND_CURSOR)

    peer.addMouseListener(new MouseAdapter {
      override def mouseClicked(e: MouseEvent): Unit = {
        if (e.getClickCount == 1) {
          d.browse(uri)
        }
      }
    })
  }
}
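A small usage sketch for the label above; the frame and URL are illustrative:

import java.net.URI
import scala.swing.{FlowPanel, Frame}

val label = new LinkLabel("Project homepage", new URI("https://example.org"))
new Frame {
  title = "LinkLabel demo"
  contents = new FlowPanel(label)
  visible = true
}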
Example 145
Source File: SerializedCpg.scala From codepropertygraph with Apache License 2.0 | 5 votes |
package io.shiftleft

import java.io.{File, IOException}
import java.net.{URI, URISyntaxException}
import java.nio.file.{FileSystem, FileSystems, Files}
import java.util

import com.google.protobuf.GeneratedMessageV3

class SerializedCpg extends AutoCloseable {

  // The field definitions were dropped from this excerpt; the ones below are
  // assumptions that make it self-contained: an overlay counter, an emptiness
  // flag, and a zip file system to write overlays into.
  private var counter: Int = 0
  private val isEmpty: Boolean = false // assumed: true when constructed without an output file
  private lazy val zipFileSystem: FileSystem = {
    val env = new util.HashMap[String, String]()
    env.put("create", "true")
    FileSystems.newFileSystem(URI.create("jar:" + new File("cpg.bin.zip").toURI), env) // hypothetical path
  }

  @throws[IOException]
  def addOverlay(overlay: GeneratedMessageV3, name: String): Unit = {
    if (!isEmpty) {
      val pathInZip = zipFileSystem.getPath(s"${counter}_${name}")
      counter += 1
      val outputStream = Files.newOutputStream(pathInZip)
      overlay.writeTo(outputStream)
      outputStream.close()
    }
  }

  @throws[IOException]
  def addOverlay(overlays: Iterator[GeneratedMessageV3], name: String): Unit = {
    overlays.zipWithIndex.foreach {
      case (overlay, i) => addOverlay(overlay, name + "_" + i)
    }
  }

  @throws[IOException]
  override def close(): Unit = {
    if (!isEmpty) {
      zipFileSystem.close()
    }
  }
}
Example 146
Source File: CouchbasePersistenceModule.scala From akka-persistence-couchbase with Apache License 2.0 | 5 votes |
package com.lightbend.lagom.javadsl.persistence.couchbase

import java.net.URI

import akka.actor.ActorSystem
import akka.event.Logging
import akka.stream.alpakka.couchbase.javadsl.CouchbaseSession
import akka.stream.alpakka.couchbase.CouchbaseSessionSettings
import com.google.inject.Provider
import com.lightbend.lagom.internal.javadsl.persistence.couchbase.{
  CouchbasePersistentEntityRegistry,
  CouchbaseReadSideImpl,
  JavadslCouchbaseOffsetStore
}
import com.lightbend.lagom.internal.persistence.couchbase.{
  CouchbaseConfigValidator,
  CouchbaseOffsetStore,
  ServiceLocatorAdapter,
  ServiceLocatorHolder
}
import com.lightbend.lagom.javadsl.api.ServiceLocator
import com.lightbend.lagom.javadsl.persistence.PersistentEntityRegistry
import com.lightbend.lagom.spi.persistence.OffsetStore
import com.typesafe.config.Config
import javax.inject.Inject
import play.api.inject.{Binding, Injector, Module}
import play.api.{Configuration, Environment}

import scala.compat.java8.FutureConverters._
import scala.concurrent.duration._
import scala.concurrent.{Await, Future}
import scala.util.Try

class CouchbasePersistenceModule extends Module {
  override def bindings(environment: Environment, configuration: Configuration): Seq[Binding[_]] = Seq(
    bind[CouchbasePersistenceModule.InitServiceLocatorHolder].toSelf.eagerly(),
    bind[PersistentEntityRegistry].to[CouchbasePersistentEntityRegistry],
    bind[CouchbaseSession].toProvider[CouchbaseProvider],
    bind[CouchbaseReadSide].to[CouchbaseReadSideImpl],
    //TODO: add other modules similar to Cassandra
    //bind[CassandraReadSideSettings].toSelf,
    bind[CouchbaseOffsetStore].to(bind[JavadslCouchbaseOffsetStore]),
    bind[OffsetStore].to(bind[CouchbaseOffsetStore])
  )
}

private[lagom] class CouchbaseProvider @Inject() (system: ActorSystem, cfg: Config) extends Provider[CouchbaseSession] {
  private val log = Logging(system, classOf[CouchbaseProvider])

  CouchbaseConfigValidator.validateBucket("lagom.persistence.read-side.couchbase", cfg, log)

  private val readSideCouchbaseConfig: Config =
    cfg.getConfig("lagom.persistence.read-side.couchbase")

  private val sessionSettings = CouchbaseSessionSettings(
    readSideCouchbaseConfig.getConfig("connection")
  )

  private val bucket = readSideCouchbaseConfig.getString("bucket")

  // FIXME is there a way to have async component creation in lagom instead of letting every component
  //  know that the thing is async? If not, we should pass Future[CouchbaseSession] around and let the
  //  use sites mix in AsyncCouchbaseSession - but if we use that from Lagom it needs to be made public API
  // FIXME this should be the Java API of CouchbaseSession, when there is one
  lazy val couchbase: CouchbaseSession =
    Await.result(CouchbaseSession.create(sessionSettings, bucket, system.dispatcher).toScala, 30.seconds)

  override def get(): CouchbaseSession = couchbase
}

private[lagom] object CouchbasePersistenceModule {
  class InitServiceLocatorHolder @Inject() (system: ActorSystem, injector: Injector) {
    def init(): Unit =
      Try(injector.instanceOf[ServiceLocator]).foreach { locator =>
        ServiceLocatorHolder(system).setServiceLocator(new ServiceLocatorAdapter {
          override def locateAll(name: String): Future[List[URI]] = {
            import system.dispatcher

            import scala.collection.JavaConverters._
            import scala.compat.java8.FutureConverters._
            locator.locateAll(name).toScala.map(_.asScala.toList)
          }
        })
      }
  }
}
Example 147
Source File: ModelConverter.scala From play-swagger with MIT License | 5 votes |
package de.zalando.swagger

import java.io.File
import java.net.URI

import de.zalando.apifirst.Application.StrictModel
import de.zalando.apifirst.Domain.Type
import de.zalando.apifirst.Hypermedia.{State, TransitionProperties}
import de.zalando.apifirst.naming.Reference
import de.zalando.swagger.strictModel.SwaggerModel

trait ParameterNaming {
  type Types      = Seq[Type]
  type NamedType  = (Reference, Type)
  type NamedTypes = Seq[NamedType]
}

object ModelConverter extends ParameterNaming {

  def fromModel(base: URI, model: SwaggerModel, file: Option[File] = None,
                keyPrefix: String = "x-api-first", autoConvert: Boolean = true): StrictModel = {
    val converter           = new TypeConverter(base, model, keyPrefix)
    val typeDefs            = converter.convert
    val discriminators      = converter.discriminators.toMap
    val inlineParameters    = new ParametersConverter(base, model, keyPrefix, typeDefs, autoConvert).parameters
    val securityDefinitions = SecurityConverter.convertDefinitions(model.securityDefinitions)
    val apiCalls = new PathsConverter(base, model, keyPrefix, inlineParameters,
      securityDefinitions, file.map(_.getName)).convert
    val packageName = model.vendorExtensions.get(s"$keyPrefix-package")
    val inheritedPackageName = apiCalls.headOption collect {
      case h if apiCalls.seq.forall { _.handler.packageName == h.handler.packageName } => h.handler.packageName
    }
    val stateTransitionsTable = model.transitions map {
      case (fromName, toStates) =>
        State(fromName) -> toStates.map {
          case (toName, props) =>
            State(toName) -> TransitionProperties(Option(props).flatMap(_.get("condition").map(_.toString)))
        }
    }
    StrictModel(apiCalls, typeDefs.toMap, inlineParameters, discriminators, model.basePath,
      packageName orElse inheritedPackageName, stateTransitionsTable, securityDefinitions)
  }
}
Example 148
Source File: StrictParseExamplesTest.scala From play-swagger with MIT License | 5 votes |
package de.zalando.swagger

import java.io.File
import java.net.URI

import de.zalando.swagger.strictModel.SwaggerModel
import org.scalatest.{FunSpec, MustMatchers}

class StrictParseExamplesTest extends FunSpec with MustMatchers with ExpectedResults {

  val fixtures = new File(resourcesPath + "examples").listFiles ++
    new File(resourcesPath + "schema_examples").listFiles

  describe("Strict Swagger Parser") {
    fixtures.filter(_.getName.endsWith(".yaml")).foreach { file =>
      it(s"should parse the yaml swagger file ${file.getName} as specification") {
        val result = StrictYamlParser.parse(file)
        result._1 mustBe a[URI]
        result._2 mustBe a[SwaggerModel]
      }
    }
  }
}
Example 149
Source File: S3Sandbox.scala From redshift-fake-driver with Apache License 2.0 | 5 votes |
package jp.ne.opt.redshiftfake

import java.net.URI

import com.amazonaws.auth.{AWSCredentials, BasicAWSCredentials}
import com.amazonaws.regions.RegionUtils
import com.amazonaws.services.s3.AmazonS3Client
import org.gaul.s3proxy.{AuthenticationType, S3Proxy}
import org.jclouds.ContextBuilder
import org.jclouds.blobstore.BlobStoreContext
import org.scalatest.{BeforeAndAfterAll, Suite}

trait S3Sandbox extends BeforeAndAfterAll { this: Suite =>
  val dummyCredentials: Credentials.WithKey
  val s3Endpoint: String

  var s3Proxy: S3Proxy = _

  override def beforeAll(): Unit = {
    val blobContext: BlobStoreContext = ContextBuilder
      .newBuilder("transient")
      .build(classOf[BlobStoreContext])

    s3Proxy = S3Proxy.builder
      .blobStore(blobContext.getBlobStore)
      .awsAuthentication(AuthenticationType.AWS_V4, dummyCredentials.accessKeyId, dummyCredentials.secretAccessKey)
      .endpoint(URI.create(s3Endpoint))
      .build
    s3Proxy.start()
  }

  override def afterAll(): Unit = {
    s3Proxy.stop()
  }

  def createS3Client(s3Region: String): AmazonS3Client = {
    val credentials: AWSCredentials =
      new BasicAWSCredentials(dummyCredentials.accessKeyId, dummyCredentials.secretAccessKey)
    val client = new AmazonS3Client(credentials)
    client.setRegion(RegionUtils.getRegion(s3Region))
    client.setEndpoint(s3Endpoint)
    client
  }
}
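A sketch of a suite mixing in the sandbox. Credentials.WithKey is assumed to accept the access key id and secret (those are the field names the trait reads); all values are hypothetical:

import org.scalatest.FunSuite

class CopySpec extends FunSuite with S3Sandbox {
  val dummyCredentials = Credentials.WithKey("AKIA_DUMMY_KEY", "dummy-secret")
  val s3Endpoint       = "http://127.0.0.1:8080"

  test("can talk to the in-memory S3") {
    val client = createS3Client("ap-northeast-1")
    client.createBucket("fake-bucket")
  }
}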
Example 150
Source File: BigQueryTypeSpec.scala From shapeless-datatype with Apache License 2.0 | 5 votes |
package shapeless.datatype.bigquery

import java.net.URI

import com.fasterxml.jackson.databind.{ObjectMapper, SerializationFeature}
import com.google.api.services.bigquery.model.TableRow
import com.google.common.io.BaseEncoding
import com.google.protobuf.ByteString
import org.joda.time.{Instant, LocalDate, LocalDateTime, LocalTime}
import org.scalacheck.Prop.forAll
import org.scalacheck.ScalacheckShapeless._
import org.scalacheck._
import shapeless._
import shapeless.datatype.record._

import scala.reflect.runtime.universe._

object BigQueryTypeSpec extends Properties("BigQueryType") {
  import shapeless.datatype.test.Records._
  import shapeless.datatype.test.SerializableUtils._

  val mapper = new ObjectMapper().disable(SerializationFeature.FAIL_ON_EMPTY_BEANS)

  implicit def compareByteArrays(x: Array[Byte], y: Array[Byte]) = java.util.Arrays.equals(x, y)
  implicit def compareIntArrays(x: Array[Int], y: Array[Int]) = java.util.Arrays.equals(x, y)

  def roundTrip[A: TypeTag, L <: HList](m: A)(implicit
    gen: LabelledGeneric.Aux[A, L],
    fromL: FromTableRow[L],
    toL: ToTableRow[L],
    mr: MatchRecord[L]
  ): Boolean = {
    BigQuerySchema[A] // FIXME: verify the generated schema
    val t = ensureSerializable(BigQueryType[A])
    val f1: SerializableFunction[A, TableRow] =
      new SerializableFunction[A, TableRow] {
        override def apply(m: A): TableRow = t.toTableRow(m)
      }
    val f2: SerializableFunction[TableRow, Option[A]] =
      new SerializableFunction[TableRow, Option[A]] {
        override def apply(m: TableRow): Option[A] = t.fromTableRow(m)
      }
    val toFn = ensureSerializable(f1)
    val fromFn = ensureSerializable(f2)
    val copy = fromFn(mapper.readValue(mapper.writeValueAsString(toFn(m)), classOf[TableRow]))
    val rm = RecordMatcher[A]
    copy.exists(rm(_, m))
  }

  implicit val byteStringBigQueryMappableType = BigQueryType.at[ByteString]("BYTES")(
    x => ByteString.copyFrom(BaseEncoding.base64().decode(x.toString)),
    x => BaseEncoding.base64().encode(x.toByteArray)
  )
  property("required") = forAll { m: Required => roundTrip(m) }
  property("optional") = forAll { m: Optional => roundTrip(m) }
  property("repeated") = forAll { m: Repeated => roundTrip(m) }
  property("mixed") = forAll { m: Mixed => roundTrip(m) }
  property("nested") = forAll { m: Nested => roundTrip(m) }
  property("seqs") = forAll { m: Seqs => roundTrip(m) }

  implicit val arbDate = Arbitrary(arbInstant.arbitrary.map(i => new LocalDate(i.getMillis)))
  implicit val arbTime = Arbitrary(arbInstant.arbitrary.map(i => new LocalTime(i.getMillis)))
  implicit val arbDateTime = Arbitrary(
    arbInstant.arbitrary.map(i => new LocalDateTime(i.getMillis))
  )

  case class DateTimeTypes(
    instant: Instant,
    date: LocalDate,
    time: LocalTime,
    dateTime: LocalDateTime
  )
  property("date time types") = forAll { m: DateTimeTypes => roundTrip(m) }

  implicit val uriBigQueryType =
    BigQueryType.at[URI]("STRING")(v => URI.create(v.toString), _.toASCIIString)
  property("custom") = forAll { m: Custom => roundTrip(m) }
}
Example 151
Source File: AvroTypeSpec.scala From shapeless-datatype with Apache License 2.0 | 5 votes |
package shapeless.datatype.avro

import java.io.{ByteArrayInputStream, ByteArrayOutputStream}
import java.net.URI
import java.nio.ByteBuffer

import com.google.protobuf.ByteString
import org.apache.avro.Schema
import org.apache.avro.generic.{GenericDatumReader, GenericDatumWriter, GenericRecord}
import org.apache.avro.io.{DecoderFactory, EncoderFactory}
import org.joda.time.Instant
import org.scalacheck.Prop.forAll
import org.scalacheck.ScalacheckShapeless._
import org.scalacheck._
import shapeless._
import shapeless.datatype.record._

import scala.reflect.runtime.universe._

object AvroTypeSpec extends Properties("AvroType") {
  import shapeless.datatype.test.Records._
  import shapeless.datatype.test.SerializableUtils._

  implicit def compareByteArrays(x: Array[Byte], y: Array[Byte]) = java.util.Arrays.equals(x, y)
  implicit def compareIntArrays(x: Array[Int], y: Array[Int]) = java.util.Arrays.equals(x, y)

  def roundTrip[A: TypeTag, L <: HList](m: A)(implicit
    gen: LabelledGeneric.Aux[A, L],
    fromL: FromAvroRecord[L],
    toL: ToAvroRecord[L],
    mr: MatchRecord[L]
  ): Boolean = {
    val t = ensureSerializable(AvroType[A])
    val f1: SerializableFunction[A, GenericRecord] =
      new SerializableFunction[A, GenericRecord] {
        override def apply(m: A): GenericRecord = t.toGenericRecord(m)
      }
    val f2: SerializableFunction[GenericRecord, Option[A]] =
      new SerializableFunction[GenericRecord, Option[A]] {
        override def apply(m: GenericRecord): Option[A] = t.fromGenericRecord(m)
      }
    val toFn = ensureSerializable(f1)
    val fromFn = ensureSerializable(f2)
    val copy = fromFn(roundTripRecord(toFn(m)))
    val rm = RecordMatcher[A]
    copy.exists(rm(_, m))
  }

  def roundTripRecord(r: GenericRecord): GenericRecord = {
    val writer = new GenericDatumWriter[GenericRecord](r.getSchema)
    val baos = new ByteArrayOutputStream()
    val encoder = EncoderFactory.get().binaryEncoder(baos, null)
    writer.write(r, encoder)
    encoder.flush()
    baos.close()
    val bytes = baos.toByteArray

    val reader = new GenericDatumReader[GenericRecord](r.getSchema)
    val bais = new ByteArrayInputStream(bytes)
    val decoder = DecoderFactory.get().binaryDecoder(bais, null)
    reader.read(null, decoder)
  }

  implicit val byteStringAvroType = AvroType.at[ByteString](Schema.Type.BYTES)(
    v => ByteString.copyFrom(v.asInstanceOf[ByteBuffer]),
    v => ByteBuffer.wrap(v.toByteArray)
  )
  implicit val instantAvroType =
    AvroType.at[Instant](Schema.Type.LONG)(v => new Instant(v.asInstanceOf[Long]), _.getMillis)

  property("required") = forAll { m: Required => roundTrip(m) }
  property("optional") = forAll { m: Optional => roundTrip(m) }
  property("repeated") = forAll { m: Repeated => roundTrip(m) }
  property("mixed") = forAll { m: Mixed => roundTrip(m) }
  property("nested") = forAll { m: Nested => roundTrip(m) }
  property("seqs") = forAll { m: Seqs => roundTrip(m) }

  implicit val uriAvroType =
    AvroType.at[URI](Schema.Type.STRING)(v => URI.create(v.toString), _.toString)
  property("custom") = forAll { m: Custom => roundTrip(m) }
}
Example 152
Source File: DatastoreTypeSpec.scala From shapeless-datatype with Apache License 2.0 | 5 votes |
package shapeless.datatype.datastore

import java.net.URI

import com.google.datastore.v1.Entity
import com.google.datastore.v1.client.DatastoreHelper._
import org.scalacheck.Prop.{all, forAll}
import org.scalacheck.ScalacheckShapeless._
import org.scalacheck._
import shapeless._
import shapeless.datatype.record._

object DatastoreTypeSpec extends Properties("DatastoreType") {
  import shapeless.datatype.test.Records._
  import shapeless.datatype.test.SerializableUtils._

  implicit def compareByteArrays(x: Array[Byte], y: Array[Byte]) = java.util.Arrays.equals(x, y)
  implicit def compareIntArrays(x: Array[Int], y: Array[Int]) = java.util.Arrays.equals(x, y)

  def roundTrip[A, L <: HList](m: A)(implicit
    gen: LabelledGeneric.Aux[A, L],
    fromL: FromEntity[L],
    toL: ToEntity[L],
    mr: MatchRecord[L]
  ): Prop = {
    val t = ensureSerializable(DatastoreType[A])
    val f1: SerializableFunction[A, Entity] =
      new SerializableFunction[A, Entity] {
        override def apply(m: A): Entity = t.toEntity(m)
      }
    val f2: SerializableFunction[Entity, Option[A]] =
      new SerializableFunction[Entity, Option[A]] {
        override def apply(m: Entity): Option[A] = t.fromEntity(m)
      }
    val f3: SerializableFunction[A, Entity.Builder] =
      new SerializableFunction[A, Entity.Builder] {
        override def apply(m: A): Entity.Builder = t.toEntityBuilder(m)
      }
    val f4: SerializableFunction[Entity.Builder, Option[A]] =
      new SerializableFunction[Entity.Builder, Option[A]] {
        override def apply(m: Entity.Builder): Option[A] = t.fromEntityBuilder(m)
      }
    val toFn1 = ensureSerializable(f1)
    val fromFn1 = ensureSerializable(f2)
    val toFn2 = ensureSerializable(f3)
    val fromFn2 = ensureSerializable(f4)
    val copy1 = fromFn1(toFn1(m))
    val copy2 = fromFn2(toFn2(m))
    val rm = RecordMatcher[A]
    all(copy1.exists(rm(_, m)), copy2.exists(rm(_, m)))
  }

  property("required") = forAll { m: Required => roundTrip(m) }
  property("optional") = forAll { m: Optional => roundTrip(m) }
  property("repeated") = forAll { m: Repeated => roundTrip(m) }
  property("mixed") = forAll { m: Mixed => roundTrip(m) }
  property("nested") = forAll { m: Nested => roundTrip(m) }
  property("seqs") = forAll { m: Seqs => roundTrip(m) }

  implicit val uriDatastoreType =
    DatastoreType.at[URI](v => URI.create(v.getStringValue), u => makeValue(u.toString).build())
  property("custom") = forAll { m: Custom => roundTrip(m) }
}
Example 153
Source File: TensorFlowTypeSpec.scala From shapeless-datatype with Apache License 2.0 | 5 votes |
package shapeless.datatype.tensorflow

import java.net.URI

import org.joda.time.Instant
import org.scalacheck.Prop.{all, forAll}
import org.scalacheck.ScalacheckShapeless._
import org.scalacheck._
import org.tensorflow.example.Example
import shapeless._
import shapeless.datatype.record._

object TensorFlowTypeSpec extends Properties("TensorFlowType") {
  import shapeless.datatype.test.Records._
  import shapeless.datatype.test.SerializableUtils._

  implicit def compareByteArrays(x: Array[Byte], y: Array[Byte]) = java.util.Arrays.equals(x, y)
  implicit def compareIntArrays(x: Array[Int], y: Array[Int]) = java.util.Arrays.equals(x, y)
  implicit def compareDouble(x: Double, y: Double) = x.toFloat == y.toFloat

  def roundTrip[A, L <: HList](m: A)(implicit
    gen: LabelledGeneric.Aux[A, L],
    fromL: FromFeatures[L],
    toL: ToFeatures[L],
    mr: MatchRecord[L]
  ): Prop = {
    val t = ensureSerializable(TensorFlowType[A])
    val f1: SerializableFunction[A, Example] =
      new SerializableFunction[A, Example] {
        override def apply(m: A): Example = t.toExample(m)
      }
    val f2: SerializableFunction[Example, Option[A]] =
      new SerializableFunction[Example, Option[A]] {
        override def apply(m: Example): Option[A] = t.fromExample(m)
      }
    val f3: SerializableFunction[A, Example.Builder] =
      new SerializableFunction[A, Example.Builder] {
        override def apply(m: A): Example.Builder = t.toExampleBuilder(m)
      }
    val f4: SerializableFunction[Example.Builder, Option[A]] =
      new SerializableFunction[Example.Builder, Option[A]] {
        override def apply(m: Example.Builder): Option[A] = t.fromExampleBuilder(m)
      }
    val toFn1 = ensureSerializable(f1)
    val fromFn1 = ensureSerializable(f2)
    val toFn2 = ensureSerializable(f3)
    val fromFn2 = ensureSerializable(f4)
    val copy1 = fromFn1(toFn1(m))
    val copy2 = fromFn2(toFn2(m))
    val rm = RecordMatcher[A]
    all(copy1.exists(rm(_, m)), copy2.exists(rm(_, m)))
  }

  implicit val timestampTensorFlowMappableType = TensorFlowType.at[Instant](
    TensorFlowType.toLongs(_).map(new Instant(_)),
    xs => TensorFlowType.fromLongs(xs.map(_.getMillis))
  )
  property("required") = forAll { m: Required => roundTrip(m) }
  property("optional") = forAll { m: Optional => roundTrip(m) }
  property("repeated") = forAll { m: Repeated => roundTrip(m) }
  property("mixed") = forAll { m: Mixed => roundTrip(m) }
  property("seqs") = forAll { m: Seqs => roundTrip(m) }

  implicit val uriTensorFlowType = TensorFlowType.at[URI](
    TensorFlowType.toStrings(_).map(URI.create),
    xs => TensorFlowType.fromStrings(xs.map(_.toString))
  )
  property("custom") = forAll { m: Custom => roundTrip(m) }
}
Example 154
Source File: Records.scala From shapeless-datatype with Apache License 2.0 | 5 votes |
package shapeless.datatype.test

import java.net.URI

import com.google.protobuf.ByteString
import org.joda.time.Instant
import org.scalacheck._

object Records {
  case class Required(
    booleanField: Boolean,
    intField: Int,
    longField: Long,
    floatField: Float,
    doubleField: Double,
    stringField: String,
    byteStringField: ByteString,
    byteArrayField: Array[Byte],
    timestampField: Instant
  )
  case class Optional(
    booleanField: Option[Boolean],
    intField: Option[Int],
    longField: Option[Long],
    floatField: Option[Float],
    doubleField: Option[Double],
    stringField: Option[String],
    byteStringField: Option[ByteString],
    byteArrayField: Option[Array[Byte]],
    timestampField: Option[Instant]
  )
  case class Repeated(
    booleanField: List[Boolean],
    intField: List[Int],
    longField: List[Long],
    floatField: List[Float],
    doubleField: List[Double],
    stringField: List[String],
    byteStringField: List[ByteString],
    byteArrayField: List[Array[Byte]],
    timestampField: List[Instant]
  )
  case class Mixed(
    longField: Long,
    doubleField: Double,
    stringField: String,
    longFieldO: Option[Long],
    doubleFieldO: Option[Double],
    stringFieldO: Option[String],
    longFieldR: List[Long],
    doubleFieldR: List[Double],
    stringFieldR: List[String]
  )
  case class Nested(
    longField: Long,
    longFieldO: Option[Long],
    longFieldR: List[Long],
    mixedField: Mixed,
    mixedFieldO: Option[Mixed],
    mixedFieldR: List[Mixed]
  )
  case class Seqs(array: Array[Int], list: List[Int], vector: Vector[Int])

  case class Custom(uriField: URI, uriFieldO: Option[URI], uriFieldR: List[URI])

  implicit val arbByteString = Arbitrary(Gen.alphaStr.map(ByteString.copyFromUtf8))
  implicit val arbInstant = Arbitrary(Gen.chooseNum(0, Int.MaxValue).map(new Instant(_)))
  implicit val arbUri = Arbitrary(Gen.alphaStr.map(URI.create))
}
Example 155
Source File: HttpTimeoutSpec.scala From http-verbs with Apache License 2.0 | 5 votes |
package uk.gov.hmrc.play.http

import java.net.{ServerSocket, URI}
import java.util.concurrent.TimeoutException

import org.scalatest.concurrent.ScalaFutures
import org.scalatest.BeforeAndAfterAll
import org.scalatest.wordspec.AnyWordSpecLike
import org.scalatest.matchers.should.Matchers
import org.webbitserver.handler.{DelayedHttpHandler, StringHttpHandler}
import org.webbitserver.netty.NettyWebServer
import play.api.Play
import play.api.test.FakeApplication
import uk.gov.hmrc.http.HeaderCarrier
import uk.gov.hmrc.play.http.ws.WSHttp
import uk.gov.hmrc.play.test.TestHttpCore

import scala.concurrent.Await
import scala.concurrent.duration.DurationInt
import scala.concurrent.ExecutionContext.Implicits.global

class HttpTimeoutSpec extends AnyWordSpecLike with Matchers with ScalaFutures with BeforeAndAfterAll {

  lazy val fakeApplication =
    FakeApplication(additionalConfiguration = Map("ws.timeout.request" -> "1000"))

  override def beforeAll() {
    super.beforeAll()
    Play.start(fakeApplication)
  }

  override def afterAll() {
    super.afterAll()
    Play.stop(fakeApplication)
  }

  "HttpCalls" should {
    "be gracefully timeout when no response is received within the 'timeout' frame" in {
      val http = new WSHttp with TestHttpCore

      // get an unused port
      val ss = new ServerSocket(0)
      ss.close()
      val publicUri = URI.create(s"http://localhost:${ss.getLocalPort}")
      val ws = new NettyWebServer(global, ss.getLocalSocketAddress, publicUri)
      try {
        // start the web server
        ws.add(
          "/test",
          new DelayedHttpHandler(global, 2000, new StringHttpHandler("application/json", "{name:'pong'}")))
        ws.start().get()

        implicit val hc = HeaderCarrier()

        val start = System.currentTimeMillis()
        intercept[TimeoutException] {
          // make a request to the web server
          Await.result(http.doPost(s"$publicUri/test", "{name:'ping'}", Seq()), 5.seconds)
        }
        val diff = (System.currentTimeMillis() - start).toInt
        // there is a test-execution delay of around 700 ms
        diff should be >= 1000
        diff should be < 2500
      } finally {
        ws.stop()
      }
    }
  }
}
Example 156
Source File: HttpTimeoutSpec.scala From http-verbs with Apache License 2.0 | 5 votes |
package uk.gov.hmrc.play.http

import java.net.{ServerSocket, URI}
import java.util.concurrent.TimeoutException

import org.scalatest.concurrent.ScalaFutures
import org.scalatest.BeforeAndAfterAll
import org.scalatest.wordspec.AnyWordSpecLike
import org.scalatest.matchers.should.Matchers
import org.webbitserver.handler.{DelayedHttpHandler, StringHttpHandler}
import org.webbitserver.netty.NettyWebServer
import play.api.inject.guice.GuiceApplicationBuilder
import play.api.libs.ws.WSClient
import play.api.test.WsTestClient
import play.api.{Configuration, Play}
import uk.gov.hmrc.http.HeaderCarrier
import uk.gov.hmrc.play.http.ws.WSHttp
import uk.gov.hmrc.play.test.TestHttpCore

import scala.concurrent.{Await, ExecutionContext}
import scala.concurrent.duration.DurationInt

class HttpTimeoutSpec extends AnyWordSpecLike with Matchers with ScalaFutures with BeforeAndAfterAll {

  import ExecutionContext.Implicits.global

  lazy val fakeApplication =
    GuiceApplicationBuilder(configuration = Configuration("play.ws.timeout.request" -> "1000ms")).build()

  override def beforeAll() {
    super.beforeAll()
    Play.start(fakeApplication)
  }

  override def afterAll() {
    super.afterAll()
    Play.stop(fakeApplication)
  }

  WsTestClient.withClient { client =>
    "HttpCalls" should {
      "be gracefully timeout when no response is received within the 'timeout' frame" in {
        val http = new WSHttp with TestHttpCore {
          override val wsClient = fakeApplication.injector.instanceOf[WSClient]
        }

        // get an unused port
        val ss = new ServerSocket(0)
        ss.close()

        val executor = ExecutionContext.global // fromExecutorService(ExecutionContext.global)
        val publicUri = URI.create(s"http://localhost:${ss.getLocalPort}")
        val ws = new NettyWebServer(executor, ss.getLocalSocketAddress, publicUri)
        try {
          // start the web server
          ws.add(
            "/test",
            new DelayedHttpHandler(executor, 2000, new StringHttpHandler("application/json", "{name:'pong'}")))
          ws.start().get()

          implicit val hc = HeaderCarrier()

          val start = System.currentTimeMillis()
          intercept[TimeoutException] {
            // make a request to the web server
            Await.result(http.doPost(s"$publicUri/test", "{name:'ping'}", Seq()), 5.seconds)
          }
          val diff = (System.currentTimeMillis() - start).toInt
          // there is a test-execution delay of around 700 ms
          diff should be >= 1000
          diff should be < 2500
        } finally {
          ws.stop()
        }
      }
    }
  }
}
Example 157
Source File: LocalDBSCANArcherySuite.scala From dbscan-on-spark with Apache License 2.0 | 5 votes |
package org.apache.spark.mllib.clustering.dbscan

import java.net.URI

import scala.io.Source

import org.scalatest.FunSuite
import org.scalatest.Matchers

import org.apache.spark.mllib.linalg.Vectors

class LocalDBSCANArcherySuite extends FunSuite with Matchers {

  private val dataFile = "labeled_data.csv"

  test("should cluster") {

    val labeled: Map[DBSCANPoint, Double] =
      new LocalDBSCANArchery(eps = 0.3F, minPoints = 10)
        .fit(getRawData(dataFile))
        .map(l => (l, l.cluster.toDouble))
        .toMap

    val expected: Map[DBSCANPoint, Double] = getExpectedData(dataFile).toMap

    labeled.foreach {
      case (key, value) =>
        val t = expected(key)
        if (t != value) {
          println(s"expected: $t but got $value for $key")
        }
    }

    labeled should equal(expected)
  }

  def getExpectedData(file: String): Iterator[(DBSCANPoint, Double)] =
    Source
      .fromFile(getFile(file))
      .getLines()
      .map(s => {
        val vector = Vectors.dense(s.split(',').map(_.toDouble))
        val point = DBSCANPoint(vector)
        (point, vector(2))
      })

  def getRawData(file: String): Iterable[DBSCANPoint] =
    Source
      .fromFile(getFile(file))
      .getLines()
      .map(s => DBSCANPoint(Vectors.dense(s.split(',').map(_.toDouble))))
      .toIterable

  def getFile(filename: String): URI =
    getClass.getClassLoader.getResource(filename).toURI
}
Example 158
Source File: AuditMessages.scala From effpi with MIT License | 5 votes |
// Effpi - verified message-passing programs in Dotty
// Copyright 2019 Alceste Scalas and Elias Benussi
// Released under the MIT License: https://opensource.org/licenses/MIT

package effpi.examples.audit

import effpi.actor.ActorRef
import java.net.URI
import java.util.UUID

case class ReqMoney[R](from: URI, amount: BigDecimal, msg: R, replyTo: ActorRef[R])

case class LogActivity(who: ActorRef[Nothing], what: String, id: Long, replyTo: ActorRef[ActivityLogged])
case class ActivityLogged(who: ActorRef[Nothing], id: Long)

sealed trait PaymentService
case class Authorize(payer: URI, amount: BigDecimal, id: UUID, replyTo: ActorRef[PaymentResult]) extends PaymentService
case class Capture(id: UUID, amount: BigDecimal, replyTo: ActorRef[PaymentResult]) extends PaymentService
case class Void(id: UUID, replyTo: ActorRef[PaymentResult]) extends PaymentService
case class Refund(id: UUID, replyTo: ActorRef[PaymentResult]) extends PaymentService

sealed trait PaymentResult
case class PaymentSuccess(id: UUID) extends PaymentResult
case class PaymentRejected(id: UUID, reason: String) extends PaymentResult
case class IdUnkwown(id: UUID) extends PaymentResult
Example 159
Source File: SqsSource.scala From bahir with Apache License 2.0 | 5 votes |
package org.apache.spark.sql.streaming.sqs

import java.net.URI

import org.apache.hadoop.fs.Path

import org.apache.spark.internal.Logging
import org.apache.spark.sql.{DataFrame, Dataset, SparkSession}
import org.apache.spark.sql.execution.datasources.{DataSource, LogicalRelation}
import org.apache.spark.sql.execution.streaming._
import org.apache.spark.sql.execution.streaming.FileStreamSource._
import org.apache.spark.sql.types.StructType

class SqsSource(sparkSession: SparkSession,
                metadataPath: String,
                options: Map[String, String],
                override val schema: StructType) extends Source with Logging {

  private val sourceOptions = new SqsSourceOptions(options)

  private val hadoopConf = sparkSession.sessionState.newHadoopConf()

  private val metadataLog =
    new FileStreamSourceLog(FileStreamSourceLog.VERSION, sparkSession, metadataPath)
  private var metadataLogCurrentOffset = metadataLog.getLatest().map(_._1).getOrElse(-1L)

  private val maxFilesPerTrigger = sourceOptions.maxFilesPerTrigger

  private val maxFileAgeMs: Long = sourceOptions.maxFileAgeMs

  private val fileFormatClassName = sourceOptions.fileFormatClassName

  private val shouldSortFiles = sourceOptions.shouldSortFiles

  private val sqsClient = new SqsClient(sourceOptions, hadoopConf)

  metadataLog.allFiles().foreach { entry =>
    sqsClient.sqsFileCache.add(entry.path, MessageDescription(entry.timestamp, true, ""))
  }
  sqsClient.sqsFileCache.purge()

  logInfo(s"maxFilesPerBatch = $maxFilesPerTrigger, maxFileAgeMs = $maxFileAgeMs")

  // The declaration of this method was dropped from the excerpt; it is
  // reconstructed here from the call in getOffset below. (getBatch is likewise
  // omitted from this excerpt.)
  private def fetchMaxOffset(): FileStreamSourceOffset = synchronized {
    val batchFiles = sqsClient.sqsFileCache.getUncommittedFiles(maxFilesPerTrigger, shouldSortFiles)

    if (batchFiles.nonEmpty) {
      metadataLogCurrentOffset += 1
      metadataLog.add(metadataLogCurrentOffset, batchFiles.map {
        case (path, timestamp, receiptHandle) =>
          FileEntry(path = path, timestamp = timestamp, batchId = metadataLogCurrentOffset)
      }.toArray)
      logInfo(s"Log offset set to $metadataLogCurrentOffset with ${batchFiles.size} new files")

      val messageReceiptHandles = batchFiles.map {
        case (path, timestamp, receiptHandle) =>
          sqsClient.sqsFileCache.markCommitted(path)
          logDebug(s"New file: $path")
          receiptHandle
      }.toList
      sqsClient.addToDeleteMessageQueue(messageReceiptHandles)
    }

    val numPurged = sqsClient.sqsFileCache.purge()

    if (!sqsClient.deleteMessageQueue.isEmpty) {
      sqsClient.deleteMessagesFromQueue()
    }

    logTrace(
      s"""
         |Number of files selected for batch = ${batchFiles.size}
         |Number of files purged from tracking map = $numPurged
       """.stripMargin)

    FileStreamSourceOffset(metadataLogCurrentOffset)
  }

  override def getOffset: Option[Offset] = Some(fetchMaxOffset()).filterNot(_.logOffset == -1)

  override def commit(end: Offset): Unit = {
    // No-op for now; SqsSource currently garbage-collects files based on timestamp
    // and the value of the maxFileAge parameter.
  }

  override def stop(): Unit = {
    if (!sqsClient.sqsScheduler.isTerminated) {
      sqsClient.sqsScheduler.shutdownNow()
    }
  }

  override def toString: String = s"SqsSource[${sqsClient.sqsUrl}]"
}
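A hedged reading sketch: the short name this source registers under is not visible in the excerpt, so "s3-sqs" and the option keys other than maxFilesPerTrigger are assumptions; `sparkSession` and `schema` are assumed to be in scope:

val input = sparkSession.readStream
  .format("s3-sqs") // assumed short name
  .schema(schema)   // schema of the files landing in S3
  .option("sqsUrl", "https://sqs.us-east-1.amazonaws.com/123456789012/queue") // hypothetical
  .option("fileFormat", "json") // assumed key
  .option("maxFilesPerTrigger", "50")
  .load()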
Example 160
Source File: MQTTTestUtils.scala From bahir with Apache License 2.0 | 5 votes |
package org.apache.spark.streaming.mqtt import java.net.{ServerSocket, URI} import java.nio.charset.StandardCharsets import scala.language.postfixOps import org.apache.activemq.broker.{BrokerService, TransportConnector} import org.apache.activemq.usage.SystemUsage import org.apache.commons.lang3.RandomUtils import org.eclipse.paho.client.mqttv3._ import org.eclipse.paho.client.mqttv3.persist.MqttDefaultFilePersistence import org.apache.spark.SparkConf import org.apache.spark.internal.Logging import org.apache.spark.util.Utils private[mqtt] class MQTTTestUtils extends Logging { private val persistenceDir = Utils.createTempDir() private val brokerHost = "localhost" private val brokerPort = findFreePort() private var broker: BrokerService = _ private var systemUsage: SystemUsage = _ private var connector: TransportConnector = _ def brokerUri: String = { s"$brokerHost:$brokerPort" } def setup(): Unit = { broker = new BrokerService() broker.setDataDirectoryFile(Utils.createTempDir()) broker.getSystemUsage().setSendFailIfNoSpace(false) systemUsage = broker.getSystemUsage() systemUsage.getStoreUsage().setLimit(1024L * 1024 * 256); // 256 MB (default: 100 GB) systemUsage.getTempUsage().setLimit(1024L * 1024 * 128); // 128 MB (default: 50 GB) connector = new TransportConnector() connector.setName("mqtt") connector.setUri(new URI("mqtt://" + brokerUri)) broker.addConnector(connector) broker.start() } def teardown(): Unit = { if (broker != null) { broker.stop() broker = null } if (connector != null) { connector.stop() connector = null } Utils.deleteRecursively(persistenceDir) } private def findFreePort(): Int = { val candidatePort = RandomUtils.nextInt(1024, 65536) Utils.startServiceOnPort(candidatePort, (trialPort: Int) => { val socket = new ServerSocket(trialPort) socket.close() (null, trialPort) }, new SparkConf())._2 } def publishData(topic: String, data: String): Unit = { var client: MqttClient = null try { val persistence = new MqttDefaultFilePersistence(persistenceDir.getAbsolutePath) client = new MqttClient("tcp://" + brokerUri, MqttClient.generateClientId(), persistence) client.connect() if (client.isConnected) { val msgTopic = client.getTopic(topic) val message = new MqttMessage(data.getBytes(StandardCharsets.UTF_8)) message.setQos(1) message.setRetained(true) for (i <- 0 to 10) { try { msgTopic.publish(message) } catch { case e: MqttException if e.getReasonCode == MqttException.REASON_CODE_MAX_INFLIGHT => // wait for Spark streaming to consume something from the message queue Thread.sleep(50) } } } } finally { if (client != null) { client.disconnect() client.close() client = null } } } }
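A sketch of the intended test lifecycle, using only the methods defined above (callable from test code in the same `mqtt` package, since the class is package-private):

    val mqtt = new MQTTTestUtils
    mqtt.setup()                          // start the embedded ActiveMQ broker
    try {
      mqtt.publishData("test-topic", "hello")   // retries while the in-flight window is full
      // ... run streaming assertions against s"tcp://${mqtt.brokerUri}" here ...
    } finally {
      mqtt.teardown()                     // stop the broker and delete temp dirs
    }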
Example 161
Source File: ExternalVariablesLoader.scala From incubator-daffodil with Apache License 2.0 | 5 votes |
package org.apache.daffodil.externalvars import scala.xml.parsing.ConstructingParser import java.io.File import java.net.URI import scala.xml.Node import scala.io.Codec.string2codec import org.apache.daffodil.processors.{ VariableUtils, VariableMap } import org.apache.daffodil.exceptions.Assert import org.apache.daffodil.util.Misc._ import org.apache.daffodil.exceptions.ThrowsSDE import scala.collection.immutable.Queue object ExternalVariablesLoader { def loadVariables(bindings: Seq[Binding], referringContext: ThrowsSDE, vmap: VariableMap): VariableMap = { Assert.usage(referringContext != null, "loadVariables expects 'referringContext' to not be null!") VariableUtils.setExternalVariables(vmap, bindings, referringContext) vmap } // The following are methods that retrieve and transform variables into Seq[Binding] def mapToBindings(vars: Map[String, String]): Queue[Binding] = { val varsKVP = vars.map { case (name, value) => { Binding(name, value) } } Queue.empty.enqueue(varsKVP) } def uriToBindings(uri: URI): Queue[Binding] = { Assert.usage(uri ne null) val file = new File(uri) fileToBindings(file) } def fileToBindings(file: File): Queue[Binding] = { Assert.usage(file ne null) ExternalVariablesValidator.validate(file) match { case Left(ex) => Assert.abort(ex) case Right(_) => // Success } val enc = determineEncoding(file) // The encoding is needed for ConstructingParser val input = scala.io.Source.fromURI(file.toURI)(enc) val node = ConstructingParser.fromSource(input, true).document.docElem nodeToBindings(node) } def nodeToBindings(node: Node): Queue[Binding] = { Assert.usage(node ne null) val newBindings = Binding.getBindings(node) var res = Queue.empty[Binding] // couldn't get the enqueue(iterable) method overload to resolve. // So just doing this one by one newBindings.foreach{ b => res = res.enqueue(b) } res } }
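A sketch of feeding external variables in from a plain Map; the "{namespace}name" key syntax is an assumption about the extended-QName format `Binding` accepts:

    // External variables arrive as a Map and become a Queue[Binding].
    val bindings = ExternalVariablesLoader.mapToBindings(
      Map("{http://example.com/vars}debug" -> "true", "limit" -> "42"))

    // The queue can then seed a VariableMap:
    // ExternalVariablesLoader.loadVariables(bindings, referringContext, vmap)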
Example 162
Source File: Validator.scala From incubator-daffodil with Apache License 2.0 | 5 votes |
package org.apache.daffodil.util import javax.xml.transform.stream.StreamSource import javax.xml.XMLConstants import scala.xml.parsing.NoBindingFactoryAdapter import java.net.URI import org.apache.daffodil.xml.DFDLCatalogResolver import scala.collection.mutable import org.xml.sax.ErrorHandler object Validator extends NoBindingFactoryAdapter { private type CacheType = mutable.HashMap[Seq[String], javax.xml.validation.Validator] private val validationSchemaCache = new ThreadLocal[CacheType] { override def initialValue = new CacheType } def validateXMLSources(schemaFileNames: Seq[String], document: java.io.InputStream, errHandler: ErrorHandler): Unit = { val cache = validationSchemaCache.get() val validator = { val optCachedValidator = cache.get(schemaFileNames) optCachedValidator match { case Some(validator) => { validator.reset() validator } case None => { val schemaSources: Seq[javax.xml.transform.Source] = schemaFileNames.map { fn => { val uri = new URI(fn) val is = uri.toURL.openStream() val stream = new StreamSource(is) stream.setSystemId(uri.toString) // must set this so that relative URIs will be created for import/include files. stream } } val factory = new org.apache.xerces.jaxp.validation.XMLSchemaFactory() factory.setErrorHandler(errHandler) val resolver = DFDLCatalogResolver.get factory.setResourceResolver(resolver) val schema = factory.newSchema(schemaSources.toArray) val validator = schema.newValidator() validator.setFeature(XMLConstants.FEATURE_SECURE_PROCESSING, true) // validator.setFeature("http://xml.org/sax/features/validation", true) // If you enable the feature below, it seems to do no validation at all. Just passes. // validator.setFeature("http://apache.org/xml/features/validation/dynamic", true) validator.setFeature("http://apache.org/xml/features/validation/schema", true) validator.setFeature("http://apache.org/xml/features/validation/schema-full-checking", true) validator.setErrorHandler(errHandler) validator.setResourceResolver(resolver) cache.put(schemaFileNames, validator) validator } } } val documentSource = new StreamSource(document) validator.validate(documentSource) } }
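A sketch of a call site; the fail-fast SAX handler is not part of the listing above, but `org.xml.sax.ErrorHandler` has exactly these three callbacks:

    import org.xml.sax.{ErrorHandler, SAXParseException}

    val failFast = new ErrorHandler {
      def warning(e: SAXParseException): Unit = ()      // ignore warnings
      def error(e: SAXParseException): Unit = throw e   // escalate validation errors
      def fatalError(e: SAXParseException): Unit = throw e
    }

    val xml = new java.io.FileInputStream("document.xml")
    try Validator.validateXMLSources(Seq("file:///schemas/main.xsd"), xml, failFast)
    finally xml.close()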
Example 163
Source File: StringJvmTests.scala From mouse with MIT License | 5 votes |
package mouse import java.net.{MalformedURLException, URI, URISyntaxException, URL} import cats.Eq import cats.syntax.all._ import mouse.string._ class StringJvmTests extends MouseSuite { test("parseFloat") { "123.1".parseFloat should ===(123.1f.asRight[NumberFormatException]) } test("parseURL") { implicit val urlEq: Eq[URL] = Eq.fromUniversalEquals implicit val malformedURLExceptionEq: Eq[MalformedURLException] = new Eq[MalformedURLException] { override def eqv(x: MalformedURLException, y: MalformedURLException): Boolean = x.getMessage == y.getMessage } "http://example.com".parseURL should ===(new URL("http://example.com").asRight[MalformedURLException]) "blah".parseURL should ===(new MalformedURLException("no protocol: blah").asLeft) } test("parseURI") { implicit val urlEq: Eq[URI] = Eq.fromUniversalEquals implicit val malformedURIExceptionEq: Eq[URISyntaxException] = new Eq[URISyntaxException] { override def eqv(x: URISyntaxException, y: URISyntaxException): Boolean = x.getMessage == y.getMessage } "http://example.com".parseURI should ===(new URI("http://example.com").asRight[URISyntaxException]) "invalid uri".parseURI should ===(new URISyntaxException("invalid uri", "Illegal character in path at index 7").asLeft) } }
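The same extension method outside a test context, folding both outcomes; the URL is an example value:

    import mouse.string._

    "http://example.com/a b".parseURI match {
      case Right(uri) => println(s"host = ${uri.getHost}")
      case Left(e)    => println(s"bad URI: ${e.getMessage}")  // the space makes this Left
    }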
Example 164
Source File: InceptionModel.scala From flink-tensorflow with Apache License 2.0 | 5 votes |
package org.apache.flink.contrib.tensorflow.examples.inception

import java.net.URI
import java.nio.charset.StandardCharsets

import org.apache.flink.contrib.tensorflow.examples.inception.LabelMethod._
import org.apache.flink.contrib.tensorflow.graphs.{DefaultGraphLoader, GraphLoader, GraphMethod}
import org.apache.flink.contrib.tensorflow.models.generic.GenericModel
import org.apache.flink.contrib.tensorflow.models.ModelFunction
import org.apache.flink.contrib.tensorflow.util.GraphUtils
import org.apache.flink.core.fs.Path
import org.slf4j.{Logger, LoggerFactory}
import org.tensorflow.Tensor
import org.tensorflow.contrib.scala._
import org.tensorflow.framework.{SignatureDef, TensorInfo}

import scala.collection.JavaConverters._

sealed trait LabelMethod extends GraphMethod {
  def name = LABEL_METHOD_NAME
  override type Input = ImageTensor
  override type Output = LabelTensor
}

@SerialVersionUID(1L)
object LabelMethod {
  val LABEL_METHOD_NAME = "inception/label"
  val LABEL_INPUTS = "inputs"
  val LABEL_OUTPUTS = "outputs"

  // NOTE: the enclosing implicit class was elided from this listing (the brace count and
  // the free `t` give it away); it is reconstructed here with an assumed name so that
  // `t` -- the [M N] label tensor -- is in scope.
  implicit class LabelTensorOps(t: LabelTensor) {
    def toTextLabels(take: Int = 3)(implicit model: InceptionModel): Array[LabeledImage] = {
      // the tensor consists of a row per image, with columns representing label probabilities
      require(t.numDimensions() == 2, "expected a [M N] shaped tensor")
      val matrix = Array.ofDim[Float](t.shape()(0).toInt, t.shape()(1).toInt)
      t.copyTo(matrix)
      matrix.map { row =>
        LabeledImage(row.toList.zip(model.labels).sortWith(_._1 > _._1).take(take))
      }
    }
  }
}
Example 165
Source File: PowerBIAuthenticationWithAuthorizationCode.scala From spark-powerbi-connector with Apache License 2.0 | 5 votes |
package com.microsoft.azure.powerbi.authentication import java.net.URI import java.util.concurrent.{Executors, ExecutorService, Future} import javax.naming.ServiceUnavailableException import com.microsoft.aad.adal4j.{AuthenticationContext, AuthenticationResult} case class PowerBIAuthenticationWithAuthorizationCode(powerBIAuthorityURL: String, powerBIResourceURL: String, powerBIClientID: String, activeDirectoryAuthorizationCode: String, activeDirectoryRedirectUri: URI) extends PowerBIAuthentication{ def getAccessToken: String = if (this.accessToken != null && this.accessToken.nonEmpty) this.accessToken else refreshAccessToken def refreshAccessToken: String = retrieveToken.getAccessToken private def retrieveToken: AuthenticationResult = { var authenticationResult: AuthenticationResult = null var executorService: ExecutorService = null try { executorService = Executors.newFixedThreadPool(1) val authenticationContext: AuthenticationContext = new AuthenticationContext(powerBIAuthorityURL, true, executorService) val authenticationResultFuture: Future[AuthenticationResult] = authenticationContext.acquireTokenByAuthorizationCode(activeDirectoryAuthorizationCode, powerBIResourceURL, powerBIClientID, activeDirectoryRedirectUri, null) authenticationResult = authenticationResultFuture.get() } finally { executorService.shutdown() } if (authenticationResult == null) { throw new ServiceUnavailableException("Authentication result empty") } this.accessToken = authenticationResult.getAccessToken authenticationResult } private var accessToken: String = _ }
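A sketch of a call site; every concrete value below is a placeholder, and the parameter names come straight from the case class above:

    val auth = PowerBIAuthenticationWithAuthorizationCode(
      powerBIAuthorityURL = "https://login.microsoftonline.com/common/oauth2/authorize",
      powerBIResourceURL = "https://analysis.windows.net/powerbi/api",
      powerBIClientID = "<client-id>",
      activeDirectoryAuthorizationCode = "<code-from-redirect>",
      activeDirectoryRedirectUri = new java.net.URI("https://localhost/redirect"))

    val token = auth.getAccessToken   // fetches once, then reuses the cached token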
Example 166
Source File: PowerBIAuthenticationWithUsernamePassword.scala From spark-powerbi-connector with Apache License 2.0 | 5 votes |
package com.microsoft.azure.powerbi.authentication import java.net.URI import java.util.concurrent.{Executors, ExecutorService, Future} import javax.naming.ServiceUnavailableException import com.microsoft.aad.adal4j.{AuthenticationContext, AuthenticationResult} case class PowerBIAuthenticationWithUsernamePassword(powerBIAuthorityURL: String, powerBIResourceURL: String, powerBIClientID: String, activeDirectoryUsername: String, activeDirectoryPassword: String) extends PowerBIAuthentication{ def getAccessToken: String = if (this.accessToken != null && this.accessToken.nonEmpty) this.accessToken else refreshAccessToken def refreshAccessToken: String = retrieveToken.getAccessToken private def retrieveToken: AuthenticationResult = { var authenticationResult: AuthenticationResult = null var executorService: ExecutorService = null try { executorService = Executors.newFixedThreadPool(1) val authenticationContext: AuthenticationContext = new AuthenticationContext(powerBIAuthorityURL, true, executorService) val authenticationResultFuture: Future[AuthenticationResult] = authenticationContext.acquireToken(powerBIResourceURL, powerBIClientID, activeDirectoryUsername, activeDirectoryPassword, null) authenticationResult = authenticationResultFuture.get() } finally { executorService.shutdown() } if (authenticationResult == null) { throw new ServiceUnavailableException("Authentication result empty") } this.accessToken = authenticationResult.getAccessToken authenticationResult } private var accessToken: String = _ }
Example 167
Source File: KubeServiceLocatorServer.scala From lagom-on-kube with Apache License 2.0 | 5 votes |
package me.alexray.lagom.kube.discovery import java.io.Closeable import java.net.URI import java.util.{Map => JMap} import com.lightbend.lagom.gateway.{ServiceGateway, ServiceGatewayConfig, ServiceGatewayFactory} import me.alexray.lagom.kube.discovery.impl.KubeServiceRegistryModule import me.alexray.lagom.kube.gateway.{KubeServiceGateway, KubeServiceGatewayConfig, KubeServiceGatewayFactory} import play.api.Application import play.api.Logger import play.api.Mode import play.api.Play import play.api.inject.guice.GuiceApplicationBuilder import play.api.inject.guice.GuiceableModule.fromGuiceModule import play.core.server.ServerConfig import play.core.server.ServerProvider import play.core.server.ServerWithStop import scala.util.control.NonFatal class KubeServiceLocatorServer extends Closeable { private val logger: Logger = Logger(this.getClass) @volatile private var server: ServerWithStop = _ @volatile private var gateway: KubeServiceGateway = _ def start(serviceLocatorPort: Int, serviceGatewayPort: Int, unmanagedServices: JMap[String, String]): Unit = synchronized { require(server == null, "Service locator is already running on " + server.mainAddress) val application = createApplication(KubeServiceGatewayConfig(serviceGatewayPort), unmanagedServices) Play.start(application) try { server = createServer(application, serviceLocatorPort) } catch { case NonFatal(e) => throw new RuntimeException(s"Unable to start service locator on port $serviceLocatorPort", e) } try { gateway = application.injector.instanceOf[KubeServiceGatewayFactory].start() } catch { case NonFatal(e) => throw new RuntimeException(s"Unable to start service gateway on port $serviceGatewayPort", e) } logger.info("Service locator can be reached at " + serviceLocatorAddress) logger.info("Service gateway can be reached at " + serviceGatewayAddress) } private def createApplication(serviceGatewayConfig: KubeServiceGatewayConfig, unmanagedServices: JMap[String, String]): Application = { new GuiceApplicationBuilder() .overrides(KubeServiceRegistryModule(serviceGatewayConfig, unmanagedServices)) .build() } private def createServer(application: Application, port: Int): ServerWithStop = { val config = ServerConfig(port = Some(port), mode = Mode.Test) val provider = implicitly[ServerProvider] provider.createServer(config, application) } override def close(): Unit = synchronized { if (server == null) Logger.logger.debug("Service locator was already stopped") else { logger.debug("Stopping service locator...") server.stop() server = null logger.info("Service locator stopped") } } def serviceLocatorAddress: URI = { // Converting InetSocketAddress into URL is not that simple. // Because we know the service locator is running locally, I'm hardcoding the hostname and protocol. new URI(s"http://localhost:${server.mainAddress.getPort}") } def serviceGatewayAddress: URI = { new URI(s"http://localhost:${gateway.address.getPort}") } }
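A sketch of running the locator standalone; the ports and the unmanaged-service map are example values:

    import scala.collection.JavaConverters._

    val locator = new KubeServiceLocatorServer
    locator.start(
      serviceLocatorPort = 8000,
      serviceGatewayPort = 9008,
      unmanagedServices = Map("cas_native" -> "http://localhost:9042").asJava)
    // ... later ...
    locator.close()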
Example 168
Source File: KubeServiceRegistryImpl.scala From lagom-on-kube with Apache License 2.0 | 5 votes |
package me.alexray.lagom.kube.discovery.impl import java.net.URI import java.util.concurrent.TimeUnit import javax.inject.{Inject, Named} import akka.NotUsed import akka.actor.ActorRef import akka.pattern.ask import akka.util.Timeout import com.lightbend.lagom.internal.javadsl.registry.{RegisteredService, ServiceRegistry, ServiceRegistryService} import com.lightbend.lagom.javadsl.api.ServiceCall import com.lightbend.lagom.javadsl.api.transport.NotFound import org.pcollections.PSequence import play.api.Logger import scala.concurrent.Future import scala.concurrent.duration.Duration import me.alexray.lagom.kube.discovery.KubeServiceRegistryActor import scala.language.implicitConversions class KubeServiceRegistryImpl @Inject() (@Named(KubeServiceRegistryModule.KUBE_SERVICE_REGISTRY_ACTOR) registry: ActorRef) extends ServiceRegistry { import me.alexray.lagom.converters.ServiceCallConverter._ private val logger: Logger = Logger(this.getClass) implicit val timeout = Timeout(Duration.create(5, TimeUnit.SECONDS)) import scala.concurrent.ExecutionContext.Implicits.global override def register(name: String): ServiceCall[ServiceRegistryService, NotUsed] = (service: ServiceRegistryService) => { logger.debug("register invoked, name=[" + name + "], request=[" + service + "]") (registry ? KubeServiceRegistryActor.Register(name, service)).map(_ => NotUsed) } override def unregister(name: String): ServiceCall[NotUsed, NotUsed] = (request: NotUsed) => { logger.debug("unregister invoked, name=[" + name + "], request=[" + request + "]") registry ! KubeServiceRegistryActor.Remove(name) Future.successful(NotUsed) } override def lookup(name: String): ServiceCall[NotUsed, URI] = (request: NotUsed) => { logger.debug("locate invoked, name=[" + name + "], request=[" + request + "]") (registry ? KubeServiceRegistryActor.Lookup(name)).mapTo[Option[URI]].map { case Some(uri) => logger.debug("Location of service name=[" + name + "] is " + uri) uri case None => logger.debug("Service name=[" + name + "] has not been registered") throw new NotFound(name) } } override def registeredServices(): ServiceCall[NotUsed, PSequence[RegisteredService]] = (request: NotUsed) => { (registry ? KubeServiceRegistryActor.GetRegisteredServices).mapTo[KubeServiceRegistryActor.RegisteredServices].map(_.services) } }
Example 169
Source File: KubeServiceRegistration.scala From lagom-on-kube with Apache License 2.0 | 5 votes |
package me.alexray.lagom.kube.client import java.net.URI import akka.actor.ActorSystem import com.lightbend.lagom.internal.scaladsl.registry.{ServiceRegistry, ServiceRegistryService} import com.lightbend.lagom.scaladsl.api.ServiceInfo import play.api.inject.ApplicationLifecycle import play.api.{Configuration, Logger} import scala.concurrent.duration._ import scala.concurrent.{ExecutionContext, Future} import scala.language.postfixOps import scala.util.{Failure, Success} class KubeServiceRegistration(serviceInfo: ServiceInfo, lifecycle: ApplicationLifecycle, config: Configuration, registry: ServiceRegistry, actorSystem: ActorSystem)(implicit ec: ExecutionContext) { private val logger: Logger = Logger(this.getClass) private val uri = { val httpAddress = config.getString("service.http.address") .getOrElse(config.underlying.getString("play.server.http.address")) val httpPort = config.getString("play.server.http.port").get val uri = s"http://$httpAddress:$httpPort" logger.info(s"uri: $uri") URI.create(uri) } def completeServiceName(name: String): String = name + "-" + config.getString("service.instance.suffix").getOrElse("default").hashCode lifecycle.addStopHook { () => Future.sequence(serviceInfo.locatableServices.map { case (service, _) => registry.unregister(completeServiceName(service)).invoke() }).map(_ => ()) } private def heartBeat(): Unit = { actorSystem.scheduler.schedule(1 seconds, 1 minutes) { logger.debug("register service heartbeat ") register() } } private def register(): Unit = { serviceInfo.locatableServices.foreach { case (service, acls) => registry.register(completeServiceName(service)) .invoke(ServiceRegistryService(uri, acls)) .onComplete { case Success(_) => logger.info(s"Service name=[$service] successfully registered with service locator.") case Failure(e) => logger.error(s"Service name=[$service] couldn't register itself to the service locator.", e) logger.info("Service will try to register in 10 seconds with next heartbeat event") } } } heartBeat() }
Example 170
Source File: AuthenticationTests.scala From coursier with Apache License 2.0 | 5 votes |
package coursier import java.io.File import java.net.URI import java.nio.file.{Files, Path} import coursier.cache.FileCache import coursier.credentials.{DirectCredentials, FileCredentials} import coursier.parse.CredentialsParser import utest._ object AuthenticationTests extends TestSuite { private val testRepo = Option(System.getenv("TEST_REPOSITORY")).getOrElse(sys.error("TEST_REPOSITORY not set")) private val user = Option(System.getenv("TEST_REPOSITORY_USER")).getOrElse(sys.error("TEST_REPOSITORY_USER not set")) private val password = Option(System.getenv("TEST_REPOSITORY_PASSWORD")).getOrElse(sys.error("TEST_REPOSITORY_PASSWORD not set")) private val testHost = new URI(testRepo).getHost private def deleteRecursive(f: File): Unit = { if (f.isDirectory) f.listFiles().foreach(deleteRecursive) f.delete() } private def withTmpDir[T](f: Path => T): T = { val dir = Files.createTempDirectory("coursier-test") try f(dir) finally { deleteRecursive(dir.toFile) } } private def testCredentials(credentials: DirectCredentials): Unit = { val result = withTmpDir { dir => Resolve() .noMirrors .withRepositories(Seq( MavenRepository(testRepo), Repositories.central )) .addDependencies(dep"com.abc:test:0.1".withTransitive(false)) .withCache( FileCache() .noCredentials .withLocation(dir.toFile) .addCredentials(credentials) ) .run() } val modules = result.minDependencies.map(_.module) val expectedModules = Set(mod"com.abc:test") assert(modules == expectedModules) } val tests = Tests { * - { testCredentials { DirectCredentials() .withHost(testHost) .withUsername(user) .withPassword(password) .withMatchHost(true) .withHttpsOnly(false) } } * - { val credentialsStr = s"$testHost $user:$password" val credentials = CredentialsParser.parse(credentialsStr) match { case Left(error) => sys.error(s"Error parsing credentials: $error") case Right(c) => c } testCredentials(credentials) } * - { val content = s"""foo.username=$user |foo.password=$password |foo.host=$testHost |""".stripMargin val allCredentials = FileCredentials.parse(content, s"'$content'") assert(allCredentials.length == 1) val credentials = allCredentials.head testCredentials(credentials) } } }
Example 171
Source File: DirectCredentials.scala From coursier with Apache License 2.0 | 5 votes |
package coursier.credentials import java.net.URI import coursier.core.Authentication import scala.util.Try import dataclass._ @data class DirectCredentials( host: String = "", usernameOpt: Option[String] = None, passwordOpt: Option[Password[String]] = None, @since realm: Option[String] = None, @since optional: Boolean = true, @since matchHost: Boolean = DirectCredentials.defaultMatchHost, httpsOnly: Boolean = DirectCredentials.defaultHttpsOnly, passOnRedirect: Boolean = false ) extends Credentials { def withUsername(username: String): DirectCredentials = withUsernameOpt(Some(username)) def withPassword(password: String): DirectCredentials = withPasswordOpt(Some(Password(password))) def withRealm(realm: String): DirectCredentials = withRealm(Option(realm)) private def nonEmpty: Boolean = usernameOpt.nonEmpty && passwordOpt.nonEmpty // Can be called during redirections, to check whether these credentials apply to the redirection target def autoMatches(url: String, realm0: Option[String]): Boolean = nonEmpty && matchHost && { val uriOpt = Try(new URI(url)).toOption val schemeOpt = uriOpt.flatMap(uri => Option(uri.getScheme)) val hostOpt = uriOpt.flatMap(uri => Option(uri.getHost)) ((schemeOpt.contains("http") && !httpsOnly) || schemeOpt.contains("https")) && hostOpt.contains(host) && realm.forall(realm0.contains) } // Only called on initial artifact URLs, not on the ones originating from redirections def matches(url: String, user: String): Boolean = nonEmpty && { val uriOpt = Try(new URI(url)).toOption val schemeOpt = uriOpt.flatMap(uri => Option(uri.getScheme)) val hostOpt = uriOpt.flatMap(uri => Option(uri.getHost)) val userInfoOpt = uriOpt.flatMap(uri => Option(uri.getUserInfo)) // !matchHost && // ? userInfoOpt.isEmpty && ((schemeOpt.contains("http") && !httpsOnly) || schemeOpt.contains("https")) && hostOpt.contains(host) && usernameOpt.contains(user) } def authentication: Authentication = Authentication( usernameOpt.getOrElse(""), passwordOpt.map(_.value), realmOpt = realm, optional = optional, httpsOnly = httpsOnly, passOnRedirect = passOnRedirect ) def get(): Seq[DirectCredentials] = Seq(this) } object DirectCredentials { def apply(host: String, username: String, password: String): DirectCredentials = DirectCredentials(host, Some(username), Some(Password(password))) def apply(host: String, username: String, password: String, realm: Option[String]): DirectCredentials = DirectCredentials(host, Some(username), Some(Password(password)), realm) def defaultMatchHost: Boolean = true def defaultHttpsOnly: Boolean = false }
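A sketch illustrating the matching rules encoded above; `withMatchHost` is assumed to be one of the accessors generated by the `@data` macro (as `withUsernameOpt` and friends are), and all values are examples:

    val creds = DirectCredentials("repo.example.com", "alice", "s3cret")
      .withMatchHost(true)

    creds.autoMatches("https://repo.example.com/a.jar", None)  // true: host matches, https ok
    creds.autoMatches("http://repo.example.com/a.jar", None)   // true only because httpsOnly = false
    creds.matches("https://other.example.com/a.jar", "alice")  // false: host differs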
Example 172
Source File: S3LikeLocalFileSystem.scala From delta with Apache License 2.0 | 5 votes |
package org.apache.spark.sql.delta import java.net.URI import org.apache.hadoop.conf.Configuration import org.apache.hadoop.fs.RawLocalFileSystem class S3LikeLocalFileSystem extends RawLocalFileSystem { private var uri: URI = _ override def getScheme: String = "s3" override def initialize(name: URI, conf: Configuration): Unit = { uri = URI.create(name.getScheme + ":///") super.initialize(name, conf) } override def getUri(): URI = if (uri == null) { // RawLocalFileSystem's constructor will call this one before `initialize` is called. // Just return the super's URI to avoid NPE. super.getUri } else { uri } }
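A sketch of wiring the filesystem into a test session; `fs.s3.impl` is the standard Hadoop key that binds a scheme to a FileSystem class, and the path below is an example:

    // Map the s3:// scheme to this local implementation for tests.
    val spark = org.apache.spark.sql.SparkSession.builder()
      .master("local[2]")
      .appName("s3-like-local-fs-test")
      .config("spark.hadoop.fs.s3.impl", classOf[S3LikeLocalFileSystem].getName)
      .getOrCreate()

    // The path component of an s3:/// URI is served from the local filesystem,
    // so s3-style code paths can be exercised without AWS.
    spark.read.parquet("s3:///tmp/data")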
Example 173
Source File: VectorGrid.scala From osmesa with Apache License 2.0 | 5 votes |
package osmesa.analytics import java.io.ByteArrayInputStream import java.net.URI import java.util.zip.GZIPInputStream import geotrellis.proj4.WebMercator import geotrellis.spark.tiling.ZoomedLayoutScheme import geotrellis.vector.{Extent, PointFeature} import geotrellis.vectortile.{Layer, VInt64, VectorTile} import org.apache.commons.io.IOUtils import org.apache.spark.internal.Logging import osmesa.analytics.updater.Implicits._ import osmesa.analytics.updater._ import scala.collection.GenMap import scala.collection.parallel.TaskSupport trait VectorGrid extends Logging { // Default base zoom (highest resolution tiles produced) val DefaultBaseZoom: Int = 10 // Number of cells per side in a gridded tile implicit val Cells: Int = 128 // Number of cells in a gridded tile at the base of the pyramid (may be used for over-zooming) val BaseCells: Int = Cells // Default upload concurrency val DefaultUploadConcurrency: Int = 8 implicit val LayoutScheme: ZoomedLayoutScheme = ZoomedLayoutScheme(WebMercator) val SequenceLayerName: String = "__sequences__" def getCommittedSequences(tile: VectorTile): Set[Int] = // NOTE when working with hashtags, this should be the changeset sequence, since changes from a // single sequence may appear in different batches depending on when changeset metadata arrives tile.layers .get(SequenceLayerName) .map(_.features.flatMap(f => f.data.values.map(valueToLong).map(_.intValue))) .map(_.toSet) .getOrElse(Set.empty) def makeSequenceLayer(sequences: Set[Int], extent: Extent, tileWidth: Int = 4096): (String, Layer) = { // create a second layer w/ a feature corresponding to committed sequences (in the absence of // available tile / layer metadata) val updatedSequences = sequences.toSeq.sorted .takeRight(1000) .zipWithIndex .map { case (seq, idx) => idx.toString -> VInt64(seq) } .toMap val sequenceFeature = PointFeature(extent.center, updatedSequences) makeLayer(SequenceLayerName, extent, Seq(sequenceFeature), tileWidth) } def loadMVTs(urls: Map[URI, Extent])( implicit taskSupport: TaskSupport): GenMap[URI, VectorTile] = { // convert to a parallel collection to load more tiles concurrently val parUrls = urls.par parUrls.tasksupport = taskSupport parUrls.map { case (uri, extent) => (uri, read(uri).map( bytes => VectorTile.fromBytes( IOUtils.toByteArray(new GZIPInputStream(new ByteArrayInputStream(bytes))), extent))) } filter { case (_, mvt) => mvt.isDefined } map { case (uri, mvt) => uri -> mvt.get } } }
Example 174
Source File: DbUtils.scala From osmesa with Apache License 2.0 | 5 votes |
package osmesa.apps import java.net.URI import java.sql.Connection import vectorpipe.util.DBUtils object DbUtils { def saveLocations(procName: String, sequence: Int, databaseURI: URI) = { var connection: Connection = null try { connection = DBUtils.getJdbcConnection(databaseURI) val upsertSequence = connection.prepareStatement( """ |INSERT INTO checkpoints (proc_name, sequence) |VALUES (?, ?) |ON CONFLICT (proc_name) |DO UPDATE SET sequence = ? """.stripMargin ) upsertSequence.setString(1, procName) upsertSequence.setInt(2, sequence) upsertSequence.setInt(3, sequence) upsertSequence.execute() } finally { if (connection != null) connection.close() } } }
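A sketch of a call site; the JDBC URI shape is whatever `DBUtils.getJdbcConnection` expects, so the value below is a placeholder:

    DbUtils.saveLocations(
      procName = "ChangeStreamProcessor",
      sequence = 4003050,
      databaseURI = new java.net.URI("postgresql://user:pass@localhost:5432/osm"))
    // Re-running with the same proc_name overwrites the checkpoint (upsert on conflict).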
Example 175
Source File: EditHistogramTileCreator.scala From osmesa with Apache License 2.0 | 5 votes |
package osmesa.apps.batch import java.net.URI import cats.implicits._ import com.monovore.decline._ import org.apache.spark.sql.SparkSession import org.apache.spark.sql.functions._ import org.locationtech.geomesa.spark.jts._ import osmesa.analytics.{Analytics, EditHistogram} import vectorpipe.functions.asDouble object EditHistogramTileCreator extends CommandApp( name = "edit-histogram", header = "Create vector tiles containing histograms of editing activity", main = { val historyOpt = Opts .option[URI]("history", help = "URI of the history ORC file to process.") val outputOpt = Opts.option[URI]("out", help = "Base URI for output.") val concurrentUploadsOpt = Opts .option[Int]("concurrent-uploads", short = "c", metavar = "concurrent uploads", help = "Set the number of concurrent uploads.") .orNone val baseZoomOpt = Opts .option[Int]("base-zoom", short = "z", metavar = "Base zoom", help = "Most detailed zoom level") .orNone ( historyOpt, outputOpt, concurrentUploadsOpt, baseZoomOpt ).mapN { (historyURI, outputURI, _concurrentUploads, baseZoom) => implicit val spark: SparkSession = Analytics.sparkSession("State of the Data tile generation") import spark.implicits._ implicit val concurrentUploads: Option[Int] = _concurrentUploads spark.withJTS val history = spark.read.orc(historyURI.toString) val nodes = history .where('type === "node" and 'lat.isNotNull and 'lon.isNotNull) .withColumn("lat", asDouble('lat)) .withColumn("lon", asDouble('lon)) .where('uid > 1) .select(st_makePoint('lon, 'lat) as 'geom, year('timestamp) * 1000 + dayofyear('timestamp) as 'key) val stats = EditHistogram.create(nodes, outputURI, baseZoom.getOrElse(EditHistogram.DefaultBaseZoom)) println(s"${stats.count} tiles created.") spark.stop() } } )
Example 176
Source File: ChangeStreamProcessor.scala From osmesa with Apache License 2.0 | 5 votes |
package osmesa.apps.streaming import java.net.URI import cats.implicits._ import com.monovore.decline._ import org.apache.spark.sql._ import osmesa.analytics.Analytics import vectorpipe.sources.Source import vectorpipe.{internal => ProcessOSM} object ChangeStreamProcessor extends CommandApp( name = "osmesa-diff-stream-processor", header = "display diffs from a change stream", main = { val changeSourceOpt = Opts .option[URI]( "change-source", short = "c", metavar = "uri", help = "Location of changes to process" ) .withDefault(new URI("https://planet.osm.org/replication/minute/")) val startSequenceOpt = Opts .option[Int]( "start-sequence", short = "s", metavar = "sequence", help = "Starting sequence. If absent, the current (remote) sequence will be used." ) .orNone val endSequenceOpt = Opts .option[Int]( "end-sequence", short = "e", metavar = "sequence", help = "Ending sequence. If absent, this will be an infinite stream." ) .orNone val databaseUriOpt = Opts .option[URI]( "database-url", short = "d", metavar = "database URL", help = "Database URL (default: $DATABASE_URL environment variable)" ) .orElse(Opts.env[URI]("DATABASE_URL", help = "The URL of the database")) .orNone (changeSourceOpt, startSequenceOpt, endSequenceOpt, databaseUriOpt).mapN { (changeSource, startSequence, endSequence, databaseUri) => implicit val ss: SparkSession = Analytics.sparkSession("ChangeStreamProcessor") import ss.implicits._ val options = Map( Source.BaseURI -> changeSource.toString, Source.ProcessName -> "ChangeStream" ) ++ databaseUri .map(x => Map(Source.DatabaseURI -> x.toString)) .getOrElse(Map.empty[String, String]) ++ startSequence .map(s => Map(Source.StartSequence -> s.toString)) .getOrElse(Map.empty[String, String]) ++ endSequence .map(s => Map(Source.EndSequence -> s.toString)) .getOrElse(Map.empty[String, String]) val changes = ss.readStream .format(Source.Changes) .options(options) .load val changeProcessor = changes .select('id, 'version, 'lat, 'lon, 'visible) .where('_type === ProcessOSM.NodeType and !'visible) .writeStream .queryName("display change data") .format("console") .start changeProcessor.awaitTermination() ss.stop() } } )
Example 177
Source File: BaseAwsClientTest.scala From aws-spi-akka-http with Apache License 2.0 | 5 votes |
package com.github.matsluni.akkahttpspi import java.net.URI import com.dimafeng.testcontainers.{ForAllTestContainer, GenericContainer} import com.github.matsluni.akkahttpspi.testcontainers.LocalStackReadyLogWaitStrategy import org.scalatest.concurrent.{Eventually, Futures, IntegrationPatience} import org.scalatest.BeforeAndAfter import software.amazon.awssdk.core.SdkClient import software.amazon.awssdk.regions.Region import scala.util.Random import org.scalatest.matchers.should.Matchers import org.scalatest.wordspec.AnyWordSpec trait BaseAwsClientTest[C <: SdkClient] extends AnyWordSpec with Matchers with Futures with Eventually with BeforeAndAfter with IntegrationPatience with ForAllTestContainer { lazy val defaultRegion: Region = Region.EU_WEST_1 def client: C def exposedServicePort: Int val container: GenericContainer def endpoint = new URI(s"http://localhost:${container.mappedPort(exposedServicePort)}") def randomIdentifier(length: Int): String = Random.alphanumeric.take(length).mkString } trait LocalstackBaseAwsClientTest[C <: SdkClient] extends BaseAwsClientTest[C] { def service: String lazy val exposedServicePort: Int = LocalstackServicePorts.services(service) override lazy val container: GenericContainer = new GenericContainer( dockerImage = "localstack/localstack", exposedPorts = Seq(exposedServicePort), env = Map("SERVICES" -> service), waitStrategy = Some(LocalStackReadyLogWaitStrategy) ) } object LocalstackServicePorts { //services and ports based on https://github.com/localstack/localstack val services: Map[String, Int] = Map( "s3" -> 4572, "sqs" -> 4576, "sns" -> 4575, "dynamodb" -> 4569 ) }
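A sketch of a concrete suite against the SQS port of localstack; the builder calls are standard AWS SDK v2, while the suite body itself is invented for illustration:

    import software.amazon.awssdk.auth.credentials.AnonymousCredentialsProvider
    import software.amazon.awssdk.services.sqs.SqsAsyncClient
    import software.amazon.awssdk.services.sqs.model.CreateQueueRequest

    class SqsClientSpec extends LocalstackBaseAwsClientTest[SqsAsyncClient] {
      override def service: String = "sqs"

      override lazy val client: SqsAsyncClient = SqsAsyncClient.builder()
        .region(defaultRegion)
        .credentialsProvider(AnonymousCredentialsProvider.create())
        .endpointOverride(endpoint)   // the mapped container port from BaseAwsClientTest
        .build()

      "SqsAsyncClient" should {
        "create a queue" in {
          val request = CreateQueueRequest.builder().queueName(randomIdentifier(10)).build()
          client.createQueue(request).join()
        }
      }
    }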
Example 178
Source File: ScriptURL.scala From ncdbg with BSD 3-Clause "New" or "Revised" License | 5 votes |
package com.programmaticallyspeaking.ncd.infra import java.io.File import java.net.{URI, URL} final class ScriptURL private[infra](private val uri: URI) { import ScriptURL._ def toFile: File = new File(uri) def isFile: Boolean = uri.getScheme == "file" override def equals(other: Any): Boolean = other match { case that: ScriptURL if isFile && that.isFile => toFile == that.toFile case that: ScriptURL => uri == that.uri case _ => false } override def hashCode(): Int = { if (isFile) toFile.hashCode() else uri.hashCode() } override def toString: String = uri.toString def resolve(pathLike: String): ScriptURL = { if (looksLikeRelativePath(pathLike)) new ScriptURL(uri.resolve(pathLike)) else ScriptURL.create(pathLike) } } object ScriptURL { private[ScriptURL] def looksLikeRelativePath(x: String) = x.length > 0 && x(0) != '/' && !x.lift(1).contains(':') private def isAbsoluteUnixOrWindowsFilePath(x: String) = x.startsWith("/") || (x.lift(1).contains(':') && x.indexWhere(c => c == '\\' || c == '/') > 1) def create(url: URL): ScriptURL = new ScriptURL(url.toURI) def create(something: String): ScriptURL = { val uri = if (isAbsoluteUnixOrWindowsFilePath(something)) { val withUnixSlashes = something.replace("\\", "/") val uriPart = if (withUnixSlashes.startsWith("/")) withUnixSlashes else "/" + withUnixSlashes new URI("file", "", uriPart, null) } else if (something.startsWith("jar:")) { // Just drop the 'jar:' prefix. We keep the ! character so that the JAR file itself becomes // sort of a special folder. return create(something.substring(4)) } else if (something.startsWith("file:") || something.startsWith("eval:")) { // Assume this is something resembling an URL already, e.g. file:/foo/bar, // but we don't know how many slashes there are. var (scheme, rest) = something.span(_ != ':') rest = rest.substring(1) // skip the leading : val slashCount = rest.prefixLength(_ == '/') new URI(scheme, "", "/" + rest.substring(slashCount), null) } else if (something.contains("..")) { throw new IllegalArgumentException(s"Cannot create ScriptURL from path/URL with '..' ($something)") } else { val withUnixSlashes = something.replace("\\", "/") new URI(withUnixSlashes) } normalized(new ScriptURL(uri)) } private def normalized(url: ScriptURL): ScriptURL = { val normalized = url.uri.normalize() if (url.uri.getPath != normalized.getPath) { // normalization was necessary create(normalized.toString) } else url // no normalization necessary } } object FileScriptURL { def unapply(x: Any): Option[File] = x match { case s: ScriptURL if s.isFile => Some(s.toFile) case _ => None } }
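A sketch of the behavior implied by the parsing rules above, with expected outcomes in comments:

    val winUrl = ScriptURL.create("c:\\scripts\\app.js")
    // winUrl.isFile == true; renders as a file: URI with forward slashes,
    // e.g. file:///c:/scripts/app.js

    val base = ScriptURL.create("file:///project/src/main.js")
    base.resolve("util.js")          // relative path: resolved against the base URI
    base.resolve("/other/abs.js")    // absolute path: replaces the base entirely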
Example 179
Source File: Schema.scala From mmlspark with MIT License | 5 votes |
// Copyright (C) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. See LICENSE in project root for information. package com.microsoft.ml.spark.downloader import java.io.InputStream import java.net.URI import org.apache.commons.codec.digest.DigestUtils import spray.json._ import scala.collection.JavaConversions._ import scala.collection.JavaConverters._ private[spark] object NamingConventions { def canonicalModelFilename(name: String, dataset: String): String = s"${name}_$dataset.model" def canonicalModelFilename(model: ModelSchema): String = s"${model.name}_${model.dataset}.model" } case class ModelSchema(name: String, dataset: String, modelType: String, override val uri: URI, override val hash: String, override val size: Long, inputNode: Int, numLayers: Int, layerNames: Array[String]) extends Schema(uri, hash, size) { def this(name: String, dataset: String, modelType: String, uri: URI, hash: String, size: Long, inputNode: Int, numLayers: Int, layerNames: java.util.ArrayList[String]) = { this(name, dataset, modelType, uri, hash, size, inputNode, numLayers, layerNames.toList.toArray) } override def updateURI(newURI: URI): this.type = this.copy(uri = newURI).asInstanceOf[this.type] } private[spark] object SchemaJsonProtocol extends DefaultJsonProtocol { implicit object URIJsonFormat extends JsonFormat[URI] { def write(u: URI): JsValue = { JsString(u.toString) } def read(value: JsValue): URI = new URI(value.asInstanceOf[JsString].value) } implicit val ModelSchemaFormat: RootJsonFormat[ModelSchema] = jsonFormat(ModelSchema.apply, "name", "dataset", "modelType", "uri", "hash", "size", "inputNode", "numLayers", "layerNames") }
Example 180
Source File: PortForwarding.scala From mmlspark with MIT License | 5 votes |
// Copyright (C) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. See LICENSE in project root for information. package com.microsoft.ml.spark.io.http import java.io.File import java.net.URI import com.jcraft.jsch.{JSch, Session} import org.apache.commons.io.IOUtils object PortForwarding { lazy val Jsch = new JSch() def forwardPortToRemote(username: String, sshHost: String, sshPort: Int, bindAddress: String, remotePortStart: Int, localHost: String, localPort: Int, keyDir: Option[String], keySas: Option[String], maxRetries: Int, timeout: Int ): (Session, Int) = { keyDir.foreach(kd => new File(kd).listFiles().foreach(f => try { Jsch.addIdentity(f.getAbsolutePath) } catch { case _: com.jcraft.jsch.JSchException => case e: Exception => throw e } ) ) keySas.foreach { ks => val privateKeyBytes = IOUtils.toByteArray(new URI(ks)) Jsch.addIdentity("forwardingKey", privateKeyBytes, null, null) //scalastyle:ignore null } val session = Jsch.getSession(username, sshHost, sshPort) session.setConfig("StrictHostKeyChecking", "no") session.setTimeout(timeout) session.connect() var attempt = 0 var foundPort: Option[Int] = None while (foundPort.isEmpty && attempt <= maxRetries) { try { session.setPortForwardingR( bindAddress, remotePortStart + attempt, localHost, localPort) foundPort = Some(remotePortStart + attempt) } catch { case _: Exception => println(s"failed to forward port. Attempt: $attempt") attempt += 1 } } if (foundPort.isEmpty) { throw new RuntimeException(s"Could not find open port between " + s"$remotePortStart and ${remotePortStart + maxRetries}") } println(s"forwarding to ${foundPort.get}") (session, foundPort.get) } def forwardPortToRemote(options: Map[String, String]): (Session, Int) = { forwardPortToRemote( options("forwarding.username"), options("forwarding.sshhost"), options.getOrElse("forwarding.sshport", "22").toInt, options.getOrElse("forwarding.bindaddress", "*"), options.get("forwarding.remoteportstart") .orElse(options.get("forwarding.localport")).get.toInt, options.getOrElse("forwarding.localhost", "0.0.0.0"), options("forwarding.localport").toInt, options.get("forwarding.keydir"), options.get("forwarding.keysas"), options.getOrElse("forwarding.maxretires", "50").toInt, options.getOrElse("forwarding.timeout", "20000").toInt ) } }
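A sketch of the map-based entry point; the keys are exactly the ones read by `forwardPortToRemote` above, while the concrete values are placeholders:

    val (session, remotePort) = PortForwarding.forwardPortToRemote(Map(
      "forwarding.username"  -> "sshuser",
      "forwarding.sshhost"   -> "gateway.example.com",
      "forwarding.localport" -> "8899",
      "forwarding.keydir"    -> "/home/sshuser/.ssh"))
    // Remote clients can now reach the local service via gateway.example.com:remotePort.
    session.disconnect()   // tear the tunnel down when done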
Example 181
Source File: SpeechToTextSuite.scala From mmlspark with MIT License | 5 votes |
// Copyright (C) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. See LICENSE in project root for information. package com.microsoft.ml.spark.cognitive.split2 import java.net.{URI, URL} import com.microsoft.ml.spark.Secrets import com.microsoft.ml.spark.cognitive.{SpeechResponse, SpeechToText} import com.microsoft.ml.spark.core.test.fuzzing.{TestObject, TransformerFuzzing} import org.apache.commons.compress.utils.IOUtils import org.apache.spark.ml.util.MLReadable import org.apache.spark.sql.{DataFrame, Row} import org.scalactic.Equality trait SpeechKey { lazy val speechKey = sys.env.getOrElse("SPEECH_API_KEY", Secrets.SpeechApiKey) } class SpeechToTextSuite extends TransformerFuzzing[SpeechToText] with SpeechKey { import session.implicits._ val region = "eastus" val resourcesDir = System.getProperty("user.dir") + "/src/test/resources/" val uri = new URI(s"https://$region.api.cognitive.microsoft.com/sts/v1.0/issuetoken") val language = "en-us" val profanity = "masked" val format = "simple" lazy val stt = new SpeechToText() .setSubscriptionKey(speechKey) .setLocation(region) .setOutputCol("text") .setAudioDataCol("audio") .setLanguage("en-US") lazy val audioBytes: Array[Byte] = { IOUtils.toByteArray(new URL("https://mmlspark.blob.core.windows.net/datasets/Speech/test1.wav").openStream()) } lazy val df: DataFrame = Seq( Tuple1(audioBytes) ).toDF("audio") override lazy val dfEq = new Equality[DataFrame] { override def areEqual(a: DataFrame, b: Any): Boolean = baseDfEq.areEqual(a.drop("audio"), b.asInstanceOf[DataFrame].drop("audio")) } override def testSerialization(): Unit = { tryWithRetries(Array(0, 100, 100, 100, 100))(super.testSerialization) } def jaccardSimilarity(s1: String, s2: String): Double = { val a = Set(s1) val b = Set(s2) a.intersect(b).size.toDouble / (a | b).size.toDouble } test("Basic Usage") { val toObj: Row => SpeechResponse = SpeechResponse.makeFromRowConverter val result = toObj(stt.setFormat("simple") .transform(df).select("text") .collect().head.getStruct(0)) result.DisplayText.get.contains("this is a test") } test("Detailed Usage") { val toObj = SpeechResponse.makeFromRowConverter val result = toObj(stt.setFormat("detailed") .transform(df).select("text") .collect().head.getStruct(0)) result.NBest.get.head.Display.contains("this is a test") } override def testObjects(): Seq[TestObject[SpeechToText]] = Seq(new TestObject(stt, df)) override def reader: MLReadable[_] = SpeechToText }
Example 182
Source File: Dex2JawaConverter.scala From Argus-SAF with Apache License 2.0 | 5 votes |
package org.argus.amandroid.core.decompile import java.io._ import org.argus.jawa.core.util._ import java.net.URI import java.util.concurrent.TimeoutException import org.argus.amandroid.core.dedex.JawaDeDex import org.argus.amandroid.core.util.FixResources import org.xml.sax.SAXParseException object ConverterUtil { def copy(srcUri: FileResourceUri, destUri: FileResourceUri) { def copyFile(f: File) { try { val fin = new FileInputStream(f) val dest = new File(new File(new URI(destUri)), f.getName) val fout = new FileOutputStream(dest) val buffer = new Array[Byte](1024) var bytesRead = fin.read(buffer) while (bytesRead > 0) { fout.write(buffer, 0, bytesRead) bytesRead = fin.read(buffer) } fin.close() fout.close() } catch { case e: Exception => e.printStackTrace() } } val src = new File(new URI(srcUri)) // val dest = new File(new URI(destUri)) if (src.exists() && src.isDirectory) { src.listFiles().foreach { f => if (f.isFile) { copyFile(f) } } } } def cleanDir(dirUri: FileResourceUri) { val dir = new File(new URI(dirUri)) if (dir.exists) dir.listFiles.foreach { f => if (f.isDirectory) { cleanDir(f.getAbsoluteFile.toURI.toASCIIString) } f.delete() } } }
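A sketch of the two helpers; this assumes `FileResourceUri` is a string form of a file URI, as the `new URI(...)` calls above suggest:

    val srcUri  = new java.io.File("out/dex").toURI.toASCIIString
    val destUri = new java.io.File("out/jawa").toURI.toASCIIString

    ConverterUtil.copy(srcUri, destUri)   // copies top-level files only (not recursive)
    ConverterUtil.cleanDir(srcUri)        // recursively empties the directory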
Example 183
Source File: ChatController.scala From Scala-Reactive-Programming with MIT License | 5 votes |
package controllers import java.net.URI import javax.inject._ import akka.actor.ActorSystem import akka.event.Logging import akka.stream.Materializer import akka.stream.scaladsl.{BroadcastHub, Flow, Keep, MergeHub, Source} import play.api.Logger import play.api.mvc._ import scala.concurrent.{ExecutionContext, Future} @Singleton class ChatController @Inject()(cc: ControllerComponents) (implicit actorSystem: ActorSystem, mat: Materializer, executionContext: ExecutionContext, webJarsUtil: org.webjars.play.WebJarsUtil) extends AbstractController(cc) with RequestMarkerContext { private type WSMessage = String private val logger = Logger(getClass) private implicit val logging = Logging(actorSystem.eventStream, logger.underlyingLogger.getName) private val (chatSink, chatSource) = { val source = MergeHub.source[WSMessage] .log("source") .recoverWithRetries(-1, { case _: Exception ⇒ Source.empty }) val sink = BroadcastHub.sink[WSMessage] source.toMat(sink)(Keep.both).run() } private val userFlow: Flow[WSMessage, WSMessage, _] = { Flow.fromSinkAndSource(chatSink, chatSource) } def index: Action[AnyContent] = Action { implicit request: RequestHeader => val webSocketUrl = routes.ChatController.chat().webSocketURL() logger.info(s"index: ") Ok(views.html.index(webSocketUrl)) } def chat(): WebSocket = { WebSocket.acceptOrResult[WSMessage, WSMessage] { case rh if sameOriginCheck(rh) => Future.successful(userFlow).map { flow => Right(flow) }.recover { case e: Exception => val msg = "Cannot create websocket" logger.error(msg, e) val result = InternalServerError(msg) Left(result) } case rejected => logger.error(s"Request ${rejected} failed same origin check") Future.successful { Left(Forbidden("forbidden")) } } } private def sameOriginCheck(implicit rh: RequestHeader): Boolean = { logger.debug("Checking the ORIGIN ") rh.headers.get("Origin") match { case Some(originValue) if originMatches(originValue) => logger.debug(s"originCheck: originValue = $originValue") true case Some(badOrigin) => logger.error(s"originCheck: rejecting request because Origin header value ${badOrigin} is not in the same origin") false case None => logger.error("originCheck: rejecting request because no Origin header found") false } } private def originMatches(origin: String): Boolean = { try { val url = new URI(origin) url.getHost == "localhost" && (url.getPort match { case 9000 | 19001 => true; case _ => false }) } catch { case e: Exception => false } } }
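A standalone restatement of the same-origin rule above, for illustration only:

    import java.net.URI

    def originAllowed(origin: String): Boolean =
      try {
        val url = new URI(origin)
        url.getHost == "localhost" && (url.getPort == 9000 || url.getPort == 19001)
      } catch { case _: Exception => false }

    originAllowed("http://localhost:9000")   // true: whitelisted dev port
    originAllowed("http://localhost:8080")   // false: port not whitelisted
    originAllowed("http://evil.com:9000")    // false: host is not localhost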
Example 184
Source File: TestUtilsSuite.scala From hail with MIT License | 5 votes |
package is.hail import java.io.File import java.lang.reflect.Modifier import java.net.URI import breeze.linalg.{DenseMatrix, DenseVector} import is.hail.utils.ArrayBuilder import org.testng.annotations.{DataProvider, Test} class TestUtilsSuite extends HailSuite { @Test def matrixEqualityTest() { val M = DenseMatrix((1d, 0d), (0d, 1d)) val M1 = DenseMatrix((1d, 0d), (0d, 1.0001d)) val V = DenseVector(0d, 1d) val V1 = DenseVector(0d, 0.5d) TestUtils.assertMatrixEqualityDouble(M, DenseMatrix.eye(2)) TestUtils.assertMatrixEqualityDouble(M, M1, 0.001) TestUtils.assertVectorEqualityDouble(V, 2d * V1) intercept[Exception](TestUtils.assertVectorEqualityDouble(V, V1)) intercept[Exception](TestUtils.assertMatrixEqualityDouble(M, M1)) } @Test def constantVectorTest() { assert(TestUtils.isConstant(DenseVector())) assert(TestUtils.isConstant(DenseVector(0))) assert(TestUtils.isConstant(DenseVector(0, 0))) assert(TestUtils.isConstant(DenseVector(0, 0, 0))) assert(!TestUtils.isConstant(DenseVector(0, 1))) assert(!TestUtils.isConstant(DenseVector(0, 0, 1))) } @Test def removeConstantColsTest(): Unit = { val M = DenseMatrix((0, 0, 1, 1, 0), (0, 1, 0, 1, 1)) val M1 = DenseMatrix((0, 1, 0), (1, 0, 1)) assert(TestUtils.removeConstantCols(M) == M1) } }
Example 185
Source File: TestConfig.scala From toketi-kafka-connect-iothub with MIT License | 5 votes |
// Copyright (c) Microsoft. All rights reserved. package com.microsoft.azure.iot.kafka.connect.source.testhelpers import java.net.URI import java.util import com.microsoft.azure.eventhubs.ConnectionStringBuilder import com.microsoft.azure.iot.kafka.connect.source.IotHubSourceConfig import com.typesafe.config.ConfigFactory object TestConfig { lazy val sourceTaskTestProps: util.Map[String, String] = { val props = new util.HashMap[String, String]() props.put(IotHubSourceConfig.EventHubCompatibleConnectionString, connStr.toString) props.put(IotHubSourceConfig.EventHubCompatibleName, iotHubName) props.put(IotHubSourceConfig.IotHubConsumerGroup, "$Default") props.put(IotHubSourceConfig.TaskPartitionOffsetsMap, """{"0":"5","2":"10","3":"-1"}""") props.put(IotHubSourceConfig.KafkaTopic, "test") props.put(IotHubSourceConfig.BatchSize, "5") props.put(IotHubSourceConfig.ReceiveTimeout, "5") props } lazy val sourceTaskTestPropsStartTime: util.Map[String, String] = { val props = new util.HashMap[String, String]() props.put(IotHubSourceConfig.EventHubCompatibleConnectionString, connStr.toString) props.put(IotHubSourceConfig.EventHubCompatibleName, iotHubName) props.put(IotHubSourceConfig.IotHubConsumerGroup, "$Default") props.put(IotHubSourceConfig.TaskPartitionOffsetsMap, """{"0":"5","2":"10","3":"-1"}""") props.put(IotHubSourceConfig.IotHubStartTime, "2016-12-10T00:00:00Z") props.put(IotHubSourceConfig.KafkaTopic, "test") props.put(IotHubSourceConfig.BatchSize, "5") props.put(IotHubSourceConfig.ReceiveTimeout, "5") props } lazy val sourceSingleTaskTestProps: util.Map[String, String] = { val props = new util.HashMap[String, String]() props.put(IotHubSourceConfig.EventHubCompatibleConnectionString, connStr.toString) props.put(IotHubSourceConfig.EventHubCompatibleName, iotHubName) props.put(IotHubSourceConfig.IotHubConsumerGroup, "$Default") props.put(IotHubSourceConfig.TaskPartitionOffsetsMap, """{"0":"-1"}""") props.put(IotHubSourceConfig.KafkaTopic, "test") props.put(IotHubSourceConfig.BatchSize, "5") props.put(IotHubSourceConfig.ReceiveTimeout, "5") props } lazy val sourceConnectorTestProps: util.Map[String, String] = { val props = new util.HashMap[String, String]() props.put(IotHubSourceConfig.EventHubCompatibleName, iotHubName) props.put(IotHubSourceConfig.EventHubCompatibleEndpoint, iotHubEndpoint) props.put(IotHubSourceConfig.IotHubAccessKeyName, iotHubKeyName) props.put(IotHubSourceConfig.IotHubAccessKeyValue, iotHubKeyValue) props.put(IotHubSourceConfig.IotHubPartitions, iotHubPartitions.toString) props.put(IotHubSourceConfig.KafkaTopic, "test") props.put(IotHubSourceConfig.ReceiveTimeout, "45") props.put(IotHubSourceConfig.IotHubOffset, "-1,5,10,15,-1") props } lazy val sourceConnectorTestPropsStartTime: util.Map[String, String] = { val props = new util.HashMap[String, String]() props.put(IotHubSourceConfig.EventHubCompatibleName, iotHubName) props.put(IotHubSourceConfig.EventHubCompatibleEndpoint, iotHubEndpoint) props.put(IotHubSourceConfig.IotHubAccessKeyName, iotHubKeyName) props.put(IotHubSourceConfig.IotHubAccessKeyValue, iotHubKeyValue) props.put(IotHubSourceConfig.IotHubPartitions, iotHubPartitions.toString) props.put(IotHubSourceConfig.KafkaTopic, "test") props.put(IotHubSourceConfig.IotHubStartTime, "2016-12-10T00:00:00Z") props } lazy val invalidSourceConnectorTestProps: util.Map[String, String] = { val props = new util.HashMap[String, String]() props.put(IotHubSourceConfig.EventHubCompatibleName, iotHubName) props.put(IotHubSourceConfig.EventHubCompatibleEndpoint, iotHubEndpoint) 
props.put(IotHubSourceConfig.IotHubAccessKeyName, iotHubKeyName) props.put(IotHubSourceConfig.IotHubAccessKeyValue, iotHubKeyValue) props } lazy private val config = ConfigFactory.load() lazy private val iotHubConfig = config.getConfig("iothub") lazy private val iotHubName = iotHubConfig.getString("name") lazy private val iotHubEndpoint = iotHubConfig.getString("endpoint") lazy private val iotHubKeyName = iotHubConfig.getString("keyName") lazy private val iotHubKeyValue = iotHubConfig.getString("key") lazy private val iotHubPartitions = iotHubConfig.getInt("partitions") lazy private val connStr = new ConnectionStringBuilder() .setEndpoint(new URI(iotHubEndpoint)) .setEventHubName(iotHubName) .setSasKeyName(iotHubKeyName) .setSasKey(iotHubKeyValue) }
Example 186
Source File: S3ContainerSpecSupport.scala From reactive-aws-clients with MIT License | 5 votes |
package com.github.j5ik2o.reactive.aws.s3 import java.net.URI import com.github.j5ik2o.reactive.aws.test.RandomPortSupport import com.spotify.docker.client.{ DefaultDockerClient, DockerClient } import com.whisk.docker.impl.spotify.SpotifyDockerFactory import com.whisk.docker.scalatest.DockerTestKit import com.whisk.docker.{ DockerCommandExecutor, DockerContainer, DockerContainerState, DockerFactory, DockerReadyChecker, LogLineReceiver } import org.scalatest.Suite import software.amazon.awssdk.auth.credentials.{ AwsBasicCredentials, StaticCredentialsProvider } import software.amazon.awssdk.services.s3.S3AsyncClient import scala.concurrent.duration._ import scala.concurrent.{ ExecutionContext, Future } import scala.compat.java8.FutureConverters._ trait S3ContainerSpecSupport extends DockerTestKit with RandomPortSupport { this: Suite => // override val StartContainersTimeout: FiniteDuration = 30 seconds protected val connectTimeout: FiniteDuration = 3 seconds protected val readTimeout: FiniteDuration = 3 seconds protected val dockerClient: DockerClient = DefaultDockerClient .fromEnv() .connectTimeoutMillis(connectTimeout.toMillis) .readTimeoutMillis(readTimeout.toMillis).build() protected lazy val accessKeyId = "AKIAIOSFODNN7EXAMPLE" protected lazy val secretAccessKey = "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY" protected lazy val endpoint = s"http://127.0.0.1:$s3Port" protected lazy val javaS3Client: S3AsyncClient = S3AsyncClient .builder() .credentialsProvider(StaticCredentialsProvider.create(AwsBasicCredentials.create(accessKeyId, secretAccessKey))) .endpointOverride(URI.create(endpoint)) .build() class S3DockerReadyChecker(s3client: S3AsyncClient) extends DockerReadyChecker { override def apply(container: DockerContainerState)( implicit docker: DockerCommandExecutor, ec: ExecutionContext ): Future[Boolean] = s3client .listBuckets().toScala.map { _ => true }(ec).recover { case _ => false }(ec) } override implicit def dockerFactory: DockerFactory = new SpotifyDockerFactory(dockerClient) protected lazy val s3Port: Int = temporaryServerPort() protected lazy val s3Container: DockerContainer = DockerContainer("minio/minio") .withPorts(9000 -> Some(s3Port)) .withEnv(s"MINIO_ACCESS_KEY=$accessKeyId", s"MINIO_SECRET_KEY=$secretAccessKey") .withCommand("server", "/data") .withLogLineReceiver(LogLineReceiver(true, { message => println(message) })) .withReadyChecker(new S3DockerReadyChecker(javaS3Client)) abstract override def dockerContainers: List[DockerContainer] = s3Container :: super.dockerContainers }
Example 187
Source File: ShEx2JsonSchema.scala From shaclex with MIT License | 5 votes |
package es.weso.shex.converter

import java.net.URI

import es.weso.shex.Schema

case class JsonSchema(prefixes: List[(String, URI)])

object Shex2JsonSchema {
  def shex2JsonSchema(schema: Schema): JsonSchema = {
    // schema.prefixMap.
    // JsonSchema()
    ???
  }
}
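The conversion itself is still a stub (`???`), so only the `JsonSchema` wrapper is usable today. A tiny sketch with made-up prefixes:

import java.net.URI

// Hypothetical prefixes for illustration; the ShEx-to-JSON-Schema converter is unimplemented.
val jsonSchema = JsonSchema(
  List(
    "ex"  -> new URI("http://example.org/"),
    "xsd" -> new URI("http://www.w3.org/2001/XMLSchema#")
  )
)
println(jsonSchema.prefixes.map { case (p, uri) => s"$p: <$uri>" }.mkString("\n"))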
Example 188
Source File: Neo4jTestUtils.scala From morpheus with Apache License 2.0 | 5 votes |
package org.opencypher.okapi.neo4j.io.testing

import java.net.URI

import org.neo4j.driver.v1.{Driver, Session, StatementResult}
import org.opencypher.okapi.neo4j.io.Neo4jConfig

import scala.collection.JavaConverters._

object Neo4jTestUtils {

  case class Neo4jContext(driver: Driver, session: Session, config: Neo4jConfig) {

    private def dropConstraint(desc: String) = {
      val regexp = """CONSTRAINT ON (.+) ASSERT \(?(.+?)\)? IS NODE KEY""".r
      val constraint = desc match {
        case regexp(label, keys) => s"CONSTRAINT ON $label ASSERT ($keys) IS NODE KEY"
        case c => c
      }
      execute(s"DROP $constraint").consume()
    }

    def clear(): Unit = {
      execute("MATCH (n) DETACH DELETE n").consume()
      execute("CALL db.constraints()")
        .list(_.get("description").asString()).asScala
        .foreach(dropConstraint)
      execute("CALL db.indexes YIELD description")
        .list(_.get(0).asString).asScala
        .foreach(index => execute(s"DROP $index").consume())
    }

    def close(): Unit = {
      try {
        clear()
      } finally {
        session.close()
        driver.close()
      }
    }

    def execute(cypher: String): StatementResult = session.run(cypher)
  }

  def connectNeo4j(dataFixture: String = "", uri: String = "bolt://localhost:7687"): Neo4jContext = {
    val neo4jURI = URI.create(uri)
    val config = Neo4jConfig(neo4jURI, user = "anonymous", password = Some("password"), encrypted = false)
    val driver = config.driver()
    val session = driver.session()
    val neo4jContext = Neo4jContext(driver, session, config)
    neo4jContext.clear()
    if (dataFixture.nonEmpty) {
      neo4jContext.execute(dataFixture).consume()
    }
    neo4jContext
  }
}
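A quick usage sketch against a local Neo4j instance, using only the API defined above (the fixture data is illustrative):

import org.opencypher.okapi.neo4j.io.testing.Neo4jTestUtils._

val ctx = connectNeo4j(dataFixture = "CREATE (:Person {name: 'Ada'})")
try {
  val names = ctx
    .execute("MATCH (p:Person) RETURN p.name AS name")
    .list(_.get("name").asString())
  println(names)
} finally {
  ctx.close() // close() also clears the database first
}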
Example 189
Source File: Neo4jWriteBenchmark.scala From morpheus with Apache License 2.0 | 5 votes |
package org.opencypher.okapi.neo4j.io

import java.net.URI

import org.neo4j.driver.v1.Values
import org.opencypher.okapi.impl.util.Measurement
import org.opencypher.okapi.neo4j.io.Neo4jHelpers.Neo4jDefaults.metaPropertyKey
import org.opencypher.okapi.neo4j.io.Neo4jHelpers._

object Neo4jWriteBenchmark extends App {

  val config = Neo4jConfig(
    new URI("bolt://localhost"),
    "neo4j",
    Some("passwd")
  )

  def rowToListValue(data: Array[AnyRef]) = Values.value(data.map(Values.value): _*)

  private val numberOfNodes = 10000

  val inputNodes = (1 to numberOfNodes).map { i =>
    Array[AnyRef](
      i.asInstanceOf[AnyRef],
      i.asInstanceOf[AnyRef],
      i.toString.asInstanceOf[AnyRef],
      (i % 2 == 0).asInstanceOf[AnyRef])
  }

  val inputRels = (2 to numberOfNodes + 1).map { i =>
    Array[AnyRef](
      i.asInstanceOf[AnyRef],
      (i - 1).asInstanceOf[AnyRef],
      i.asInstanceOf[AnyRef],
      (i % 2 == 0).asInstanceOf[AnyRef])
  }

  config.withSession { session =>
    session.run(s"CREATE CONSTRAINT ON (n:Foo) ASSERT n.$metaPropertyKey IS UNIQUE").consume()
  }

  val timings: Seq[Long] = (1 to 10).map { _ =>
    config.withSession { session =>
      session.run("MATCH (n) DETACH DELETE n").consume()
    }
    Measurement.time {
      ElementWriter.createNodes(
        inputNodes.toIterator,
        Array(metaPropertyKey, "val1", "val2", "val3"),
        config,
        Set("Foo", "Bar", "Baz")
      )(rowToListValue)

      ElementWriter.createRelationships(
        inputRels.toIterator,
        1,
        2,
        Array(metaPropertyKey, null, null, "val3"),
        config,
        "REL",
        Some("Foo")
      )(rowToListValue)
    }._2
  }

  println(s"MIN: ${timings.min}")
  println(s"MAX: ${timings.max}")
  println(s"AVG: ${timings.sum / timings.size}")
}
Example 190
Source File: YelpConstants.scala From morpheus with Apache License 2.0 | 5 votes |
package org.opencypher.morpheus.integration.yelp

import java.net.URI

import org.opencypher.okapi.api.graph.{GraphName, Namespace}
import org.opencypher.okapi.neo4j.io.Neo4jConfig

object YelpConstants {

  val neo4jConfig = Neo4jConfig(new URI("bolt://localhost:7687"), "neo4j", Some("yelp"))

  val yelpGraphName = GraphName("yelp")
  val yelpDB = "yelp"
  val yelpBookDB = "yelpBook"

  val defaultYelpJsonFolder = "yelp_json"
  val defaultYelpGraphFolder = "yelp_graph"
  val defaultYelpSubsetFolder = "yelp_subset"

  val userLabel = "User"
  val businessLabel = "Business"
  val reviewRelType = "REVIEWS"
  val friendRelType = "FRIEND"

  val fsNamespace = Namespace("fileSystem")
  val neo4jNamespace = Namespace("neo4j")
  val hiveNamespace = Namespace("hive")

  val city = "Boulder City"
  val cityGraphName = GraphName(city.replace(" ", "").toLowerCase)
  val businessTrendsGraphName = GraphName("businessTrends")

  def reviewGraphName(year: Int) = GraphName(s"$cityGraphName.review.y$year")
  def coReviewsGraphName(year: Int) = GraphName(s"$cityGraphName.coReviews.y$year")
  def coReviewedGraphName(year: Int) = GraphName(s"$cityGraphName.coReviewed.y$year")
  def coReviewAndBusinessGraphName(year: Int) = GraphName(s"$cityGraphName.coReviewsAndBusiness.y$year")

  def pageRankProp(year: Int) = s"pageRank$year"
  def pageRankCoReviewProp(year: Int) = s"pageRankCoReview$year"
  def communityProp(year: Int) = s"community$year"
  def isSimilarRelType(year: Int) = s"IS_SIMILAR_$year"

  def log(content: String, level: Int = 0): Unit = {
    val spaces = (0 to level).foldLeft("") { case (acc, _) => acc + " " }
    println(s"$spaces$content")
  }
}
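A short sketch of how these constants and helpers compose (purely illustrative):

import org.opencypher.morpheus.integration.yelp.YelpConstants._

// The year-parameterised helpers derive graph and property names from cityGraphName.
log(s"Graph for 2017 reviews: ${reviewGraphName(2017)}", level = 1)
log(s"PageRank property:      ${pageRankProp(2017)}", level = 2)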
Example 191
Source File: ExampleTest.scala From morpheus with Apache License 2.0 | 5 votes |
package org.opencypher.morpheus.examples

import java.io.{ByteArrayOutputStream, PrintStream}
import java.net.URI

import org.junit.runner.RunWith
import org.opencypher.okapi.testing.Bag._
import org.scalatest.{BeforeAndAfterAll, FunSpec, Matchers}
import org.scalatestplus.junit.JUnitRunner

import scala.io.Source

@RunWith(classOf[JUnitRunner])
abstract class ExampleTest extends FunSpec with Matchers with BeforeAndAfterAll {

  private val oldStdOut = System.out

  protected val emptyOutput: String = ""

  protected def validate(app: => Unit, expectedOut: URI): Unit = {
    validate(app, Source.fromFile(expectedOut).mkString)
  }

  protected def validateBag(app: => Unit, expectedOut: URI): Unit = {
    val source = Source.fromFile(expectedOut)
    val expectedLines = source.getLines().toList
    val appLines = capture(app).split(System.lineSeparator())
    withClue(s"${appLines.mkString("\n")} not equal to ${expectedLines.mkString("\n")}") {
      appLines.toBag shouldEqual expectedLines.toBag
    }
  }

  protected def validate(app: => Unit, expectedOut: String): Unit = {
    capture(app) shouldEqual expectedOut
  }

  private def capture(app: => Unit): String = {
    val charset = "UTF-8"
    val outCapture = new ByteArrayOutputStream()
    val printer = new PrintStream(outCapture, true, charset)
    Console.withOut(printer)(app)
    outCapture.toString(charset)
  }

  override protected def afterAll(): Unit = {
    System.setOut(oldStdOut)
    super.afterAll()
  }
}
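A hypothetical concrete suite built on this base class (the suite and app are made up) shows the intended pattern: pass the example app by name and compare its captured stdout to the expectation:

class HelloWorldExampleTest extends ExampleTest {
  it("captures stdout and compares it to the expectation") {
    validate(
      app = println("Hello, Morpheus!"),
      expectedOut = s"Hello, Morpheus!${System.lineSeparator()}"
    )
  }
}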
Example 192
Source File: SolvedQueryModelTest.scala From morpheus with Apache License 2.0 | 5 votes |
package org.opencypher.okapi.logical.impl

import java.net.URI

import org.opencypher.okapi.api.types.{CTBoolean, CTNode, CTRelationship}
import org.opencypher.okapi.impl.exception.IllegalArgumentException
import org.opencypher.okapi.ir.api.RelType
import org.opencypher.okapi.ir.api.block._
import org.opencypher.okapi.ir.api.expr.{Equals, Expr, _}
import org.opencypher.okapi.ir.api.pattern.Pattern
import org.opencypher.okapi.ir.impl.util.VarConverters._
import org.opencypher.okapi.testing.BaseTestSuite

class SolvedQueryModelTest extends BaseTestSuite with IrConstruction {

  implicit val uri: URI = URI.create("test")

  test("add fields") {
    val s = SolvedQueryModel.empty.withField('a).withFields('b, 'c)

    s.fields should equal(Set('a, 'b, 'c).map(toField))
  }

  test("contains a block") {
    val block = matchBlock(Pattern.empty.withElement('a).withElement('b).withElement('c))
    val s = SolvedQueryModel.empty.withField('a).withFields('b, 'c)

    s.contains(block) shouldBe true
  }

  test("contains several blocks") {
    val block1 = matchBlock(Pattern.empty.withElement('a -> CTNode))
    val block2 = matchBlock(Pattern.empty.withElement('b -> CTNode))
    val binds: Fields = Fields(Map(toField('c) -> Equals('a, 'b)))
    val block3 = project(binds)
    val block4 = project(ProjectedFieldsOf(toField('d) -> Equals('c, 'b)))

    val s = SolvedQueryModel.empty.withField('a).withFields('b, 'c)

    s.contains(block1) shouldBe true
    s.contains(block1, block2) shouldBe true
    s.contains(block1, block2, block3) shouldBe true
    s.contains(block1, block2, block3, block4) shouldBe false
  }

  test("solves") {
    val s = SolvedQueryModel.empty.withField('a).withFields('b, 'c)
    val p = Pattern.empty.withElement('a -> CTNode).withElement('b -> CTNode).withElement('c -> CTNode)

    s.solves(toField('a)) shouldBe true
    s.solves(toField('b)) shouldBe true
    s.solves(toField('x)) shouldBe false
    s.solves(p) shouldBe true
    s.solves(p.withElement('x -> CTNode)) shouldBe false
  }

  it("can solve a relationship") {
    val s = SolvedQueryModel.empty

    an[IllegalArgumentException] should be thrownBy s.solveRelationship('a)
    s.solveRelationship('r -> CTRelationship) should equal(
      SolvedQueryModel.empty.withField('r -> CTRelationship))
    s.solveRelationship('r -> CTRelationship("KNOWS")) should equal(
      SolvedQueryModel.empty
        .withField('r -> CTRelationship)
        .withPredicate(HasType(Var("r")(CTRelationship("KNOWS")), RelType("KNOWS")))
    )
    s.solveRelationship('r -> CTRelationship("KNOWS", "LOVES", "HATES")) should equal(
      SolvedQueryModel.empty
        .withField('r -> CTRelationship)
        .withPredicate(Ors(
          HasType(RelationshipVar("r")(), RelType("KNOWS")),
          HasType(RelationshipVar("r")(), RelType("LOVES")),
          HasType(RelationshipVar("r")(), RelType("HATES"))))
    )
  }
}
Example 193
Source File: MiniDFSClusterFixture.scala From morpheus with Apache License 2.0 | 5 votes |
package org.opencypher.morpheus.testing.fixture

import java.io.File
import java.net.URI

import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.Path
import org.apache.hadoop.hdfs.MiniDFSCluster
import org.apache.http.client.utils.URIBuilder
import org.opencypher.morpheus.testing.MorpheusTestSuite
import org.opencypher.okapi.testing.BaseTestFixture

trait MiniDFSClusterFixture extends BaseTestFixture {
  self: SparkSessionFixture with MorpheusTestSuite =>

  private val HDFS_URI_SCHEME = "hdfs"

  // Override this to some String to trigger a copy from local FS into HDFS
  // e.g. dfsTestGraph = "/foo" triggers a copy from 'file:/path/to/resources/foo' to 'hdfs://host:port/foo'
  protected def dfsTestGraphPath: Option[String] = None

  // Override this to change the path to the graph in the local file system
  // If None, it is assumed that the local graph is contained in resources/<dfsTestGraphPath>
  protected def fsTestGraphPath: Option[String] = None

  protected lazy val cluster: MiniDFSCluster = {
    val cluster = new MiniDFSCluster.Builder(sparkSession.sparkContext.hadoopConfiguration).build()
    cluster.waitClusterUp()
    // copy from local FS to HDFS if necessary
    if (dfsTestGraphPath.isDefined) {
      val dfsPathString = dfsTestGraphPath.get
      val fsPathString = fsTestGraphPath.getOrElse(getClass.getResource(dfsPathString).toString)
      cluster.getFileSystem.copyFromLocalFile(
        new Path(fsPathString),
        new Path(dfsPathString))
    }
    cluster
  }

  protected def hdfsURI: URI = new URIBuilder()
    .setScheme(HDFS_URI_SCHEME)
    .setHost(cluster.getNameNode.getHostAndPort)
    .setPath(dfsTestGraphPath.getOrElse(File.separator))
    .build()

  protected def clusterConfig: Configuration = {
    sparkSession.sparkContext.hadoopConfiguration.set("fs.default.name",
      new URIBuilder()
        .setScheme(HDFS_URI_SCHEME)
        .setHost(cluster.getNameNode.getHostAndPort)
        .build()
        .toString
    )
    sparkSession.sparkContext.hadoopConfiguration
  }

  abstract override def afterAll: Unit = {
    sparkSession.sparkContext.hadoopConfiguration.clear()
    cluster.shutdown(true)
    super.afterAll()
  }
}
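A hedged sketch of a suite using the fixture (the suite name and `/csv` resource are hypothetical; it assumes `MorpheusTestSuite` provides the usual FunSpec/Matchers style seen elsewhere in this project):

class HdfsCsvGraphTest extends MorpheusTestSuite with SparkSessionFixture with MiniDFSClusterFixture {

  // Copies src/test/resources/csv into the mini cluster before the tests run.
  override protected def dfsTestGraphPath: Option[String] = Some("/csv")

  it("exposes the test graph under an hdfs:// URI") {
    hdfsURI.getScheme shouldBe "hdfs"
  }
}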
Example 194
Source File: Neo4jConfig.scala From morpheus with Apache License 2.0 | 5 votes |
package org.opencypher.okapi.neo4j.io

import java.net.URI
import java.util.concurrent.TimeUnit

import org.neo4j.driver.v1.{AuthTokens, Config, Driver, GraphDatabase}

case class Neo4jConfig(
  uri: URI,
  user: String = "neo4j",
  password: Option[String] = None,
  encrypted: Boolean = true,
  createNodeBatchSize: Int = 100000,
  createRelationshipBatchSize: Int = 100000,
  mergeNodeBatchSize: Int = 1000,
  mergeRelationshipBatchSize: Int = 10
) {

  def driver(): Driver = password match {
    case Some(pwd) => GraphDatabase.driver(uri, AuthTokens.basic(user, pwd), boltConfig())
    case _ => GraphDatabase.driver(uri, boltConfig())
  }

  private def boltConfig(): Config = {
    val builder = Config.build.withMaxTransactionRetryTime(1, TimeUnit.MINUTES)

    if (encrypted) builder.withEncryption().toConfig
    else builder.withoutEncryption().toConfig
  }
}
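A minimal usage sketch against a local instance (the credentials are placeholders; the session calls are the standard Neo4j Java driver 1.x API):

import java.net.URI

import org.opencypher.okapi.neo4j.io.Neo4jConfig

val config = Neo4jConfig(new URI("bolt://localhost:7687"), user = "neo4j", password = Some("secret"), encrypted = false)
val driver = config.driver()
val session = driver.session()
try {
  // Runs a trivial query through the driver built from the config.
  println(session.run("RETURN 1 AS x").single().get("x").asInt())
} finally {
  session.close()
  driver.close()
}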
Example 195
Source File: HadoopFileLinesReader.scala From multi-tenancy-spark with Apache License 2.0 | 5 votes |
package org.apache.spark.sql.execution.datasources

import java.io.Closeable
import java.net.URI

import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.Path
import org.apache.hadoop.io.Text
import org.apache.hadoop.mapreduce._
import org.apache.hadoop.mapreduce.lib.input.{FileSplit, LineRecordReader}
import org.apache.hadoop.mapreduce.task.TaskAttemptContextImpl

class HadoopFileLinesReader(
    file: PartitionedFile, conf: Configuration) extends Iterator[Text] with Closeable {
  private val iterator = {
    val fileSplit = new FileSplit(
      new Path(new URI(file.filePath)),
      file.start,
      file.length,
      // TODO: Implement Locality
      Array.empty)
    val attemptId = new TaskAttemptID(new TaskID(new JobID(), TaskType.MAP, 0), 0)
    val hadoopAttemptContext = new TaskAttemptContextImpl(conf, attemptId)
    val reader = new LineRecordReader()
    reader.initialize(fileSplit, hadoopAttemptContext)
    new RecordReaderIterator(reader)
  }

  override def hasNext: Boolean = iterator.hasNext

  override def next(): Text = iterator.next()

  override def close(): Unit = iterator.close()
}
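A hedged usage sketch, assuming the Spark 2.x `PartitionedFile(partitionValues, filePath, start, length, ...)` shape; the file path is a placeholder and `length` should be the actual file size:

import org.apache.hadoop.conf.Configuration
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.execution.datasources.{HadoopFileLinesReader, PartitionedFile}

val file = PartitionedFile(InternalRow.empty, "file:///tmp/data.txt", start = 0, length = 1024)
val reader = new HadoopFileLinesReader(file, new Configuration())
try {
  // Text instances are reused by the record reader; copy the value if it must be retained.
  reader.foreach(line => println(line.toString))
} finally {
  reader.close()
}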
Example 196
Source File: resources.scala From multi-tenancy-spark with Apache License 2.0 | 5 votes |
package org.apache.spark.sql.execution.command

import java.io.File
import java.net.URI

import org.apache.hadoop.fs.Path

import org.apache.spark.sql.{Row, SparkSession}
import org.apache.spark.sql.catalyst.expressions.{Attribute, AttributeReference}
import org.apache.spark.sql.types.{IntegerType, StringType, StructField, StructType}

case class ListJarsCommand(jars: Seq[String] = Seq.empty[String]) extends RunnableCommand {
  override val output: Seq[Attribute] = {
    AttributeReference("Results", StringType, nullable = false)() :: Nil
  }

  override def run(sparkSession: SparkSession): Seq[Row] = {
    val jarList = sparkSession.sparkContext.listJars()
    if (jars.nonEmpty) {
      for {
        jarName <- jars.map(f => new Path(f).getName)
        jarPath <- jarList
        if jarPath.contains(jarName)
      } yield Row(jarPath)
    } else {
      jarList.map(Row(_))
    }
  }
}
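A sketch of invoking the command directly (it assumes code living in the same `org.apache.spark.sql.execution.command` package, and the jar path is hypothetical):

import org.apache.spark.sql.SparkSession

val spark = SparkSession.builder().master("local[*]").appName("list-jars").getOrCreate()
spark.sparkContext.addJar("/tmp/my-udfs.jar") // hypothetical jar registered with the session

// With a filter: only jars whose path contains the given file name are returned.
ListJarsCommand(Seq("my-udfs.jar")).run(spark).foreach(println)
// Without arguments: every jar known to the SparkContext.
ListJarsCommand().run(spark).foreach(println)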
Example 197
Source File: FileStreamSourceSuite.scala From multi-tenancy-spark with Apache License 2.0 | 5 votes |
package org.apache.spark.sql.execution.streaming

import java.io.File
import java.net.URI

import scala.util.Random

import org.apache.hadoop.fs.{FileStatus, Path, RawLocalFileSystem}

import org.apache.spark.SparkFunSuite
import org.apache.spark.sql.execution.streaming.ExistsThrowsExceptionFileSystem._
import org.apache.spark.sql.test.SharedSQLContext
import org.apache.spark.sql.types.StructType

class FileStreamSourceSuite extends SparkFunSuite with SharedSQLContext {

  import FileStreamSource._

  test("SeenFilesMap") {
    val map = new SeenFilesMap(maxAgeMs = 10)

    map.add("a", 5)
    assert(map.size == 1)
    map.purge()
    assert(map.size == 1)

    // Add a new entry and purge should be no-op, since the gap is exactly 10 ms.
    map.add("b", 15)
    assert(map.size == 2)
    map.purge()
    assert(map.size == 2)

    // Add a new entry that's more than 10 ms than the first entry. We should be able to purge now.
    map.add("c", 16)
    assert(map.size == 3)
    map.purge()
    assert(map.size == 2)

    // Override existing entry shouldn't change the size
    map.add("c", 25)
    assert(map.size == 2)

    // Not a new file because we have seen c before
    assert(!map.isNewFile("c", 20))

    // Not a new file because timestamp is too old
    assert(!map.isNewFile("d", 5))

    // Finally a new file: never seen and not too old
    assert(map.isNewFile("e", 20))
  }

  test("SeenFilesMap should only consider a file old if it is earlier than last purge time") {
    val map = new SeenFilesMap(maxAgeMs = 10)

    map.add("a", 20)
    assert(map.size == 1)

    // Timestamp 5 should still considered a new file because purge time should be 0
    assert(map.isNewFile("b", 9))
    assert(map.isNewFile("b", 10))

    // Once purge, purge time should be 10 and then b would be a old file if it is less than 10.
    map.purge()
    assert(!map.isNewFile("b", 9))
    assert(map.isNewFile("b", 10))
  }

  testWithUninterruptibleThread("do not recheck that files exist during getBatch") {
    withTempDir { temp =>
      spark.conf.set(
        s"fs.$scheme.impl",
        classOf[ExistsThrowsExceptionFileSystem].getName)
      // add the metadata entries as a pre-req
      val dir = new File(temp, "dir") // use non-existent directory to test whether log make the dir
      val metadataLog =
        new FileStreamSourceLog(FileStreamSourceLog.VERSION, spark, dir.getAbsolutePath)
      assert(metadataLog.add(0, Array(FileEntry(s"$scheme:///file1", 100L, 0))))

      val newSource = new FileStreamSource(spark, s"$scheme:///", "parquet", StructType(Nil), Nil,
        dir.getAbsolutePath, Map.empty)
      // this method should throw an exception if `fs.exists` is called during resolveRelation
      newSource.getBatch(None, FileStreamSourceOffset(1))
    }
  }
}

// The class declaration and `exists` override were dropped in the extracted listing; they are
// reconstructed here from the surrounding test, which registers this FileSystem under `scheme`
// and expects `exists` to throw if called during resolveRelation.
class ExistsThrowsExceptionFileSystem extends RawLocalFileSystem {
  override def getUri: URI = URI.create(s"$scheme:///")

  override def exists(f: Path): Boolean = {
    throw new IllegalArgumentException("Exists shouldn't have been called!")
  }

  // Simply return an empty file status to satisfy the other queries.
  override def listStatus(file: Path): Array[FileStatus] = {
    val emptyFile = new FileStatus()
    emptyFile.setPath(file)
    Array(emptyFile)
  }
}

object ExistsThrowsExceptionFileSystem {
  val scheme = s"FileStreamSourceSuite${math.abs(Random.nextInt)}fs"
}
Example 198
Source File: ApplicationDescription.scala From multi-tenancy-spark with Apache License 2.0 | 5 votes |
package org.apache.spark.deploy

import java.net.URI

private[spark] case class ApplicationDescription(
    name: String,
    maxCores: Option[Int],
    memoryPerExecutorMB: Int,
    command: Command,
    appUiUrl: String,
    eventLogDir: Option[URI] = None,
    // short name of compression codec used when writing event logs, if any (e.g. lzf)
    eventLogCodec: Option[String] = None,
    coresPerExecutor: Option[Int] = None,
    // number of executors this application wants to start with,
    // only used if dynamic allocation is enabled
    initialExecutorLimit: Option[Int] = None,
    user: String = System.getProperty("user.name", "<unknown>")) {

  override def toString: String = "ApplicationDescription(" + name + ")"
}
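A construction sketch. Both classes are `private[spark]`, so this only compiles inside the `org.apache.spark` namespace, and the `Command(mainClass, arguments, environment, classPathEntries, libraryPathEntries, javaOpts)` shape is assumed from the same package:

val desc = ApplicationDescription(
  name = "my-app",
  maxCores = Some(4),
  memoryPerExecutorMB = 2048,
  command = Command("com.example.Main", Seq.empty, Map.empty, Seq.empty, Seq.empty, Seq.empty),
  appUiUrl = "http://driver-host:4040"
)
println(desc) // ApplicationDescription(my-app)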
Example 199
Source File: ClientArguments.scala From multi-tenancy-spark with Apache License 2.0 | 5 votes |
package org.apache.spark.deploy

import java.net.{URI, URISyntaxException}

import scala.annotation.tailrec
import scala.collection.mutable.ListBuffer

import org.apache.log4j.Level

import org.apache.spark.util.{IntParam, MemoryParam, Utils}

// The extracted listing elides the class header, fields, and argument parsing of
// `private[deploy] class ClientArguments(args: Array[String])`; only the usage printer and the
// companion object survived, so a minimal enclosing class is reconstructed here.
private[deploy] class ClientArguments(args: Array[String]) {
  import ClientArguments._

  // ... field declarations and command-line parsing elided in the listing ...

  private def printUsageAndExit(exitCode: Int) {
    // TODO: It wouldn't be too hard to allow users to submit their app and dependency jars
    //       separately similar to in the YARN client.
    val usage =
      s"""
        |Usage: DriverClient [options] launch <active-master> <jar-url> <main-class> [driver options]
        |Usage: DriverClient kill <active-master> <driver-id>
        |
        |Options:
        |   -c CORES, --cores CORES        Number of cores to request (default: $DEFAULT_CORES)
        |   -m MEMORY, --memory MEMORY     Megabytes of memory to request (default: $DEFAULT_MEMORY)
        |   -s, --supervise                Whether to restart the driver on failure
        |                                  (default: $DEFAULT_SUPERVISE)
        |   -v, --verbose                  Print more debugging output
      """.stripMargin
    // scalastyle:off println
    System.err.println(usage)
    // scalastyle:on println
    System.exit(exitCode)
  }
}

private[deploy] object ClientArguments {
  val DEFAULT_CORES = 1
  val DEFAULT_MEMORY = Utils.DEFAULT_DRIVER_MEM_MB // MB
  val DEFAULT_SUPERVISE = false

  def isValidJarUrl(s: String): Boolean = {
    try {
      val uri = new URI(s)
      uri.getScheme != null && uri.getPath != null && uri.getPath.endsWith(".jar")
    } catch {
      case _: URISyntaxException => false
    }
  }
}
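`isValidJarUrl` accepts only URIs with an explicit scheme and a path ending in `.jar`. A short sketch of its behavior (calls would need to sit in the `org.apache.spark.deploy` package, since the object is `private[deploy]`):

ClientArguments.isValidJarUrl("hdfs://nn:8020/apps/app.jar") // true
ClientArguments.isValidJarUrl("/local/path/app.jar")         // false: no scheme
ClientArguments.isValidJarUrl("http://host/app.tgz")         // false: path is not a .jar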
Example 200
Source File: EventHistoryReporter.scala From sparklens with Apache License 2.0 | 5 votes |
package com.qubole.sparklens.app

import java.io.{BufferedInputStream, InputStream}
import java.net.URI

import com.ning.compress.lzf.LZFInputStream
import com.qubole.sparklens.QuboleJobListener
import com.qubole.sparklens.common.Json4sWrapper
import com.qubole.sparklens.helper.HDFSConfigHelper
import net.jpountz.lz4.LZ4BlockInputStream
import org.apache.hadoop.fs.{FileSystem, Path}
import org.apache.spark.SparkConf
import org.json4s.DefaultFormats
import org.xerial.snappy.SnappyInputStream

class EventHistoryReporter(file: String, extraConf: List[(String, String)] = List.empty) {

  // This is using reflection in spark-2.0.0 ReplayListenerBus
  val busKlass = Class.forName("org.apache.spark.scheduler.ReplayListenerBus")
  val bus = busKlass.newInstance()
  val addListenerMethod = busKlass.getMethod("addListener", classOf[Object])
  val conf = new SparkConf()
    .set("spark.sparklens.reporting.disabled", "false")
    .set("spark.sparklens.save.data", "false")

  extraConf.foreach(x => {
    conf.set(x._1, x._2)
  })

  val listener = new QuboleJobListener(conf)
  addListenerMethod.invoke(bus, listener)

  try {
    val replayMethod = busKlass.getMethod("replay", classOf[InputStream], classOf[String],
      classOf[Boolean])
    replayMethod.invoke(bus, getDecodedInputStream(file, conf), file, boolean2Boolean(false))
  } catch {
    case _: NoSuchMethodException => // spark binaries are 2.1* and above
      val replayMethod = busKlass.getMethod("replay", classOf[InputStream], classOf[String],
        classOf[Boolean], classOf[String => Boolean])
      replayMethod.invoke(bus, getDecodedInputStream(file, conf), file, boolean2Boolean(false),
        getFilter _)
    case x: Exception =>
      println(s"Failed replaying events from ${file} [${x.getMessage}]")
  }

  // Borrowed from CompressionCodecs in spark
  private def getDecodedInputStream(file: String, conf: SparkConf): InputStream = {
    val fs = FileSystem.get(new URI(file), HDFSConfigHelper.getHadoopConf(Some(conf)))
    val path = new Path(file)
    val bufStream = new BufferedInputStream(fs.open(path))

    val logName = path.getName.stripSuffix(".inprogress")
    val codecName: Option[String] = logName.split("\\.").tail.lastOption

    codecName.getOrElse("") match {
      case "lz4" => new LZ4BlockInputStream(bufStream)
      case "lzf" => new LZFInputStream(bufStream)
      case "snappy" => new SnappyInputStream(bufStream)
      case _ => bufStream
    }
  }

  private def getFilter(eventString: String): Boolean = {
    implicit val formats = DefaultFormats
    eventFilter.contains(Json4sWrapper.parse(eventString).extract[Map[String, Any]]
      .get("Event").get.asInstanceOf[String])
  }

  private def eventFilter: Set[String] = {
    Set(
      "SparkListenerTaskEnd",
      "SparkListenerApplicationStart",
      "SparkListenerApplicationEnd",
      "SparkListenerExecutorAdded",
      "SparkListenerExecutorRemoved",
      "SparkListenerJobStart",
      "SparkListenerJobEnd",
      "SparkListenerStageSubmitted",
      "SparkListenerStageCompleted"
    )
  }
}
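The codec is inferred from the event-log file extension (`.lz4`, `.lzf`, `.snappy`, otherwise uncompressed). A usage sketch; the log path and extra Hadoop setting are placeholders:

import com.qubole.sparklens.app.EventHistoryReporter

// Replays the given event log through QuboleJobListener and emits the Sparklens report.
new EventHistoryReporter(
  "hdfs:///spark-history/application_1234_0001.lz4",
  extraConf = List(("spark.hadoop.fs.defaultFS", "hdfs://namenode:8020"))
)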