scala.xml.Node Scala Examples
The following examples show how to use scala.xml.Node.
The source project and license for each example are noted above its listing.
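Before the examples, here is a minimal self-contained primer (invented data, not from any of the projects below) showing the core scala.xml.Node operations the examples rely on: XML literals, child selection with \, recursive search with \\, and attribute access with \@.

import scala.xml.{Node, XML}

// Minimal primer: the Node operations used throughout the examples below.
val doc: Node = XML.loadString(
  """<library><book id="1"><title>Dune</title></book></library>""")

val titles = doc \ "book" \ "title"   // direct children: <title>Dune</title>
val allTitles = doc \\ "title"        // descendants at any depth
val id = (doc \ "book").head \@ "id"  // attribute value as a String: "1"
println(s"${titles.text} / ${allTitles.text} / $id")  // Dune / Dune / 1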
Example 1
Source File: DFDLDefineFormat.scala from incubator-daffodil with Apache License 2.0
package org.apache.daffodil.dsom

import scala.xml.Node
import scala.xml.Utility

import org.apache.daffodil.xml.XMLUtils

final class DFDLDefineFormat(node: Node, sd: SchemaDocument)
  extends DFDLDefiningAnnotation(node, sd) // Note: DefineFormat is not a format annotation
  {

  requiredEvaluationsAlways(formatAnnotation)

  // baseFormat was removed from the DFDL spec. Just use a ref from the
  // dfdl:format inside.
  // lazy val baseFormat = getAttributeOption("baseFormat") // nor baseFormat

  lazy val formatAnnotation = LV('formatAnnotation) {
    XMLUtils.removeComments(Utility.trim(node)) match {
      case <defineFormat>{ f @ <format>{ _* }</format> }</defineFormat> =>
        new DFDLFormat(f, sd)
      case _ =>
        schemaDefinitionError("dfdl:defineFormat does not contain a dfdl:format element.")
    }
  }.value
}
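A side note on the pattern match above: literal XML patterns only match when the element's children line up exactly, so whitespace-only text nodes from pretty-printed input would break the match. That is why the example trims the node first. A minimal standalone sketch (input invented):

import scala.xml.{Utility, XML}

val parsed = XML.loadString(
  "<defineFormat>\n  <format ref=\"base\"/>\n</defineFormat>")

// Utility.trim drops the whitespace text nodes between the tags,
// leaving <format/> as the single child the pattern expects.
Utility.trim(parsed) match {
  case <defineFormat>{ f @ <format>{ _* }</format> }</defineFormat> =>
    println(s"matched: $f")   // matched: <format ref="base"/>
  case _ =>
    println("no single <format> child")
}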
Example 2
Source File: ApplicationHistoryProvider.scala from drizzle-spark with Apache License 2.0
package org.apache.spark.deploy.history

import java.util.zip.ZipOutputStream

import scala.xml.Node

import org.apache.spark.SparkException
import org.apache.spark.ui.SparkUI

private[spark] case class ApplicationAttemptInfo(
    attemptId: Option[String],
    startTime: Long,
    endTime: Long,
    lastUpdated: Long,
    sparkUser: String,
    completed: Boolean = false)

private[spark] case class ApplicationHistoryInfo(
    id: String,
    name: String,
    attempts: List[ApplicationAttemptInfo]) {

  def getEmptyListingHtml(): Seq[Node] = Seq.empty
}
Example 3
Source File: HistoryPage.scala from drizzle-spark with Apache License 2.0
package org.apache.spark.deploy.history

import javax.servlet.http.HttpServletRequest

import scala.xml.Node

import org.apache.spark.ui.{UIUtils, WebUIPage}

private[history] class HistoryPage(parent: HistoryServer) extends WebUIPage("") {

  def render(request: HttpServletRequest): Seq[Node] = {
    val requestedIncomplete =
      Option(request.getParameter("showIncomplete")).getOrElse("false").toBoolean

    val allAppsSize = parent.getApplicationList().count(_.completed != requestedIncomplete)
    val providerConfig = parent.getProviderConfig()
    val content =
      <div>
        <div class="span12">
          <ul class="unstyled">
            {providerConfig.map { case (k, v) => <li><strong>{k}:</strong> {v}</li> }}
          </ul>
          {
            if (allAppsSize > 0) {
              <script src={UIUtils.prependBaseUri("/static/dataTables.rowsGroup.js")}></script> ++
              <div id="history-summary" class="span12 pagination"></div> ++
              <script src={UIUtils.prependBaseUri("/static/utils.js")}></script> ++
              <script src={UIUtils.prependBaseUri("/static/historypage.js")}></script> ++
              <script>setAppLimit({parent.maxApplications})</script>
            } else if (requestedIncomplete) {
              <h4>No incomplete applications found!</h4>
            } else {
              <h4>No completed applications found!</h4> ++ parent.emptyListingHtml
            }
          }
          <a href={makePageLink(!requestedIncomplete)}>
            {
              if (requestedIncomplete) {
                "Back to completed applications"
              } else {
                "Show incomplete applications"
              }
            }
          </a>
        </div>
      </div>
    UIUtils.basicSparkPage(content, "History Server", true)
  }

  private def makePageLink(showIncomplete: Boolean): String = {
    UIUtils.prependBaseUri("/?" + "showIncomplete=" + showIncomplete)
  }
}
Example 4
Source File: ExecutorsPage.scala from drizzle-spark with Apache License 2.0
package org.apache.spark.ui.exec

import javax.servlet.http.HttpServletRequest

import scala.xml.Node

import org.apache.spark.status.api.v1.ExecutorSummary
import org.apache.spark.ui.{UIUtils, WebUIPage}

// This isn't even used anymore -- but we need to keep it b/c of a MiMa false positive
private[ui] case class ExecutorSummaryInfo(
    id: String,
    hostPort: String,
    rddBlocks: Int,
    memoryUsed: Long,
    diskUsed: Long,
    activeTasks: Int,
    failedTasks: Int,
    completedTasks: Int,
    totalTasks: Int,
    totalDuration: Long,
    totalInputBytes: Long,
    totalShuffleRead: Long,
    totalShuffleWrite: Long,
    maxMemory: Long,
    executorLogs: Map[String, String])

private[ui] class ExecutorsPage(
    parent: ExecutorsTab,
    threadDumpEnabled: Boolean)
  extends WebUIPage("") {

  private val listener = parent.listener

  def render(request: HttpServletRequest): Seq[Node] = {
    val content =
      <div>
        {
          <div id="active-executors"></div> ++
          <script src={UIUtils.prependBaseUri("/static/utils.js")}></script> ++
          <script src={UIUtils.prependBaseUri("/static/executorspage.js")}></script> ++
          <script>setThreadDumpEnabled({threadDumpEnabled})</script>
        }
      </div>;

    UIUtils.headerSparkPage("Executors", content, parent, useDataTables = true)
  }
}

private[spark] object ExecutorsPage {
  def getExecInfo(
      listener: ExecutorsListener,
      statusId: Int,
      isActive: Boolean): ExecutorSummary = {
    val status = if (isActive) {
      listener.activeStorageStatusList(statusId)
    } else {
      listener.deadStorageStatusList(statusId)
    }
    val execId = status.blockManagerId.executorId
    val hostPort = status.blockManagerId.hostPort
    val rddBlocks = status.numBlocks
    val memUsed = status.memUsed
    val maxMem = status.maxMem
    val diskUsed = status.diskUsed
    val taskSummary = listener.executorToTaskSummary.getOrElse(execId, ExecutorTaskSummary(execId))

    new ExecutorSummary(
      execId,
      hostPort,
      isActive,
      rddBlocks,
      memUsed,
      diskUsed,
      taskSummary.totalCores,
      taskSummary.tasksMax,
      taskSummary.tasksActive,
      taskSummary.tasksFailed,
      taskSummary.tasksComplete,
      taskSummary.tasksActive + taskSummary.tasksFailed + taskSummary.tasksComplete,
      taskSummary.duration,
      taskSummary.jvmGCTime,
      taskSummary.inputBytes,
      taskSummary.shuffleRead,
      taskSummary.shuffleWrite,
      maxMem,
      taskSummary.executorLogs
    )
  }
}
Example 5
Source File: ExecutorThreadDumpPage.scala from drizzle-spark with Apache License 2.0
package org.apache.spark.ui.exec

import javax.servlet.http.HttpServletRequest

import scala.xml.{Node, Text}

import org.apache.spark.ui.{UIUtils, WebUIPage}

private[ui] class ExecutorThreadDumpPage(parent: ExecutorsTab) extends WebUIPage("threadDump") {

  private val sc = parent.sc

  def render(request: HttpServletRequest): Seq[Node] = {
    val executorId = Option(request.getParameter("executorId")).map { executorId =>
      UIUtils.decodeURLParameter(executorId)
    }.getOrElse {
      throw new IllegalArgumentException(s"Missing executorId parameter")
    }
    val time = System.currentTimeMillis()
    val maybeThreadDump = sc.get.getExecutorThreadDump(executorId)

    val content = maybeThreadDump.map { threadDump =>
      val dumpRows = threadDump.sortWith {
        case (threadTrace1, threadTrace2) =>
          val v1 = if (threadTrace1.threadName.contains("Executor task launch")) 1 else 0
          val v2 = if (threadTrace2.threadName.contains("Executor task launch")) 1 else 0
          if (v1 == v2) {
            threadTrace1.threadName.toLowerCase < threadTrace2.threadName.toLowerCase
          } else {
            v1 > v2
          }
      }.map { thread =>
        val threadId = thread.threadId
        <tr id={s"thread_${threadId}_tr"} class="accordion-heading"
            onclick={s"toggleThreadStackTrace($threadId, false)"}
            onmouseover={s"onMouseOverAndOut($threadId)"}
            onmouseout={s"onMouseOverAndOut($threadId)"}>
          <td id={s"${threadId}_td_id"}>{threadId}</td>
          <td id={s"${threadId}_td_name"}>{thread.threadName}</td>
          <td id={s"${threadId}_td_state"}>{thread.threadState}</td>
          <td id={s"${threadId}_td_stacktrace"} class="hidden">{thread.stackTrace}</td>
        </tr>
      }

      <div class="row-fluid">
        <p>Updated at {UIUtils.formatDate(time)}</p>
        {
          // scalastyle:off
          <p><a class="expandbutton" onClick="expandAllThreadStackTrace(true)">
            Expand All
          </a></p>
          <p><a class="expandbutton hidden" onClick="collapseAllThreadStackTrace(true)">
            Collapse All
          </a></p>
          <div class="form-inline">
            <div class="bs-example" data-example-id="simple-form-inline">
              <div class="form-group">
                <div class="input-group">
                  Search: <input type="text" class="form-control" id="search"
                                 oninput="onSearchStringChange()"></input>
                </div>
              </div>
            </div>
          </div>
          <p></p>
          // scalastyle:on
        }
        <table class={UIUtils.TABLE_CLASS_STRIPED + " accordion-group" + " sortable"}>
          <thead>
            <th onClick="collapseAllThreadStackTrace(false)">Thread ID</th>
            <th onClick="collapseAllThreadStackTrace(false)">Thread Name</th>
            <th onClick="collapseAllThreadStackTrace(false)">Thread State</th>
          </thead>
          <tbody>{dumpRows}</tbody>
        </table>
      </div>
    }.getOrElse(Text("Error fetching thread dump"))
    UIUtils.headerSparkPage(s"Thread dump for executor $executorId", content, parent)
  }
}
Example 6
Source File: EnvironmentPage.scala from drizzle-spark with Apache License 2.0
package org.apache.spark.ui.env

import javax.servlet.http.HttpServletRequest

import scala.xml.Node

import org.apache.spark.ui.{UIUtils, WebUIPage}

private[ui] class EnvironmentPage(parent: EnvironmentTab) extends WebUIPage("") {
  private val listener = parent.listener

  private def removePass(kv: (String, String)): (String, String) = {
    if (kv._1.toLowerCase.contains("password") || kv._1.toLowerCase.contains("secret")) {
      (kv._1, "******")
    } else kv
  }

  def render(request: HttpServletRequest): Seq[Node] = {
    val runtimeInformationTable = UIUtils.listingTable(
      propertyHeader, jvmRow, listener.jvmInformation, fixedWidth = true)
    val sparkPropertiesTable = UIUtils.listingTable(
      propertyHeader, propertyRow, listener.sparkProperties.map(removePass), fixedWidth = true)
    val systemPropertiesTable = UIUtils.listingTable(
      propertyHeader, propertyRow, listener.systemProperties, fixedWidth = true)
    val classpathEntriesTable = UIUtils.listingTable(
      classPathHeaders, classPathRow, listener.classpathEntries, fixedWidth = true)
    val content =
      <span>
        <h4>Runtime Information</h4> {runtimeInformationTable}
        <h4>Spark Properties</h4> {sparkPropertiesTable}
        <h4>System Properties</h4> {systemPropertiesTable}
        <h4>Classpath Entries</h4> {classpathEntriesTable}
      </span>

    UIUtils.headerSparkPage("Environment", content, parent)
  }

  private def propertyHeader = Seq("Name", "Value")
  private def classPathHeaders = Seq("Resource", "Source")
  private def jvmRow(kv: (String, String)) = <tr><td>{kv._1}</td><td>{kv._2}</td></tr>
  private def propertyRow(kv: (String, String)) = <tr><td>{kv._1}</td><td>{kv._2}</td></tr>
  private def classPathRow(data: (String, String)) = <tr><td>{data._1}</td><td>{data._2}</td></tr>
}
Example 7
Source File: PoolTable.scala from drizzle-spark with Apache License 2.0
package org.apache.spark.ui.jobs

import java.net.URLEncoder

import scala.collection.mutable.HashMap
import scala.xml.Node

import org.apache.spark.scheduler.{Schedulable, StageInfo}
import org.apache.spark.ui.UIUtils

private[ui] class PoolTable(pools: Seq[Schedulable], parent: StagesTab) {
  private val listener = parent.progressListener

  def toNodeSeq: Seq[Node] = {
    listener.synchronized {
      poolTable(poolRow, pools)
    }
  }

  private def poolTable(
      makeRow: (Schedulable, HashMap[String, HashMap[Int, StageInfo]]) => Seq[Node],
      rows: Seq[Schedulable]): Seq[Node] = {
    <table class="table table-bordered table-striped table-condensed sortable table-fixed">
      <thead>
        <th>Pool Name</th>
        <th>Minimum Share</th>
        <th>Pool Weight</th>
        <th>Active Stages</th>
        <th>Running Tasks</th>
        <th>SchedulingMode</th>
      </thead>
      <tbody>
        {rows.map(r => makeRow(r, listener.poolToActiveStages))}
      </tbody>
    </table>
  }

  private def poolRow(
      p: Schedulable,
      poolToActiveStages: HashMap[String, HashMap[Int, StageInfo]]): Seq[Node] = {
    val activeStages = poolToActiveStages.get(p.name) match {
      case Some(stages) => stages.size
      case None => 0
    }
    val href = "%s/stages/pool?poolname=%s"
      .format(UIUtils.prependBaseUri(parent.basePath), URLEncoder.encode(p.name, "UTF-8"))
    <tr>
      <td>
        <a href={href}>{p.name}</a>
      </td>
      <td>{p.minShare}</td>
      <td>{p.weight}</td>
      <td>{activeStages}</td>
      <td>{p.runningTasks}</td>
      <td>{p.schedulingMode}</td>
    </tr>
  }
}
Example 8
Source File: PoolPage.scala from drizzle-spark with Apache License 2.0
package org.apache.spark.ui.jobs

import javax.servlet.http.HttpServletRequest

import scala.xml.Node

import org.apache.spark.scheduler.StageInfo
import org.apache.spark.ui.{UIUtils, WebUIPage}

private[ui] class PoolPage(parent: StagesTab) extends WebUIPage("pool") {
  private val sc = parent.sc
  private val listener = parent.progressListener

  def render(request: HttpServletRequest): Seq[Node] = {
    listener.synchronized {
      val poolName = Option(request.getParameter("poolname")).map { poolname =>
        UIUtils.decodeURLParameter(poolname)
      }.getOrElse {
        throw new IllegalArgumentException(s"Missing poolname parameter")
      }

      val poolToActiveStages = listener.poolToActiveStages
      val activeStages = poolToActiveStages.get(poolName) match {
        case Some(s) => s.values.toSeq
        case None => Seq[StageInfo]()
      }
      val shouldShowActiveStages = activeStages.nonEmpty
      val activeStagesTable =
        new StageTableBase(request, activeStages, "", "activeStage", parent.basePath,
          "stages/pool", parent.progressListener, parent.isFairScheduler, parent.killEnabled,
          isFailedStage = false)

      // For now, pool information is only accessible in live UIs
      val pools = sc.map(_.getPoolForName(poolName).getOrElse {
        throw new IllegalArgumentException(s"Unknown poolname: $poolName")
      }).toSeq
      val poolTable = new PoolTable(pools, parent)

      var content = <h4>Summary </h4> ++ poolTable.toNodeSeq
      if (shouldShowActiveStages) {
        content ++= <h4>{activeStages.size} Active Stages</h4> ++ activeStagesTable.toNodeSeq
      }

      UIUtils.headerSparkPage("Fair Scheduler Pool: " + poolName, content, parent)
    }
  }
}
Example 9
Source File: PagedTableSuite.scala from drizzle-spark with Apache License 2.0
package org.apache.spark.ui

import scala.xml.Node

import org.apache.spark.SparkFunSuite

class PagedDataSourceSuite extends SparkFunSuite {

  test("basic") {
    val dataSource1 = new SeqPagedDataSource[Int](1 to 5, pageSize = 2)
    assert(dataSource1.pageData(1) === PageData(3, (1 to 2)))

    val dataSource2 = new SeqPagedDataSource[Int](1 to 5, pageSize = 2)
    assert(dataSource2.pageData(2) === PageData(3, (3 to 4)))

    val dataSource3 = new SeqPagedDataSource[Int](1 to 5, pageSize = 2)
    assert(dataSource3.pageData(3) === PageData(3, Seq(5)))

    val dataSource4 = new SeqPagedDataSource[Int](1 to 5, pageSize = 2)
    val e1 = intercept[IndexOutOfBoundsException] {
      dataSource4.pageData(4)
    }
    assert(e1.getMessage === "Page 4 is out of range. Please select a page number between 1 and 3.")

    val dataSource5 = new SeqPagedDataSource[Int](1 to 5, pageSize = 2)
    val e2 = intercept[IndexOutOfBoundsException] {
      dataSource5.pageData(0)
    }
    assert(e2.getMessage === "Page 0 is out of range. Please select a page number between 1 and 3.")
  }
}

class PagedTableSuite extends SparkFunSuite {
  test("pageNavigation") {
    // Create a fake PagedTable to test pageNavigation
    val pagedTable = new PagedTable[Int] {
      override def tableId: String = ""

      override def tableCssClass: String = ""

      override def dataSource: PagedDataSource[Int] = null

      override def pageLink(page: Int): String = page.toString

      override def headers: Seq[Node] = Nil

      override def row(t: Int): Seq[Node] = Nil

      override def pageSizeFormField: String = "pageSize"

      override def prevPageSizeFormField: String = "prevPageSize"

      override def pageNumberFormField: String = "page"

      override def goButtonFormPath: String = ""
    }

    assert(pagedTable.pageNavigation(1, 10, 1) === Nil)
    assert(
      (pagedTable.pageNavigation(1, 10, 2).head \\ "li").map(_.text.trim) === Seq("1", "2", ">"))
    assert(
      (pagedTable.pageNavigation(2, 10, 2).head \\ "li").map(_.text.trim) === Seq("<", "1", "2"))

    assert((pagedTable.pageNavigation(1, 10, 100).head \\ "li").map(_.text.trim) ===
      (1 to 10).map(_.toString) ++ Seq(">", ">>"))
    assert((pagedTable.pageNavigation(2, 10, 100).head \\ "li").map(_.text.trim) ===
      Seq("<") ++ (1 to 10).map(_.toString) ++ Seq(">", ">>"))

    assert((pagedTable.pageNavigation(100, 10, 100).head \\ "li").map(_.text.trim) ===
      Seq("<<", "<") ++ (91 to 100).map(_.toString))
    assert((pagedTable.pageNavigation(99, 10, 100).head \\ "li").map(_.text.trim) ===
      Seq("<<", "<") ++ (91 to 100).map(_.toString) ++ Seq(">"))

    assert((pagedTable.pageNavigation(11, 10, 100).head \\ "li").map(_.text.trim) ===
      Seq("<<", "<") ++ (11 to 20).map(_.toString) ++ Seq(">", ">>"))
    assert((pagedTable.pageNavigation(93, 10, 97).head \\ "li").map(_.text.trim) ===
      Seq("<<", "<") ++ (91 to 97).map(_.toString) ++ Seq(">"))
  }
}

private[spark] class SeqPagedDataSource[T](seq: Seq[T], pageSize: Int)
  extends PagedDataSource[T](pageSize) {

  override protected def dataSize: Int = seq.size

  override protected def sliceData(from: Int, to: Int): Seq[T] = seq.slice(from, to)
}
Example 10
Source File: StagePageSuite.scala from drizzle-spark with Apache License 2.0
package org.apache.spark.ui

import javax.servlet.http.HttpServletRequest

import scala.xml.Node

import org.mockito.Mockito.{mock, when, RETURNS_SMART_NULLS}

import org.apache.spark._
import org.apache.spark.executor.TaskMetrics
import org.apache.spark.scheduler._
import org.apache.spark.storage.StorageStatusListener
import org.apache.spark.ui.exec.ExecutorsListener
import org.apache.spark.ui.jobs.{JobProgressListener, StagePage, StagesTab}
import org.apache.spark.ui.scope.RDDOperationGraphListener

class StagePageSuite extends SparkFunSuite with LocalSparkContext {

  private val peakExecutionMemory = 10

  test("peak execution memory only displayed if unsafe is enabled") {
    val unsafeConf = "spark.sql.unsafe.enabled"
    val conf = new SparkConf(false).set(unsafeConf, "true")
    val html = renderStagePage(conf).toString().toLowerCase
    val targetString = "peak execution memory"
    assert(html.contains(targetString))
    // Disable unsafe and make sure it's not there
    val conf2 = new SparkConf(false).set(unsafeConf, "false")
    val html2 = renderStagePage(conf2).toString().toLowerCase
    assert(!html2.contains(targetString))
    // Avoid setting anything; it should be displayed by default
    val conf3 = new SparkConf(false)
    val html3 = renderStagePage(conf3).toString().toLowerCase
    assert(html3.contains(targetString))
  }

  test("SPARK-10543: peak execution memory should be per-task rather than cumulative") {
    val unsafeConf = "spark.sql.unsafe.enabled"
    val conf = new SparkConf(false).set(unsafeConf, "true")
    val html = renderStagePage(conf).toString().toLowerCase
    // verify min/25/50/75/max show task value not cumulative values
    assert(html.contains(s"<td>$peakExecutionMemory.0 b</td>" * 5))
  }

  private def renderStagePage(conf: SparkConf): Seq[Node] = {
    val jobListener = new JobProgressListener(conf)
    val graphListener = new RDDOperationGraphListener(conf)
    val executorsListener = new ExecutorsListener(new StorageStatusListener(conf), conf)
    val tab = mock(classOf[StagesTab], RETURNS_SMART_NULLS)
    val request = mock(classOf[HttpServletRequest])
    when(tab.conf).thenReturn(conf)
    when(tab.progressListener).thenReturn(jobListener)
    when(tab.operationGraphListener).thenReturn(graphListener)
    when(tab.executorsListener).thenReturn(executorsListener)
    when(tab.appName).thenReturn("testing")
    when(tab.headerTabs).thenReturn(Seq.empty)
    when(request.getParameter("id")).thenReturn("0")
    when(request.getParameter("attempt")).thenReturn("0")
    val page = new StagePage(tab)

    // Simulate a stage in job progress listener
    val stageInfo = new StageInfo(0, 0, "dummy", 1, Seq.empty, Seq.empty, "details")
    // Simulate two tasks to test PEAK_EXECUTION_MEMORY correctness
    (1 to 2).foreach { taskId =>
      val taskInfo = new TaskInfo(taskId, taskId, 0, 0, "0", "localhost", TaskLocality.ANY, false)
      jobListener.onStageSubmitted(SparkListenerStageSubmitted(stageInfo))
      jobListener.onTaskStart(SparkListenerTaskStart(0, 0, taskInfo))
      taskInfo.markFinished(TaskState.FINISHED)
      val taskMetrics = TaskMetrics.empty
      taskMetrics.incPeakExecutionMemory(peakExecutionMemory)
      jobListener.onTaskEnd(SparkListenerTaskEnd(0, 0, "result", Success, taskInfo, taskMetrics))
    }
    jobListener.onStageCompleted(SparkListenerStageCompleted(stageInfo))
    page.render(request)
  }
}
Example 11
Source File: XPath.scala from xtract with Apache License 2.0
package com.lucidchart.open.xtract

import scala.util.matching.Regex
import scala.xml.{Node, NodeSeq}

sealed trait XPathNode extends Function[NodeSeq, NodeSeq]

case class IdxXPathNode(idx: Int) extends XPathNode {
  def apply(xml: NodeSeq): NodeSeq = xml(idx)
  override def toString = s"[$idx]"
}

case class KeyXPathNode(key: String) extends XPathNode {
  def apply(xml: NodeSeq): NodeSeq = xml \ key
  override def toString = s"/$key"
}

case class RecursiveXPathNode(key: String) extends XPathNode {
  def apply(xml: NodeSeq): NodeSeq = xml \\ key
  override def toString = s"//$key"
}

case class RecursiveWildCardXPathNode(regex: Regex) extends XPathNode {
  def apply(xml: NodeSeq): NodeSeq = (xml \\ "_").filter(node => node.label.matches(regex.regex))
  override def toString = s"//?$regex"
}

case class WildCardXPathNode(regex: Regex) extends XPathNode {
  def apply(xml: NodeSeq): NodeSeq = (xml \ "_").filter(node => node.label.matches(regex.regex))
  override def toString = s"/?$regex"
}

case class AttributedXPathNode(attr: String, value: Option[String]) extends XPathNode {
  def apply(xml: NodeSeq): NodeSeq = xml.filter { node =>
    node.attribute(attr) match {
      case Some(attrValues) => value.fold(true)(_ == attrValues.toString)
      case None => false
    }
  }

  override def toString = {
    value match {
      case Some(v) => s"[@$attr=$v]"
      case None => s"[@$attr]"
    }
  }
}

// NOTE: the XPath class itself (a sequence of XPathNode steps) is not included in this excerpt.
object XPath extends XPath(Nil) {
}
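Since every XPathNode above is a NodeSeq => NodeSeq function, a path can be evaluated by simply folding its steps over a document. A small usage sketch (data invented, not from the xtract project):

import scala.xml.{NodeSeq, XML}

val doc = XML.loadString("""<root><item id="a"/><item id="b"/></root>""")
// "/item[1]": select the <item> children, then take the element at index 1.
val steps: Seq[XPathNode] = Seq(KeyXPathNode("item"), IdxXPathNode(1))

val result = steps.foldLeft(doc: NodeSeq)((xml, step) => step(xml))
println(steps.mkString)  // /item[1]
println(result)          // <item id="b"/>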
Example 12
Source File: ScalastyleInspectionsGenerator.scala from sonar-scala with GNU Lesser General Public License v3.0
package com.mwz.sonar.scala.metadata.scalastyle

import java.io.InputStream
import java.nio.file.Paths

import com.mwz.sonar.scala.metadata.scalastyle._
import com.typesafe.config.{Config, ConfigFactory}
import org.scalastyle.{Level, _}
import sbt.Keys._
import sbt._

import scala.meta._
import scala.xml.{Node, NodeSeq, XML}

// NOTE: this excerpt omits the enclosing object definition from the original file.
def transform(source: Tree, inspections: Seq[ScalastyleInspection]): Tree = {
  val stringified: Seq[String] = inspections.collect {
    case inspection =>
      // Is there a better way of embedding multi-line text?
      val extraDescription = inspection.extraDescription.map(s => "\"\"\"" + s + "\"\"\"")
      val justification = inspection.justification.map(s => "\"\"\"" + s + "\"\"\"")
      val params = inspection.params.map { p =>
        s"""
           |ScalastyleParam(
           |  name = "${p.name}",
           |  typ = ${p.typ},
           |  label = "${p.label}",
           |  description = \"\"\"${p.description}\"\"\",
           |  default = \"\"\"${p.default}\"\"\"
           |)
         """.stripMargin
      }

      // It doesn't seem to be straightforward to automatically convert a collection
      // into a tree using scalameta, so I'm turning it into a String so it can be parsed,
      // which is easier than constructing the tree manually.
      // Totally doable with shapeless though, but it would be a bit of an overkill in this case.
      s"""
         |ScalastyleInspection(
         |  clazz = "${inspection.clazz}",
         |  id = "${inspection.id}",
         |  label = "${inspection.label}",
         |  description = "${inspection.description}",
         |  extraDescription = $extraDescription,
         |  justification = $justification,
         |  defaultLevel = ${inspection.defaultLevel},
         |  params = ${params.toString.parse[Term].get.syntax}
         |)
       """.stripMargin
  }

  // Transform the template file.
  val term: Term = stringified.toString.parse[Term].get
  source.transform {
    case q"val AllInspections: $tpe = $expr" =>
      q"val AllInspections: $tpe = $term"
  }
}
Example 13
Source File: ScoverageReportParser.scala from sonar-scala with GNU Lesser General Public License v3.0
package com.mwz.sonar.scala
package scoverage

import java.nio.file.{Path, Paths}

import scala.xml.{Node, XML}

import cats.syntax.semigroup.catsSyntaxSemigroup
import com.mwz.sonar.scala.util.PathUtils
import org.sonar.api.scanner.ScannerSide

// NOTE: this excerpt omits the enclosing class definition from the original file.
private[scoverage] def extractScoverageFromNode(node: Node): Scoverage = {
  val branches = (node \\ "statement")
    .filter(node => !(node \@ "ignored").toBoolean && (node \@ "branch").toBoolean)
  val coveredBranches = branches.filter(statement => (statement \@ "invocation-count").toInt > 0)

  Scoverage(
    statements = (node \@ "statement-count").toInt,
    coveredStatements = (node \@ "statements-invoked").toInt,
    statementCoverage = (node \@ "statement-rate").toDouble,
    branches = branches.size,
    coveredBranches = coveredBranches.size,
    branchCoverage = (node \@ "branch-rate").toDouble
  )
}
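The \@ accessor used above returns an attribute's value as a String (empty if absent), which keeps the numeric conversions compact. A standalone sketch against a hand-written statement node (attribute values invented, shaped like a scoverage report fragment):

import scala.xml.XML

val stmt = XML.loadString(
  """<statement ignored="false" branch="true" invocation-count="3"/>""")

// Same predicates as in the parser: ignored/branch flags and invocation count.
val countsAsBranch = !(stmt \@ "ignored").toBoolean && (stmt \@ "branch").toBoolean
val covered = (stmt \@ "invocation-count").toInt > 0
println(s"branch=$countsAsBranch covered=$covered")  // branch=true covered=true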
Example 14
Source File: SpotXmlParser.scala from dbpedia-spotlight-model with Apache License 2.0
package org.dbpedia.spotlight.spot

import org.dbpedia.spotlight.model.{SurfaceForm, SurfaceFormOccurrence, Text}

import scala.collection.JavaConversions._
import scala.xml.{Node, XML}

// NOTE: this excerpt omits the enclosing class declaration (which defines the
// `name` field used by getName/setName) from the original file.
def extract(spotsXml: Text): java.util.List[SurfaceFormOccurrence] = {
  val xml = XML.loadString(spotsXml.text)
  val text = (xml \\ "annotation" \ "@text").toString
  val surfaceForms = xml \\ "annotation" \ "surfaceForm"
  val occs = surfaceForms.map(buildOcc(_, new Text(text)))
  occs.toList
}

def buildOcc(sf: Node, text: Text) = {
  val offset = (sf \ "@offset").toString.toInt
  val name = (sf \ "@name").toString
  new SurfaceFormOccurrence(new SurfaceForm(name), text, offset)
}

def getName() = name

def setName(n: String) { name = n; }

object SpotXmlParser {
  def main(args: Array[String]) {
    val xml = "<annotation text=\"The research, which is published online May 22 in the European Heart Journal, opens up the prospect of treating heart failure patients with their own, human-induced pluripotent stem cells (hiPSCs) to repair their damaged hearts.\">\n<surfaceForm name=\"published\" offset=\"23\"/>\n<surfaceForm name=\"May 22\" offset=\"40\"/>\n<surfaceForm name=\"European\" offset=\"54\"/>\n<surfaceForm name=\"Heart\" offset=\"63\"/>\n<surfaceForm name=\"Journal\" offset=\"69\"/>\n<surfaceForm name=\"prospect\" offset=\"91\"/>\n<surfaceForm name=\"heart failure\" offset=\"112\"/>\n<surfaceForm name=\"patients\" offset=\"126\"/>\n<surfaceForm name=\"human\" offset=\"151\"/>\n<surfaceForm name=\"stem cells\" offset=\"177\"/>\n<surfaceForm name=\"hearts\" offset=\"221\"/>\n</annotation>"
    val spotter = new SpotXmlParser()
    spotter.extract(new Text(xml)).foreach(println)
  }
}
Example 15
Source File: ExecutorNumTab.scala from XSQL with Apache License 2.0
package org.apache.spark.monitor

import javax.servlet.http.HttpServletRequest

import scala.xml.Node

import org.apache.spark.ui.{SparkUI, SparkUITab, UIUtils, WebUIPage}

private class ExecutorNumTab(parent: SparkUI) extends SparkUITab(parent, "resources") {

  init()

  private def init(): Unit = {
    attachPage(new ExecutorNumPage(this))
  }
}

private class ExecutorNumPage(parent: SparkUITab) extends WebUIPage("") {

  def render(request: HttpServletRequest): Seq[Node] = {
    val content =
      <div>
        {
          <div id ="echart-container" class="row-fluid" style="height: 600px"></div> ++
          <script type="text/javascript"
                  src="http://echarts.baidu.com/gallery/vendors/echarts/echarts.min.js"></script> ++
          <script src={UIUtils.prependBaseUri(
            request, "/static/special/executornumpage.js")}></script>
        }
      </div>
    UIUtils.headerSparkPage(request, "ExecutorNumCurve", content, parent, useDataTables = false)
  }
}
Example 16
Source File: CollectionTypes.scala from xmlrpc with MIT License
package xmlrpc.protocol

import xmlrpc.protocol.Deserializer.Deserialized

import scala.xml.{NodeSeq, Node}
import scala.language.postfixOps

import scalaz.Scalaz._

trait CollectionTypes extends Protocol {
  import Deserializer.StringToError

  // We only support array of the same type, if an array contains elements with different
  // types, we deserialize it with case classes
  implicit def ArrayXmlrpc[T: Datatype]: Datatype[Seq[T]] = new Datatype[Seq[T]] {
    override def serialize(value: Seq[T]): Node =
      <array><data>{for { elem <- value } yield toXmlrpc(elem)}</data></array>.inValue

    override def deserialize(from: NodeSeq): Deserialized[Seq[T]] =
      from \\ "array" headOption match {
        case Some(<array><data>{array @ _*}</data></array>) =>
          (for { value <- array } yield fromXmlrpc[T](value)).toList.sequence[Deserialized, T]

        case _ => s"Expected array structure in $from".toError.failures
      }
  }

  implicit def StructXmlrpc[T: Datatype]: Datatype[Map[String, T]] = new Datatype[Map[String, T]] {
    override def serialize(map: Map[String, T]): Node = {
      def inName(name: String): Node = <name>{name}</name>
      def inMember(elems: NodeSeq): NodeSeq = <member>{elems}</member>

      lazy val struct: NodeSeq =
        (for {
          (key, value) <- map
        } yield inMember(inName(key) ++ toXmlrpc(value))).reduce(_ ++ _)

      <struct>{struct}</struct>.inValue
    }

    override def deserialize(from: NodeSeq): Deserialized[Map[String, T]] =
      from \\ "struct" headOption match {
        case Some(<struct>{members @ _*}</struct>) =>
          (for { member <- members }
            yield fromXmlrpc[T](member \ "value" head) map ((member \ "name" text) -> _))
            .toList
            .sequence[Deserialized, (String, T)]
            .map(_.toMap[String, T])

        case _ => s"Expected struct in:\n$from".toError.failures
      }
  }
}
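The deserializers above lean on XML sequence patterns: { array @ _* } binds all children of the matched element to a Seq[Node]. A minimal sketch of that idiom in isolation (payload invented):

import scala.xml.NodeSeq

val payload: NodeSeq = <value><array><data><i4>1</i4><i4>2</i4></data></array></value>

(payload \\ "array").headOption match {
  case Some(<array><data>{ elems @ _* }</data></array>) =>
    // elems is a Seq[Node]: all children of <data>.
    println(elems.map(_.text).toList)  // List(1, 2)
  case _ =>
    println("not an array value")
}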
Example 17
Source File: OsmRecordReader.scala from magellan with Apache License 2.0
package magellan.mapreduce

import magellan.io.{OsmKey, OsmShape, OsmNode, OsmWay, OsmRelation}
import org.apache.hadoop.mapreduce.lib.input.FileSplit
import org.apache.hadoop.mapreduce.{InputSplit, RecordReader, TaskAttemptContext}

import scala.xml.{XML, Elem, Node}

private[magellan] class OsmRecordReader extends RecordReader[OsmKey, OsmShape] {

  val definedNodeLabels = Set("node", "way", "relation")
  var nodes: Seq[Node] = _
  var current: Int = 0
  lazy val total = nodes.length

  override def initialize(genericSplit: InputSplit, context: TaskAttemptContext): Unit = {
    val split: FileSplit = genericSplit.asInstanceOf[FileSplit]
    val job = MapReduceUtils.getConfigurationFromContext(context)

    val file = split.getPath()
    val fs = file.getFileSystem(job)
    val fileIn = fs.open(file)

    val doc = XML.load(fileIn)
    fileIn.close()
    nodes = doc.child.filter(n => definedNodeLabels contains n.label)
  }

  override def nextKeyValue(): Boolean = {
    if (!nodes.isEmpty) {
      if (current != 0) nodes = nodes.tail
      current += 1
    }
    !nodes.isEmpty
  }

  override def getCurrentKey(): OsmKey = {
    val current = nodes.head
    new OsmKey(current.label, (current \ "@id").text)
  }

  def getTags(shape: Node) = {
    (shape \ "tag").map(t => (t \ "@k").text -> (t \ "@v").text).toMap
  }

  def getOsmNode(shape: Node) = {
    new OsmNode(
      (shape \ "@id").text,
      (shape \ "@lat").text.toDouble,
      (shape \ "@lon").text.toDouble,
      getTags(shape))
  }

  def getOsmWay(shape: Node) = {
    new OsmWay((shape \ "@id").text, (shape \ "nd").map(w => (w \ "@ref").text), getTags(shape))
  }

  def getOsmRelation(shape: Node) = {
    new OsmRelation(
      (shape \ "@id").text,
      (shape \ "member").map(r => (r \ "@ref").text),
      getTags(shape)
    )
  }

  override def getCurrentValue(): OsmShape = {
    val current = nodes.head
    current.label match {
      case "node" => getOsmNode(current)
      case "way" => getOsmWay(current)
      case "relation" => getOsmRelation(current)
    }
  }

  override def getProgress(): Float = {
    current.toFloat / total
  }

  override def close(): Unit = { }
}
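The reader above works because Node.child exposes every child node, including the whitespace text nodes between elements, so filtering on label keeps only the OSM elements. A minimal sketch of that traversal (fragment invented):

import scala.xml.XML

val doc = XML.loadString(
  "<osm>\n  <node id=\"1\" lat=\"35.0\" lon=\"139.0\"><tag k=\"name\" v=\"A\"/></node>\n  <way id=\"2\"><nd ref=\"1\"/></way>\n</osm>")

val defined = Set("node", "way", "relation")
// Whitespace text nodes have label "#PCDATA", so the filter drops them.
for (shape <- doc.child.filter(n => defined contains n.label)) {
  val tags = (shape \ "tag").map(t => (t \ "@k").text -> (t \ "@v").text).toMap
  println(s"${shape.label} id=${(shape \ "@id").text} tags=$tags")
}
// node id=1 tags=Map(name -> A)
// way id=2 tags=Map()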
Example 18
Source File: request.scala from wix-http-testkit with MIT License
package com.wix.e2e.http.client.transformers.internals

import java.io.File

import akka.http.scaladsl.model.Uri.Query
import akka.http.scaladsl.model._
import akka.http.scaladsl.model.headers.{Cookie, RawHeader, `User-Agent`}
import akka.util.ByteString
import com.wix.e2e.http.api.Marshaller
import com.wix.e2e.http.client.transformers._
import com.wix.e2e.http.client.transformers.internals.RequestPartOps._
import com.wix.e2e.http.exceptions.UserAgentModificationNotSupportedException
import com.wix.e2e.http.{RequestTransformer, WixHttpTestkitResources}

import scala.xml.Node

trait HttpClientRequestUrlTransformers {
  def withParam(param: (String, String)): RequestTransformer = withParams(param)

  def withParams(params: (String, String)*): RequestTransformer = r =>
    r.copy(uri = r.uri.withQuery(Query(currentParams(r) ++ params: _*)))

  private def currentParams(r: HttpRequest): Seq[(String, String)] =
    r.uri.rawQueryString
     .map( Query(_).toSeq )
     .getOrElse( Seq.empty )
}

trait HttpClientRequestHeadersTransformers {
  def withHeader(header: (String, String)): RequestTransformer = withHeaders(header)

  def withHeaders(headers: (String, String)*): RequestTransformer =
    appendHeaders( headers.map {
      case (h, _) if h.toLowerCase == "user-agent" =>
        throw new UserAgentModificationNotSupportedException
      case (h, v) => RawHeader(h, v)
    } )

  def withUserAgent(value: String): RequestTransformer = appendHeaders(Seq(`User-Agent`(value)))

  def withCookie(cookie: (String, String)): RequestTransformer = withCookies(cookie)

  def withCookies(cookies: (String, String)*): RequestTransformer =
    appendHeaders( cookies.map(p => Cookie(p._1, p._2)) )

  private def appendHeaders[H <: HttpHeader](headers: Iterable[H]): RequestTransformer = r =>
    r.withHeaders( r.headers ++ headers )
}

trait HttpClientRequestBodyTransformers extends HttpClientContentTypes {

  @deprecated("use `withTextPayload`", since = "Dec18, 2017")
  def withPayload(body: String, contentType: ContentType = TextPlain): RequestTransformer =
    withPayload(ByteString(body).toByteBuffer.array, contentType)

  def withTextPayload(body: String, contentType: ContentType = TextPlain): RequestTransformer =
    withPayload(ByteString(body).toByteBuffer.array, contentType)

  def withPayload(bytes: Array[Byte], contentType: ContentType): RequestTransformer =
    setBody(HttpEntity(contentType, bytes))

  def withPayload(xml: Node): RequestTransformer =
    setBody(HttpEntity(XmlContent, WixHttpTestkitResources.xmlPrinter.format(xml)))

  // todo: enable default marshaller when deprecated `withPayload` is removed
  def withPayload(entity: AnyRef)(implicit marshaller: Marshaller): RequestTransformer =
    withTextPayload(marshaller.marshall(entity), JsonContent)

  def withFormData(formParams: (String, String)*): RequestTransformer =
    setBody(FormData(formParams.toMap).toEntity)

  def withMultipartData(parts: (String, RequestPart)*): RequestTransformer =
    setBody( Multipart.FormData(
      parts.map { case (n, p) =>
        Multipart.FormData.BodyPart(n, p.asBodyPartEntity, p.withAdditionalParams)
      }:_*).toEntity )

  private def setBody(entity: RequestEntity): RequestTransformer = _.copy(entity = entity)
}

object RequestPartOps {

  implicit class `RequestPart --> HttpEntity`(private val r: RequestPart) extends AnyVal {
    def asBodyPartEntity: BodyPartEntity = r match {
      case PlainRequestPart(v, c) => HttpEntity(v).withContentType(c)
      case BinaryRequestPart(b, c, _) => HttpEntity(c, b)
      case FileRequestPart(f, c, _) => HttpEntity.fromPath(c, f.toPath)
      case FileNameRequestPart(p, c, fn) => FileRequestPart(new File(p), c, fn).asBodyPartEntity
    }
  }

  implicit class `RequestPart --> AdditionalParams`(private val r: RequestPart) extends AnyVal {
    def withAdditionalParams: Map[String, String] = r match {
      case _: PlainRequestPart => NoAdditionalParams
      case BinaryRequestPart(_, _, fn) => additionalParams(fn)
      case FileRequestPart(_, _, fn) => additionalParams(fn)
      case FileNameRequestPart(_, _, fn) => additionalParams(fn)
    }

    private def additionalParams(filenameOpt: Option[String]) =
      filenameOpt.map(fn => Map("filename" -> fn))
                 .getOrElse( NoAdditionalParams )

    private def NoAdditionalParams = Map.empty[String, String]
  }
}

trait HttpClientRequestTransformersOps {
  implicit class TransformerConcatenation(first: RequestTransformer) {
    def and(second: RequestTransformer): RequestTransformer = first andThen second
  }
}
Example 19
Source File: FiberCacheManagerPage.scala from OAP with Apache License 2.0
package org.apache.spark.sql.oap.ui

import javax.servlet.http.HttpServletRequest

import scala.xml.Node

import org.apache.spark.internal.Logging
import org.apache.spark.sql.execution.datasources.oap.filecache.CacheStats
import org.apache.spark.sql.oap.OapRuntime
import org.apache.spark.ui.{UIUtils, WebUIPage}

private[ui] class FiberCacheManagerPage(parent: OapTab) extends WebUIPage("") with Logging {
  def render(request: HttpServletRequest): Seq[Node] = {
    val content =
      <div>
        {
          <div id="active-cms"></div> ++
          <script src={UIUtils.prependBaseUri(request, parent.basePath, "/static/utils.js")}></script> ++
          <script src={UIUtils.prependBaseUri(request, parent.basePath, "/static/oap/oap.js")}></script>
        }
      </div>
    UIUtils.headerSparkPage(request, "FiberCacheManager", content, parent, useDataTables = true)
  }
}

class FiberCacheManagerSummary private[spark](
    val id: String,
    val hostPort: String,
    val isActive: Boolean,
    val indexDataCacheSeparationEnable: Boolean,
    val memoryUsed: Long,
    val maxMemory: Long,
    val cacheSize: Long,
    val cacheCount: Long,
    val backendCacheSize: Long,
    val backendCacheCount: Long,
    val dataFiberSize: Long,
    val dataFiberCount: Long,
    val indexFiberSize: Long,
    val indexFiberCount: Long,
    val pendingFiberSize: Long,
    val pendingFiberCount: Long,
    val dataFiberHitCount: Long,
    val dataFiberMissCount: Long,
    val dataFiberLoadCount: Long,
    val dataTotalLoadTime: Long,
    val dataEvictionCount: Long,
    val indexFiberHitCount: Long,
    val indexFiberMissCount: Long,
    val indexFiberLoadCount: Long,
    val indexTotalLoadTime: Long,
    val indexEvictionCount: Long)
Example 20
Source File: KerasParser.scala from jigg with Apache License 2.0
package jigg.ml.keras

// NOTE: this excerpt omits the enclosing class definition (which provides the
// `model` and `table` members and an `argmax` implementation) and some imports
// from the original file. The two imports below are needed by the code shown.
import scala.collection.mutable.ArrayBuffer
import scala.xml.Node

private val tagset: Map[Int, String] = Map(0 -> "B", 1 -> "I", 2 -> "O")

def parsing(str: String): Array[(Int, Int)] = {
  // For dummy input to indicate boundaries of sentence.
  val s = "\n" + str + "\n\n"
  val inputData = table.encodeCharacter(s)
  val outputData = model.convert(inputData)
  val tags = for {
    i <- 1 until outputData.rows - 2
    maxID = argmax(outputData(i, ::))
  } yield maxID
  getOffsets(tags.toArray)
}

def parsing(tokens: Node): Array[Array[String]] = {
  // For dummy input to indicate boundaries of sentence.
  val words = Array("\n").union(
    (tokens \\ "tokens").flatMap(x => x \\ "@lemma").toArray.map(x => x.toString)).union(Array("\n\n"))
  val ids = (tokens \\ "tokens").flatMap(x => x \\ "@id").toArray.map(x => x.toString)
  val inputData = table.encodeWords(words)
  val outputData = model.convert(inputData)
  val tags = for {
    i <- 1 until outputData.rows - 2
    maxID = argmax(outputData(i, ::))
  } yield maxID
  val ranges = getOffsets(tags.toArray)
  ranges.map(x => ids.slice(x._1, x._2))
}

def getOffsets(data: Array[Int]): Array[(Int, Int)] = {
  val ranges = ArrayBuffer[(Int, Int)]()
  var bpos = -1
  for (i <- data.indices) {
    tagset(data(i)) match {
      case "B" =>
        if (bpos >= 0) ranges += ((bpos, i))
        bpos = i
      case "I" if i == 0 || bpos == -2 =>
        bpos = i
      case "O" =>
        if (bpos >= 0) ranges += ((bpos, i))
        bpos = -2
      case _ if i == data.indices.last =>
        ranges += ((bpos, i + 1))
      case _ =>
    }
  }
  ranges.toArray
}
Example 21
Source File: RegexSentenceAnnotator.scala from jigg with Apache License 2.0
package jigg.pipeline

import java.util.Properties

import scala.io.Source
import scala.xml.{Node, Elem, Text, Atom}

import jigg.util.XMLUtil.RichNode

class RegexSentenceAnnotator(override val name: String, override val props: Properties)
  extends Annotator {

  @Prop(gloss = "Regular expression to segment lines (if omitted, specified method is used)") var pattern = ""
  @Prop(gloss = "Use predefined segment pattern newLine|point|pointAndNewLine") var method = "pointAndNewLine"

  readProps()

  val splitRegex = pattern match {
    case "" =>
      method match {
        case "newLine" => RegexSentenceAnnotator.newLine
        case "point" => RegexSentenceAnnotator.point
        case "pointAndNewLine" => RegexSentenceAnnotator.pointAndNewLine
        case other => argumentError("method")
      }
    case pattern => pattern.r
  }

  private[this] val sentenceIDGen = jigg.util.IDGenerator("s")

  override def annotate(annotation: Node): Node = {

    annotation.replaceAll("document") { e =>
      val line = e.text
      val sentenceBoundaries =
        0 +: splitRegex.findAllMatchIn(line).map(_.end).toVector :+ line.length
      val sentences: Vector[Node] =
        sentenceBoundaries.sliding(2).toVector flatMap { case Seq(begin_, end_) =>

          def isSpace(c: Char) = c == ' ' || c == '\t' || c == '\n'

          val snippet = line.substring(begin_, end_)
          val begin = snippet.indexWhere(!isSpace(_)) match {
            case -1 => begin_ // space only
            case offset => begin_ + offset
          }
          val end = snippet.lastIndexWhere(!isSpace(_)) match {
            case -1 => begin_
            case offset => begin_ + offset + 1
          }

          // val sentence: String = line.substring(begin, end).trim()
          val sentence: String = line.substring(begin, end)
          if (sentence.isEmpty)
            None
          else {
            Option(<sentence
              id={ sentenceIDGen.next }
              characterOffsetBegin={ begin+"" }
              characterOffsetEnd={ end+"" }>{ sentence }</sentence>)
          }
        }
      // val textRemoved = XMLUtil.removeText(e)
      // XMLUtil.addChild(textRemoved, <sentences>{ sentences }</sentences>)
      e addChild <sentences>{ sentences }</sentences>
    }
  }

  override def requires = Set()
  override def requirementsSatisfied = Set(Requirement.Ssplit)
}

object RegexSentenceAnnotator extends AnnotatorCompanion[RegexSentenceAnnotator] {
  val newLine = """\n+""".r
  val point = """。+""".r
  val pointAndNewLine = """\n+|。\n*""".r
}
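The boundary arithmetic above is easy to check in isolation: each match end becomes a split point, with 0 and line.length added as sentinels. A quick standalone sketch using the pointAndNewLine pattern (sample text invented):

val pointAndNewLine = """\n+|。\n*""".r
val line = "今日は晴れ。明日は雨。"

// Split points: start, each 。 boundary, end of line.
val boundaries = 0 +: pointAndNewLine.findAllMatchIn(line).map(_.end).toVector :+ line.length
println(boundaries)  // Vector(0, 6, 11, 11)

val sentences = boundaries.sliding(2).collect {
  case Seq(b, e) if e > b => line.substring(b, e)
}.toList
println(sentences)   // List(今日は晴れ。, 明日は雨。)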
Example 22
Source File: SpaceTokenizerAnnotator.scala from jigg with Apache License 2.0
package jigg.pipeline

// The import block was dropped from this excerpt; these are the imports the
// code below needs (RichNode provides the `addChild` extension, as in the
// RegexSentenceAnnotator example above).
import java.util.Properties

import scala.xml.Node

import jigg.util.XMLUtil.RichNode

class SpaceTokenizerAnnotator(override val name: String, override val props: Properties)
  extends SentencesAnnotator {

  override def newSentenceAnnotation(sentence: Node): Node = {
    val sindex = sentence \@ "id"
    val text = sentence.text

    val range = (0 until text.size)
    def isSpace(c: Char) = c == ' ' || c == '\t'

    val begins = 0 +: (1 until text.size).filter { i => isSpace(text(i-1)) && !isSpace(text(i)) }
    val ends = begins map {
      range indexWhere (i => isSpace(text(i)), _) match {
        case -1 => text.size
        case e => e
      }
    }
    val tokenSeq = begins.zip(ends).zipWithIndex map { case ((b, e), i) =>
      <token
        id={ sindex + "_tok" + i }
        form={ text.substring(b, e) }
        characterOffsetBegin={ b+"" }
        characterOffsetEnd={ e+"" }/>
    }
    val tokens = <tokens annotators={ name }>{ tokenSeq }</tokens>
    sentence addChild tokens
  }

  override def requires = Set(Requirement.Ssplit)
  override def requirementsSatisfied = Set(Requirement.Tokenize)
}
Example 23
Source File: DocumentAnnotator.scala from jigg with Apache License 2.0
package jigg.pipeline

// The import block was dropped from this excerpt; these are the imports the
// code below needs (RichNode provides the `replaceAll` extension, as in the
// RegexSentenceAnnotator example above).
import scala.xml.{Elem, Node}

import jigg.util.XMLUtil.RichNode

trait DocumentAnnotator extends Annotator {
  override def annotate(annotation: Node): Node = {

    annotation.replaceAll("root") { case e: Elem =>
      val newChild = Annotator.makePar(e.child, nThreads).map { c =>
        c match {
          case c if c.label == "document" =>
            try newDocumentAnnotation(c) catch {
              case e: AnnotationError =>
                System.err.println(s"Failed to annotate a document by $name.")
                Annotator.annotateError(c, name, e)
            }
          case c => c
        }
      }.seq
      e.copy(child = newChild)
    }
  }

  def newDocumentAnnotation(sentence: Node): Node
}

trait SeqDocumentAnnotator extends DocumentAnnotator {
  override def nThreads = 1
}
Example 24
Source File: BunsetsuKerasAnnotatorTest.scala from jigg with Apache License 2.0
package jigg.pipeline

import java.util.Properties

import org.scalatest.FunSuite
import org.scalatest.Matchers._

import scala.xml.{NodeSeq, Node}

class BunsetsuKerasAnnotatorTest extends FunSuite {

  def findPath(localPath: String): String = getClass.getClassLoader.getResource(localPath).getPath

  def segment(node: Node, properties: Properties): NodeSeq = {
    val bunsetsuSplitter = new IPABunsetsuKerasAnnotator("bunsetsuKeras", properties)
    bunsetsuSplitter.mkLocalAnnotator.newSentenceAnnotation(node)
  }

  val properties = new Properties
  properties.setProperty("bunsetsuKeras.model", findPath("./data/keras/bunsetsu_model.h5"))
  properties.setProperty("bunsetsuKeras.table", findPath("data/keras/jpnLookupWords.json"))

  test("do chunking") {
    val chunks = segment(Sentences.xml("oneSentence"), properties) \\ "chunk"
    chunks.length should be (2)
  }

  object Sentences {
    val xml = Map(
      "oneSentence" ->
        <sentence id="s1" characterOffsetBegin="0" characterOffsetEnd="6">
          梅が咲いた。
          <tokens annotators="mecab">
            <token id="s1_tok0" form="梅" offsetBegin="0" offsetEnd="1" pos="名詞" pos1="一般" pos2="*" pos3="*" cType="*" cForm="*" lemma="梅" yomi="ウメ" pron="ウメ"/>
            <token id="s1_tok1" form="が" offsetBegin="1" offsetEnd="2" pos="助詞" pos1="格助詞" pos2="一般" pos3="*" cType="*" cForm="*" lemma="が" yomi="ガ" pron="ガ"/>
            <token id="s1_tok2" form="咲い" offsetBegin="2" offsetEnd="4" pos="動詞" pos1="自立" pos2="*" pos3="*" cType="五段・カ行イ音便" cForm="連用タ接続" lemma="咲く" yomi="サイ" pron="サイ"/>
            <token id="s1_tok3" form="た" offsetBegin="4" offsetEnd="5" pos="助動詞" pos1="*" pos2="*" pos3="*" cType="特殊・タ" cForm="基本形" lemma="た" yomi="タ" pron="タ"/>
            <token id="s1_tok4" form="。" offsetBegin="5" offsetEnd="6" pos="記号" pos1="句点" pos2="*" pos3="*" cType="*" cForm="*" lemma="。" yomi="。" pron="。"/>
          </tokens>
        </sentence>
    )
  }
}
Example 25
Source File: MecabAnnotatorSpec.scala from jigg with Apache License 2.0
package jigg.pipeline

import java.util.Properties

import scala.xml.Node

import org.scalatest._

class MecabAnnotatorSpec extends BaseAnnotatorSpec {

  def stubCom(output: String) = new StubExternalCommunicator(output)
  def mapCom(responces: Map[String, String]) = new MapStubExternalCommunicator(responces)

  def newIPA(mkCom: () => IOCommunicator, threads: Int = 1, p: Properties = new Properties) =
    new IPAMecabAnnotator("mecab", p) {
      override def mkLocalAnnotator = new IPALocalMecabAnnotator {
        override def mkCommunicator = mkCom()
      }
      override def nThreads = threads
    }

  "Annotator with nThreads=1" should "be able to annotate one sentence" in {
    val s = "a"
    val in = <root><document><sentences><sentence id="s0">a</sentence></sentences></document></root>
    val out = """a 名詞,固有名詞,組織,*,*,*,*
EOS"""

    val annotator = newIPA(() => stubCom(out), threads = 1)
    val result = annotator.annotate(in)

    val tokens = result \\ "token"
    tokens.size should be(1)
    (tokens(0) \@ "pos") should be("名詞")

    result \\ "tokens" \@ "annotators" should be("mecab")
  }

  "Annotator with nThreads=2" should "annotate in parallel" in {
    val responces = Map(
      "a" -> """a 名詞,固有名詞,*,*,*,*,*
EOS""",
      "b" -> """b 動詞,*,*,*,*,*,*
EOS""",
      "c" -> """c 形容詞,*,*,*,*,*,*
EOS""")

    val in =
      <root>
        <document>
          <sentences>
            <sentence id="s0">a</sentence>
            <sentence id="s1">b</sentence>
            <sentence id="s2">c</sentence>
          </sentences>
        </document>
      </root>

    val annotator = newIPA(() => mapCom(responces), threads = 2)
    val result = annotator.annotate(in)

    val sentences = result \\ "sentence"
    sentences.size should be(3)

    ((sentences(0) \\ "token")(0) \@ "form") should be("a")
    ((sentences(1) \\ "token")(0) \@ "form") should be("b")
    ((sentences(2) \\ "token")(0) \@ "form") should be("c")
  }
}
Example 26
Source File: HistoryPage.scala from sparkoscope with Apache License 2.0
package org.apache.spark.deploy.history

import javax.servlet.http.HttpServletRequest

import scala.xml.Node

import org.apache.spark.ui.{UIUtils, WebUIPage}

private[history] class HistoryPage(parent: HistoryServer) extends WebUIPage("") {

  def render(request: HttpServletRequest): Seq[Node] = {
    val requestedIncomplete =
      Option(request.getParameter("showIncomplete")).getOrElse("false").toBoolean

    val allAppsSize = parent.getApplicationList().count(_.completed != requestedIncomplete)
    val eventLogsUnderProcessCount = parent.getEventLogsUnderProcess()
    val lastUpdatedTime = parent.getLastUpdatedTime()
    val providerConfig = parent.getProviderConfig()
    val content =
      <script src={UIUtils.prependBaseUri("/static/historypage-common.js")}></script>
      <div>
        <div class="span12">
          <ul class="unstyled">
            {providerConfig.map { case (k, v) => <li><strong>{k}:</strong> {v}</li> }}
          </ul>
          {
            if (eventLogsUnderProcessCount > 0) {
              <p>There are {eventLogsUnderProcessCount} event log(s) currently being
                processed which may result in additional applications getting listed on this page.
                Refresh the page to view updates. </p>
            }
          }
          {
            if (lastUpdatedTime > 0) {
              <p>Last updated: <span id="last-updated">{lastUpdatedTime}</span></p>
            }
          }
          {
            if (allAppsSize > 0) {
              <script src={UIUtils.prependBaseUri("/static/dataTables.rowsGroup.js")}></script> ++
              <div id="history-summary" class="span12 pagination"></div> ++
              <script src={UIUtils.prependBaseUri("/static/utils.js")}></script> ++
              <script src={UIUtils.prependBaseUri("/static/historypage.js")}></script> ++
              <script>setAppLimit({parent.maxApplications})</script>
            } else if (requestedIncomplete) {
              <h4>No incomplete applications found!</h4>
            } else if (eventLogsUnderProcessCount > 0) {
              <h4>No completed applications found!</h4>
            } else {
              <h4>No completed applications found!</h4> ++ parent.emptyListingHtml
            }
          }
          <a href={makePageLink(!requestedIncomplete)}>
            {
              if (requestedIncomplete) {
                "Back to completed applications"
              } else {
                "Show incomplete applications"
              }
            }
          </a>
        </div>
      </div>
    UIUtils.basicSparkPage(content, "History Server", true)
  }

  private def makePageLink(showIncomplete: Boolean): String = {
    UIUtils.prependBaseUri("/?" + "showIncomplete=" + showIncomplete)
  }
}
Example 27
Source File: ExecutorsPage.scala from sparkoscope with Apache License 2.0
package org.apache.spark.ui.exec

import javax.servlet.http.HttpServletRequest

import scala.xml.Node

import org.apache.spark.status.api.v1.ExecutorSummary
import org.apache.spark.ui.{UIUtils, WebUIPage}

// This isn't even used anymore -- but we need to keep it b/c of a MiMa false positive
private[ui] case class ExecutorSummaryInfo(
    id: String,
    hostPort: String,
    rddBlocks: Int,
    memoryUsed: Long,
    diskUsed: Long,
    activeTasks: Int,
    failedTasks: Int,
    completedTasks: Int,
    totalTasks: Int,
    totalDuration: Long,
    totalInputBytes: Long,
    totalShuffleRead: Long,
    totalShuffleWrite: Long,
    maxMemory: Long,
    executorLogs: Map[String, String])

private[ui] class ExecutorsPage(
    parent: ExecutorsTab,
    threadDumpEnabled: Boolean)
  extends WebUIPage("") {

  private val listener = parent.listener

  def render(request: HttpServletRequest): Seq[Node] = {
    val content =
      <div>
        {
          <div id="active-executors"></div> ++
          <script src={UIUtils.prependBaseUri("/static/utils.js")}></script> ++
          <script src={UIUtils.prependBaseUri("/static/executorspage.js")}></script> ++
          <script>setThreadDumpEnabled({threadDumpEnabled})</script>
        }
      </div>;

    UIUtils.headerSparkPage("Executors", content, parent, useDataTables = true)
  }
}

private[spark] object ExecutorsPage {
  def getExecInfo(
      listener: ExecutorsListener,
      statusId: Int,
      isActive: Boolean): ExecutorSummary = {
    val status = if (isActive) {
      listener.activeStorageStatusList(statusId)
    } else {
      listener.deadStorageStatusList(statusId)
    }
    val execId = status.blockManagerId.executorId
    val hostPort = status.blockManagerId.hostPort
    val rddBlocks = status.numBlocks
    val memUsed = status.memUsed
    val maxMem = status.maxMem
    val diskUsed = status.diskUsed
    val taskSummary = listener.executorToTaskSummary.getOrElse(execId, ExecutorTaskSummary(execId))

    new ExecutorSummary(
      execId,
      hostPort,
      isActive,
      rddBlocks,
      memUsed,
      diskUsed,
      taskSummary.totalCores,
      taskSummary.tasksMax,
      taskSummary.tasksActive,
      taskSummary.tasksFailed,
      taskSummary.tasksComplete,
      taskSummary.tasksActive + taskSummary.tasksFailed + taskSummary.tasksComplete,
      taskSummary.duration,
      taskSummary.jvmGCTime,
      taskSummary.inputBytes,
      taskSummary.shuffleRead,
      taskSummary.shuffleWrite,
      maxMem,
      taskSummary.executorLogs
    )
  }
}
Example 28
Source File: ExecutorThreadDumpPage.scala from sparkoscope with Apache License 2.0
package org.apache.spark.ui.exec

import javax.servlet.http.HttpServletRequest

import scala.xml.{Node, Text}

import org.apache.spark.ui.{UIUtils, WebUIPage}

private[ui] class ExecutorThreadDumpPage(parent: ExecutorsTab) extends WebUIPage("threadDump") {

  private val sc = parent.sc

  def render(request: HttpServletRequest): Seq[Node] = {
    val executorId = Option(request.getParameter("executorId")).map { executorId =>
      UIUtils.decodeURLParameter(executorId)
    }.getOrElse {
      throw new IllegalArgumentException(s"Missing executorId parameter")
    }
    val time = System.currentTimeMillis()
    val maybeThreadDump = sc.get.getExecutorThreadDump(executorId)

    val content = maybeThreadDump.map { threadDump =>
      val dumpRows = threadDump.sortWith {
        case (threadTrace1, threadTrace2) =>
          val v1 = if (threadTrace1.threadName.contains("Executor task launch")) 1 else 0
          val v2 = if (threadTrace2.threadName.contains("Executor task launch")) 1 else 0
          if (v1 == v2) {
            threadTrace1.threadName.toLowerCase < threadTrace2.threadName.toLowerCase
          } else {
            v1 > v2
          }
      }.map { thread =>
        val threadId = thread.threadId
        val blockedBy = thread.blockedByThreadId match {
          case Some(blockedByThreadId) =>
            <div>
              Blocked by <a href={s"#${thread.blockedByThreadId}_td_id"}>
              Thread {thread.blockedByThreadId} {thread.blockedByLock}</a>
            </div>
          case None => Text("")
        }
        val heldLocks = thread.holdingLocks.mkString(", ")

        <tr id={s"thread_${threadId}_tr"} class="accordion-heading"
            onclick={s"toggleThreadStackTrace($threadId, false)"}
            onmouseover={s"onMouseOverAndOut($threadId)"}
            onmouseout={s"onMouseOverAndOut($threadId)"}>
          <td id={s"${threadId}_td_id"}>{threadId}</td>
          <td id={s"${threadId}_td_name"}>{thread.threadName}</td>
          <td id={s"${threadId}_td_state"}>{thread.threadState}</td>
          <td id={s"${threadId}_td_locking"}>{blockedBy}{heldLocks}</td>
          <td id={s"${threadId}_td_stacktrace"} class="hidden">{thread.stackTrace}</td>
        </tr>
      }

      <div class="row-fluid">
        <p>Updated at {UIUtils.formatDate(time)}</p>
        {
          // scalastyle:off
          <p><a class="expandbutton" onClick="expandAllThreadStackTrace(true)">
            Expand All
          </a></p>
          <p><a class="expandbutton hidden" onClick="collapseAllThreadStackTrace(true)">
            Collapse All
          </a></p>
          <div class="form-inline">
            <div class="bs-example" data-example-id="simple-form-inline">
              <div class="form-group">
                <div class="input-group">
                  Search: <input type="text" class="form-control" id="search"
                                 oninput="onSearchStringChange()"></input>
                </div>
              </div>
            </div>
          </div>
          <p></p>
          // scalastyle:on
        }
        <table class={UIUtils.TABLE_CLASS_STRIPED + " accordion-group" + " sortable"}>
          <thead>
            <th onClick="collapseAllThreadStackTrace(false)">Thread ID</th>
            <th onClick="collapseAllThreadStackTrace(false)">Thread Name</th>
            <th onClick="collapseAllThreadStackTrace(false)">Thread State</th>
            <th onClick="collapseAllThreadStackTrace(false)">Thread Locks</th>
          </thead>
          <tbody>{dumpRows}</tbody>
        </table>
      </div>
    }.getOrElse(Text("Error fetching thread dump"))
    UIUtils.headerSparkPage(s"Thread dump for executor $executorId", content, parent)
  }
}
Example 29
Source File: EnvironmentPage.scala from sparkoscope with Apache License 2.0
package org.apache.spark.ui.env

import javax.servlet.http.HttpServletRequest

import scala.xml.Node

import org.apache.spark.ui.{UIUtils, WebUIPage}

private[ui] class EnvironmentPage(parent: EnvironmentTab) extends WebUIPage("") {
  private val listener = parent.listener

  private def removePass(kv: (String, String)): (String, String) = {
    if (kv._1.toLowerCase.contains("password") || kv._1.toLowerCase.contains("secret")) {
      (kv._1, "******")
    } else kv
  }

  def render(request: HttpServletRequest): Seq[Node] = {
    val runtimeInformationTable = UIUtils.listingTable(
      propertyHeader, jvmRow, listener.jvmInformation, fixedWidth = true)
    val sparkPropertiesTable = UIUtils.listingTable(
      propertyHeader, propertyRow, listener.sparkProperties.map(removePass), fixedWidth = true)
    val systemPropertiesTable = UIUtils.listingTable(
      propertyHeader, propertyRow, listener.systemProperties, fixedWidth = true)
    val classpathEntriesTable = UIUtils.listingTable(
      classPathHeaders, classPathRow, listener.classpathEntries, fixedWidth = true)
    val content =
      <span>
        <h4>Runtime Information</h4> {runtimeInformationTable}
        <h4>Spark Properties</h4> {sparkPropertiesTable}
        <h4>System Properties</h4> {systemPropertiesTable}
        <h4>Classpath Entries</h4> {classpathEntriesTable}
      </span>

    UIUtils.headerSparkPage("Environment", content, parent)
  }

  private def propertyHeader = Seq("Name", "Value")
  private def classPathHeaders = Seq("Resource", "Source")
  private def jvmRow(kv: (String, String)) = <tr><td>{kv._1}</td><td>{kv._2}</td></tr>
  private def propertyRow(kv: (String, String)) = <tr><td>{kv._1}</td><td>{kv._2}</td></tr>
  private def classPathRow(data: (String, String)) = <tr><td>{data._1}</td><td>{data._2}</td></tr>
}
Example 30
Source File: PoolTable.scala from sparkoscope with Apache License 2.0
package org.apache.spark.ui.jobs

import java.net.URLEncoder

import scala.collection.mutable.HashMap
import scala.xml.Node

import org.apache.spark.scheduler.{Schedulable, StageInfo}
import org.apache.spark.ui.UIUtils

private[ui] class PoolTable(pools: Seq[Schedulable], parent: StagesTab) {
  private val listener = parent.progressListener

  def toNodeSeq: Seq[Node] = {
    listener.synchronized {
      poolTable(poolRow, pools)
    }
  }

  private def poolTable(
      makeRow: (Schedulable, HashMap[String, HashMap[Int, StageInfo]]) => Seq[Node],
      rows: Seq[Schedulable]): Seq[Node] = {
    <table class="table table-bordered table-striped table-condensed sortable table-fixed">
      <thead>
        <th>Pool Name</th>
        <th>Minimum Share</th>
        <th>Pool Weight</th>
        <th>Active Stages</th>
        <th>Running Tasks</th>
        <th>SchedulingMode</th>
      </thead>
      <tbody>
        {rows.map(r => makeRow(r, listener.poolToActiveStages))}
      </tbody>
    </table>
  }

  private def poolRow(
      p: Schedulable,
      poolToActiveStages: HashMap[String, HashMap[Int, StageInfo]]): Seq[Node] = {
    val activeStages = poolToActiveStages.get(p.name) match {
      case Some(stages) => stages.size
      case None => 0
    }
    val href = "%s/stages/pool?poolname=%s"
      .format(UIUtils.prependBaseUri(parent.basePath), URLEncoder.encode(p.name, "UTF-8"))
    <tr>
      <td>
        <a href={href}>{p.name}</a>
      </td>
      <td>{p.minShare}</td>
      <td>{p.weight}</td>
      <td>{activeStages}</td>
      <td>{p.runningTasks}</td>
      <td>{p.schedulingMode}</td>
    </tr>
  }
}
Example 31
Source File: PoolPage.scala From sparkoscope with Apache License 2.0 | 5 votes |
package org.apache.spark.ui.jobs import javax.servlet.http.HttpServletRequest import scala.xml.Node import org.apache.spark.scheduler.StageInfo import org.apache.spark.ui.{UIUtils, WebUIPage} private[ui] class PoolPage(parent: StagesTab) extends WebUIPage("pool") { private val sc = parent.sc private val listener = parent.progressListener def render(request: HttpServletRequest): Seq[Node] = { listener.synchronized { val poolName = Option(request.getParameter("poolname")).map { poolname => UIUtils.decodeURLParameter(poolname) }.getOrElse { throw new IllegalArgumentException(s"Missing poolname parameter") } val poolToActiveStages = listener.poolToActiveStages val activeStages = poolToActiveStages.get(poolName) match { case Some(s) => s.values.toSeq case None => Seq[StageInfo]() } val shouldShowActiveStages = activeStages.nonEmpty val activeStagesTable = new StageTableBase(request, activeStages, "", "activeStage", parent.basePath, "stages/pool", parent.progressListener, parent.isFairScheduler, parent.killEnabled, isFailedStage = false) // For now, pool information is only accessible in live UIs val pools = sc.map(_.getPoolForName(poolName).getOrElse { throw new IllegalArgumentException(s"Unknown poolname: $poolName") }).toSeq val poolTable = new PoolTable(pools, parent) var content = <h4>Summary </h4> ++ poolTable.toNodeSeq if (shouldShowActiveStages) { content ++= <h4>{activeStages.size} Active Stages</h4> ++ activeStagesTable.toNodeSeq } UIUtils.headerSparkPage("Fair Scheduler Pool: " + poolName, content, parent) } } }
Example 32
Source File: PagedTableSuite.scala From sparkoscope with Apache License 2.0 | 5 votes |
package org.apache.spark.ui import scala.xml.Node import org.apache.spark.SparkFunSuite class PagedDataSourceSuite extends SparkFunSuite { test("basic") { val dataSource1 = new SeqPagedDataSource[Int](1 to 5, pageSize = 2) assert(dataSource1.pageData(1) === PageData(3, (1 to 2))) val dataSource2 = new SeqPagedDataSource[Int](1 to 5, pageSize = 2) assert(dataSource2.pageData(2) === PageData(3, (3 to 4))) val dataSource3 = new SeqPagedDataSource[Int](1 to 5, pageSize = 2) assert(dataSource3.pageData(3) === PageData(3, Seq(5))) val dataSource4 = new SeqPagedDataSource[Int](1 to 5, pageSize = 2) val e1 = intercept[IndexOutOfBoundsException] { dataSource4.pageData(4) } assert(e1.getMessage === "Page 4 is out of range. Please select a page number between 1 and 3.") val dataSource5 = new SeqPagedDataSource[Int](1 to 5, pageSize = 2) val e2 = intercept[IndexOutOfBoundsException] { dataSource5.pageData(0) } assert(e2.getMessage === "Page 0 is out of range. Please select a page number between 1 and 3.") } } class PagedTableSuite extends SparkFunSuite { test("pageNavigation") { // Create a fake PagedTable to test pageNavigation val pagedTable = new PagedTable[Int] { override def tableId: String = "" override def tableCssClass: String = "" override def dataSource: PagedDataSource[Int] = null override def pageLink(page: Int): String = page.toString override def headers: Seq[Node] = Nil override def row(t: Int): Seq[Node] = Nil override def pageSizeFormField: String = "pageSize" override def prevPageSizeFormField: String = "prevPageSize" override def pageNumberFormField: String = "page" override def goButtonFormPath: String = "" } assert(pagedTable.pageNavigation(1, 10, 1) === Nil) assert( (pagedTable.pageNavigation(1, 10, 2).head \\ "li").map(_.text.trim) === Seq("1", "2", ">")) assert( (pagedTable.pageNavigation(2, 10, 2).head \\ "li").map(_.text.trim) === Seq("<", "1", "2")) assert((pagedTable.pageNavigation(1, 10, 100).head \\ "li").map(_.text.trim) === (1 to 10).map(_.toString) ++ Seq(">", ">>")) assert((pagedTable.pageNavigation(2, 10, 100).head \\ "li").map(_.text.trim) === Seq("<") ++ (1 to 10).map(_.toString) ++ Seq(">", ">>")) assert((pagedTable.pageNavigation(100, 10, 100).head \\ "li").map(_.text.trim) === Seq("<<", "<") ++ (91 to 100).map(_.toString)) assert((pagedTable.pageNavigation(99, 10, 100).head \\ "li").map(_.text.trim) === Seq("<<", "<") ++ (91 to 100).map(_.toString) ++ Seq(">")) assert((pagedTable.pageNavigation(11, 10, 100).head \\ "li").map(_.text.trim) === Seq("<<", "<") ++ (11 to 20).map(_.toString) ++ Seq(">", ">>")) assert((pagedTable.pageNavigation(93, 10, 97).head \\ "li").map(_.text.trim) === Seq("<<", "<") ++ (91 to 97).map(_.toString) ++ Seq(">")) } } private[spark] class SeqPagedDataSource[T](seq: Seq[T], pageSize: Int) extends PagedDataSource[T](pageSize) { override protected def dataSize: Int = seq.size override protected def sliceData(from: Int, to: Int): Seq[T] = seq.slice(from, to) }
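The suite fixes the paging contract: pages are 1-based, and out-of-range pages raise IndexOutOfBoundsException with the message the assertions check. A minimal sketch of the same slice arithmetic, assuming those conventions:

object PagingSketch {
  def pageBounds(dataSize: Int, pageSize: Int, page: Int): (Int, Int) = {
    val totalPages = (dataSize + pageSize - 1) / pageSize // ceiling division
    if (page <= 0 || page > totalPages)
      throw new IndexOutOfBoundsException(
        s"Page $page is out of range. Please select a page number between 1 and $totalPages.")
    val from = (page - 1) * pageSize
    (from, math.min(from + pageSize, dataSize))
  }

  def main(args: Array[String]): Unit = {
    val data = 1 to 5
    val (from, to) = pageBounds(data.size, pageSize = 2, page = 3)
    println(data.slice(from, to)) // Vector(5), matching dataSource3 in the suite
  }
}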
Example 33
Source File: StagePageSuite.scala From sparkoscope with Apache License 2.0 | 5 votes |
package org.apache.spark.ui import javax.servlet.http.HttpServletRequest import scala.xml.Node import org.mockito.Mockito.{mock, when, RETURNS_SMART_NULLS} import org.apache.spark._ import org.apache.spark.executor.TaskMetrics import org.apache.spark.scheduler._ import org.apache.spark.storage.StorageStatusListener import org.apache.spark.ui.exec.ExecutorsListener import org.apache.spark.ui.jobs.{JobProgressListener, StagePage, StagesTab} import org.apache.spark.ui.scope.RDDOperationGraphListener class StagePageSuite extends SparkFunSuite with LocalSparkContext { private val peakExecutionMemory = 10 test("peak execution memory should displayed") { val conf = new SparkConf(false) val html = renderStagePage(conf).toString().toLowerCase val targetString = "peak execution memory" assert(html.contains(targetString)) } test("SPARK-10543: peak execution memory should be per-task rather than cumulative") { val conf = new SparkConf(false) val html = renderStagePage(conf).toString().toLowerCase // verify min/25/50/75/max show task value not cumulative values assert(html.contains(s"<td>$peakExecutionMemory.0 b</td>" * 5)) } private def renderStagePage(conf: SparkConf): Seq[Node] = { val jobListener = new JobProgressListener(conf) val graphListener = new RDDOperationGraphListener(conf) val executorsListener = new ExecutorsListener(new StorageStatusListener(conf), conf) val tab = mock(classOf[StagesTab], RETURNS_SMART_NULLS) val request = mock(classOf[HttpServletRequest]) when(tab.conf).thenReturn(conf) when(tab.progressListener).thenReturn(jobListener) when(tab.operationGraphListener).thenReturn(graphListener) when(tab.executorsListener).thenReturn(executorsListener) when(tab.appName).thenReturn("testing") when(tab.headerTabs).thenReturn(Seq.empty) when(request.getParameter("id")).thenReturn("0") when(request.getParameter("attempt")).thenReturn("0") val page = new StagePage(tab) // Simulate a stage in job progress listener val stageInfo = new StageInfo(0, 0, "dummy", 1, Seq.empty, Seq.empty, "details") // Simulate two tasks to test PEAK_EXECUTION_MEMORY correctness (1 to 2).foreach { taskId => val taskInfo = new TaskInfo(taskId, taskId, 0, 0, "0", "localhost", TaskLocality.ANY, false) jobListener.onStageSubmitted(SparkListenerStageSubmitted(stageInfo)) jobListener.onTaskStart(SparkListenerTaskStart(0, 0, taskInfo)) taskInfo.markFinished(TaskState.FINISHED) val taskMetrics = TaskMetrics.empty taskMetrics.incPeakExecutionMemory(peakExecutionMemory) jobListener.onTaskEnd( SparkListenerTaskEnd(0, 0, "result", Success, taskInfo, taskMetrics)) } jobListener.onStageCompleted(SparkListenerStageCompleted(stageInfo)) page.render(request) } }
Example 34
Source File: HistoryNotFoundPage.scala From SparkCore with Apache License 2.0 | 5 votes |
package org.apache.spark.deploy.master.ui import java.net.URLDecoder import javax.servlet.http.HttpServletRequest import scala.xml.Node import org.apache.spark.ui.{UIUtils, WebUIPage} private[spark] class HistoryNotFoundPage(parent: MasterWebUI) extends WebUIPage("history/not-found") { def render(request: HttpServletRequest): Seq[Node] = { val titleParam = request.getParameter("title") val msgParam = request.getParameter("msg") val exceptionParam = request.getParameter("exception") // If no parameters are specified, assume the user did not enable event logging val defaultTitle = "Event logging is not enabled" val defaultContent = <div class="row-fluid"> <div class="span12" style="font-size:14px"> No event logs were found for this application! To <a href="http://spark.apache.org/docs/latest/monitoring.html">enable event logging</a>, set <span style="font-style:italic">spark.eventLog.enabled</span> to true and <span style="font-style:italic">spark.eventLog.dir</span> to the directory to which your event logs are written. </div> </div> val title = Option(titleParam).getOrElse(defaultTitle) val content = Option(msgParam) .map { msg => URLDecoder.decode(msg, "UTF-8") } .map { msg => <div class="row-fluid"> <div class="span12" style="font-size:14px">{msg}</div> </div> ++ Option(exceptionParam) .map { e => URLDecoder.decode(e, "UTF-8") } .map { e => <pre>{e}</pre> } .getOrElse(Seq.empty) }.getOrElse(defaultContent) UIUtils.basicSparkPage(content, title) } }
Example 35
Source File: ExecutorThreadDumpPage.scala From SparkCore with Apache License 2.0 | 5 votes |
package org.apache.spark.ui.exec import java.net.URLDecoder import javax.servlet.http.HttpServletRequest import scala.util.Try import scala.xml.{Text, Node} import org.apache.spark.ui.{UIUtils, WebUIPage} private[ui] class ExecutorThreadDumpPage(parent: ExecutorsTab) extends WebUIPage("threadDump") { private val sc = parent.sc def render(request: HttpServletRequest): Seq[Node] = { val executorId = Option(request.getParameter("executorId")).map { executorId => // Due to YARN-2844, "<driver>" in the url will be encoded to "%25253Cdriver%25253E" when // running in yarn-cluster mode. `request.getParameter("executorId")` will return // "%253Cdriver%253E". Therefore we need to decode it until we get the real id. var id = executorId var decodedId = URLDecoder.decode(id, "UTF-8") while (id != decodedId) { id = decodedId decodedId = URLDecoder.decode(id, "UTF-8") } id }.getOrElse { throw new IllegalArgumentException(s"Missing executorId parameter") } val time = System.currentTimeMillis() val maybeThreadDump = sc.get.getExecutorThreadDump(executorId) val content = maybeThreadDump.map { threadDump => val dumpRows = threadDump.map { thread => <div class="accordion-group"> <div class="accordion-heading" onclick="$(this).next().toggleClass('hidden')"> <a class="accordion-toggle"> Thread {thread.threadId}: {thread.threadName} ({thread.threadState}) </a> </div> <div class="accordion-body hidden"> <div class="accordion-inner"> <pre>{thread.stackTrace}</pre> </div> </div> </div> } <div class="row-fluid"> <p>Updated at {UIUtils.formatDate(time)}</p> { // scalastyle:off <p><a class="expandbutton" onClick="$('.accordion-body').removeClass('hidden'); $('.expandbutton').toggleClass('hidden')"> Expand All </a></p> <p><a class="expandbutton hidden" onClick="$('.accordion-body').addClass('hidden'); $('.expandbutton').toggleClass('hidden')"> Collapse All </a></p> // scalastyle:on } <div class="accordion">{dumpRows}</div> </div> }.getOrElse(Text("Error fetching thread dump")) UIUtils.headerSparkPage(s"Thread dump for executor $executorId", content, parent) } }
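The loop at the top of render is the interesting part: because of YARN-2844 the executor id can arrive URL-encoded more than once, so the page decodes repeatedly until a fixed point. Isolated as a runnable sketch:

import java.net.URLDecoder

object DecodeLoopSketch {
  // Undo any number of nested URL encodings by decoding until nothing changes.
  def fullyDecode(s: String): String = {
    var id = s
    var decoded = URLDecoder.decode(id, "UTF-8")
    while (id != decoded) {
      id = decoded
      decoded = URLDecoder.decode(id, "UTF-8")
    }
    id
  }

  def main(args: Array[String]): Unit =
    println(fullyDecode("%25253Cdriver%25253E")) // prints <driver>
}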
Example 36
Source File: EnvironmentPage.scala From SparkCore with Apache License 2.0 | 5 votes |
package org.apache.spark.ui.env import javax.servlet.http.HttpServletRequest import scala.xml.Node import org.apache.spark.ui.{UIUtils, WebUIPage} private[ui] class EnvironmentPage(parent: EnvironmentTab) extends WebUIPage("") { private val listener = parent.listener def render(request: HttpServletRequest): Seq[Node] = { val runtimeInformationTable = UIUtils.listingTable( propertyHeader, jvmRow, listener.jvmInformation, fixedWidth = true) val sparkPropertiesTable = UIUtils.listingTable( propertyHeader, propertyRow, listener.sparkProperties, fixedWidth = true) val systemPropertiesTable = UIUtils.listingTable( propertyHeader, propertyRow, listener.systemProperties, fixedWidth = true) val classpathEntriesTable = UIUtils.listingTable( classPathHeaders, classPathRow, listener.classpathEntries, fixedWidth = true) val content = <span> <h4>Runtime Information</h4> {runtimeInformationTable} <h4>Spark Properties</h4> {sparkPropertiesTable} <h4>System Properties</h4> {systemPropertiesTable} <h4>Classpath Entries</h4> {classpathEntriesTable} </span> UIUtils.headerSparkPage("Environment", content, parent) } private def propertyHeader = Seq("Name", "Value") private def classPathHeaders = Seq("Resource", "Source") private def jvmRow(kv: (String, String)) = <tr><td>{kv._1}</td><td>{kv._2}</td></tr> private def propertyRow(kv: (String, String)) = <tr><td>{kv._1}</td><td>{kv._2}</td></tr> private def classPathRow(data: (String, String)) = <tr><td>{data._1}</td><td>{data._2}</td></tr> }
Example 37
Source File: StoragePage.scala From SparkCore with Apache License 2.0 | 5 votes |
package org.apache.spark.ui.storage import javax.servlet.http.HttpServletRequest import scala.xml.Node import org.apache.spark.storage.RDDInfo import org.apache.spark.ui.{WebUIPage, UIUtils} import org.apache.spark.util.Utils private def rddRow(rdd: RDDInfo): Seq[Node] = { // scalastyle:off <tr> <td> <a href={"%s/storage/rdd?id=%s".format(UIUtils.prependBaseUri(parent.basePath), rdd.id)}> {rdd.name} </a> </td> <td>{rdd.storageLevel.description} </td> <td>{rdd.numCachedPartitions}</td> <td>{"%.0f%%".format(rdd.numCachedPartitions * 100.0 / rdd.numPartitions)}</td> <td sorttable_customkey={rdd.memSize.toString}>{Utils.bytesToString(rdd.memSize)}</td> <td sorttable_customkey={rdd.tachyonSize.toString}>{Utils.bytesToString(rdd.tachyonSize)}</td> <td sorttable_customkey={rdd.diskSize.toString} >{Utils.bytesToString(rdd.diskSize)}</td> </tr> // scalastyle:on } }
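Note the sorttable_customkey attributes in rddRow: Scala XML literals accept arbitrary expressions in attribute position, which lets a cell carry a numeric sort key while displaying a formatted size. A self-contained sketch of that trick (the formatter is a toy stand-in for Utils.bytesToString):

import scala.xml.Node

object CustomKeySketch {
  // Toy formatter; the real page uses org.apache.spark.util.Utils.bytesToString.
  def bytesToString(size: Long): String = f"${size / (1024.0 * 1024.0)}%.1f MB"

  // The attribute holds the raw byte count so the JS table sorter orders rows numerically.
  def sizeCell(bytes: Long): Node =
    <td sorttable_customkey={bytes.toString}>{bytesToString(bytes)}</td>

  def main(args: Array[String]): Unit =
    println(sizeCell(5L * 1024 * 1024)) // <td sorttable_customkey="5242880">5.0 MB</td>
}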
Example 38
Source File: PoolTable.scala From SparkCore with Apache License 2.0 | 5 votes |
package org.apache.spark.ui.jobs import scala.collection.mutable.HashMap import scala.xml.Node import org.apache.spark.scheduler.{Schedulable, StageInfo} import org.apache.spark.ui.UIUtils private[ui] class PoolTable(pools: Seq[Schedulable], parent: StagesTab) { private val listener = parent.listener def toNodeSeq: Seq[Node] = { listener.synchronized { poolTable(poolRow, pools) } } private def poolTable( makeRow: (Schedulable, HashMap[String, HashMap[Int, StageInfo]]) => Seq[Node], rows: Seq[Schedulable]): Seq[Node] = { <table class="table table-bordered table-striped table-condensed sortable table-fixed"> <thead> <th>Pool Name</th> <th>Minimum Share</th> <th>Pool Weight</th> <th>Active Stages</th> <th>Running Tasks</th> <th>SchedulingMode</th> </thead> <tbody> {rows.map(r => makeRow(r, listener.poolToActiveStages))} </tbody> </table> } private def poolRow( p: Schedulable, poolToActiveStages: HashMap[String, HashMap[Int, StageInfo]]): Seq[Node] = { val activeStages = poolToActiveStages.get(p.name) match { case Some(stages) => stages.size case None => 0 } val href = "%s/stages/pool?poolname=%s" .format(UIUtils.prependBaseUri(parent.basePath), p.name) <tr> <td> <a href={href}>{p.name}</a> </td> <td>{p.minShare}</td> <td>{p.weight}</td> <td>{activeStages}</td> <td>{p.runningTasks}</td> <td>{p.schedulingMode}</td> </tr> } }
Example 39
Source File: PoolPage.scala From SparkCore with Apache License 2.0 | 5 votes |
package org.apache.spark.ui.jobs import javax.servlet.http.HttpServletRequest import scala.xml.Node import org.apache.spark.scheduler.{Schedulable, StageInfo} import org.apache.spark.ui.{WebUIPage, UIUtils} private[ui] class PoolPage(parent: StagesTab) extends WebUIPage("pool") { private val sc = parent.sc private val listener = parent.listener def render(request: HttpServletRequest): Seq[Node] = { listener.synchronized { val poolName = request.getParameter("poolname") require(poolName != null && poolName.nonEmpty, "Missing poolname parameter") val poolToActiveStages = listener.poolToActiveStages val activeStages = poolToActiveStages.get(poolName) match { case Some(s) => s.values.toSeq case None => Seq[StageInfo]() } val activeStagesTable = new StageTableBase(activeStages.sortBy(_.submissionTime).reverse, parent.basePath, parent.listener, isFairScheduler = parent.isFairScheduler, killEnabled = parent.killEnabled) // For now, pool information is only accessible in live UIs val pools = sc.map(_.getPoolForName(poolName).get).toSeq val poolTable = new PoolTable(pools, parent) val content = <h4>Summary </h4> ++ poolTable.toNodeSeq ++ <h4>{activeStages.size} Active Stages</h4> ++ activeStagesTable.toNodeSeq UIUtils.headerSparkPage("Fair Scheduler Pool: " + poolName, content, parent) } } }
Example 40
Source File: XmlSerialization.scala From ike with Apache License 2.0 | 5 votes |
package org.allenai.ike.index import scala.xml.{ Elem, Node, Text } object XmlSerialization { def xml(text: IndexableText): Elem = { val children = addSpaces(text.sentences map xml) <document>{ children }</document> } def xml(tokens: Seq[IndexableToken]): Elem = { val children = addSpaces(tokens map xml) <sentence>{ children }</sentence> } def xml(token: IndexableToken): Elem = <word pos={ token.pos } lemma={ token.lemma } chunk={ token.chunk }>{ token.word }</word> def addSpaces(elems: Seq[Elem]): Seq[Node] = { val n = elems.size val spaces = List.fill(n)(Text(" ")) for { (elem, space) <- elems.zip(spaces) node <- List(elem, space) } yield node } }
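addSpaces interleaves a Text(" ") node after each element so the indexed document keeps word boundaries. A standalone demo of the same interleaving; the Token case class here is only a stand-in for the project's IndexableToken:

import scala.xml.{Elem, Node, Text}

case class Token(word: String, pos: String, lemma: String, chunk: String)

object SpacesSketch {
  def xml(t: Token): Elem =
    <word pos={t.pos} lemma={t.lemma} chunk={t.chunk}>{t.word}</word>

  // Same idea as addSpaces above, written as a flatMap.
  def addSpaces(elems: Seq[Elem]): Seq[Node] =
    elems.flatMap(e => Seq(e, Text(" ")))

  def main(args: Array[String]): Unit = {
    val toks = Seq(Token("Hello", "UH", "hello", "O"), Token("world", "NN", "world", "O"))
    println(<sentence>{addSpaces(toks.map(xml))}</sentence>)
  }
}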
Example 41
Source File: CustomPage.scala From cuesheet with Apache License 2.0 | 5 votes |
package org.apache.spark.ui import javax.servlet.http.HttpServletRequest import scala.xml.Node abstract class CustomPage(val name: String) extends WebUIPage("") { private[ui] var tab: SparkUITab = _ private[ui] var title: String = _ private[ui] var ui: SparkUI = _ def attachRedirectHandler( srcPath: String, destPath: String, beforeRedirect: HttpServletRequest => Unit = x => (), basePath: String = "", httpMethods: Set[String] = Set("GET")): Unit = { // Can't use Jetty interface as it is shaded to org.spark-project; use reflection instead val createRedirectHandler = JettyUtils.getClass.getMethods.filter(_.getName == "createRedirectHandler").head val handler = createRedirectHandler.invoke(JettyUtils, srcPath, destPath, beforeRedirect, basePath, httpMethods) val attachHandler = ui.getClass.getMethods.filter(_.getName == "attachHandler").head attachHandler.invoke(ui, handler) } def renderPage(request: HttpServletRequest): Seq[Node] final override def render(request: HttpServletRequest): Seq[Node] = { val content = renderPage(request) UIUtils.headerSparkPage(title, content, tab, Some(5000)) } }
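attachRedirectHandler cannot name Jetty types at compile time because cuesheet expects them to be shaded to org.spark-project at runtime, so it resolves createRedirectHandler and attachHandler by name. The bare reflection pattern, reduced to a runnable toy:

object ReflectiveCallSketch {
  def main(args: Array[String]): Unit = {
    // Look a method up by name and invoke it when the static type is unavailable.
    val target: Any = "hello"
    val length = target.getClass.getMethods.filter(_.getName == "length").head
    println(length.invoke(target)) // 5
  }
}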
Example 42
Source File: PluginMetadata.scala From chatoverflow with Eclipse Public License 2.0 | 5 votes |
package org.codeoverflow.chatoverflow.build.plugins import scala.xml.Node def toXML: List[Node] = { // Map of tag names to variables. Add new vars here and in the constructor. Map( "description" -> description, "licence" -> license, "website" -> website, "sourceRepo" -> sourceRepo, "bugtracker" -> bugtracker ).filter(_._2.nonEmpty) // filters not specified options .map(entry => { // just dummy tag name, replaced afterwards <value> {entry._2} </value>.copy(label = entry._1) // update the tag name with the correct one }).toList } }
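The trick worth noting in toXML: it builds one dummy <value> element per entry and renames it with Elem.copy(label = ...), avoiding a match over tag names. The same technique in a self-contained sketch:

import scala.xml.Node

object LabelCopySketch {
  def toTags(fields: Map[String, String]): List[Node] =
    fields.filter(_._2.nonEmpty) // drop unspecified options, as above
      .map { case (name, value) => <value>{value}</value>.copy(label = name) }
      .toList

  def main(args: Array[String]): Unit =
    toTags(Map("description" -> "demo plugin", "website" -> "")).foreach(println)
    // prints <description>demo plugin</description>; the empty website entry is dropped
}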
Example 43
Source File: ExternalVariablesLoader.scala From incubator-daffodil with Apache License 2.0 | 5 votes |
package org.apache.daffodil.externalvars import scala.xml.parsing.ConstructingParser import java.io.File import java.net.URI import scala.xml.Node import scala.io.Codec.string2codec import org.apache.daffodil.processors.{ VariableUtils, VariableMap } import org.apache.daffodil.exceptions.Assert import org.apache.daffodil.util.Misc._ import org.apache.daffodil.exceptions.ThrowsSDE import scala.collection.immutable.Queue object ExternalVariablesLoader { def loadVariables(bindings: Seq[Binding], referringContext: ThrowsSDE, vmap: VariableMap): VariableMap = { Assert.usage(referringContext != null, "loadVariables expects 'referringContext' to not be null!") VariableUtils.setExternalVariables(vmap, bindings, referringContext) vmap } // The following are methods that retrieve and transform variables into Seq[Binding] def mapToBindings(vars: Map[String, String]): Queue[Binding] = { val varsKVP = vars.map { case (name, value) => { Binding(name, value) } } Queue.empty.enqueue(varsKVP) } def uriToBindings(uri: URI): Queue[Binding] = { Assert.usage(uri ne null) val file = new File(uri) fileToBindings(file) } def fileToBindings(file: File): Queue[Binding] = { Assert.usage(file ne null) ExternalVariablesValidator.validate(file) match { case Left(ex) => Assert.abort(ex) case Right(_) => // Success } val enc = determineEncoding(file) // The encoding is needed for ConstructingParser val input = scala.io.Source.fromURI(file.toURI)(enc) val node = ConstructingParser.fromSource(input, true).document.docElem nodeToBindings(node) } def nodeToBindings(node: Node): Queue[Binding] = { Assert.usage(node ne null) val newBindings = Binding.getBindings(node) var res = Queue.empty[Binding] // couldn't get the enqueue(iterable) method overload to resolve. // So just doing this one by one newBindings.foreach{ b => res = res.enqueue(b) } res } }
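fileToBindings detects the file's encoding first and then hands a matching Source to scala.xml.parsing.ConstructingParser rather than using XML.loadFile. A self-contained sketch of just the parser call, on toy XML (the Binding extraction is project-specific and omitted):

import scala.io.Source
import scala.xml.parsing.ConstructingParser

object ConstructingParserSketch {
  def main(args: Array[String]): Unit = {
    val input = Source.fromString("""<vars><bind name="a">1</bind><bind name="b">2</bind></vars>""")
    // preserveWS = true keeps whitespace nodes, matching the loader above.
    val root = ConstructingParser.fromSource(input, preserveWS = true).document.docElem
    println((root \ "bind").map(b => ((b \ "@name").text, b.text))) // List((a,1), (b,2))
  }
}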
Example 44
Source File: ElementRef.scala From incubator-daffodil with Apache License 2.0 | 5 votes |
package org.apache.daffodil.dsom import scala.xml.Node import org.apache.daffodil.xml._ import org.apache.daffodil.dpath.NodeInfo override def namedQName: NamedQName = LV('namedQName) { referencedElement.namedQName }.value override lazy val name = refQName.local override lazy val prefix = refQName.prefix.getOrElse(null) // Need to go get the Element we are referencing lazy val referencedElement: GlobalElementDecl = LV('referencedElement) { val ged = this.schemaSet.getGlobalElementDecl(refQName) val res = ged match { case None => { // // this element ref refers to something not found. // // That might be because the QName namespace prefix is no good, or // because there is no element with that global name. // // Can't use namedQName because that's the resolved one // must use the refQName // SDE("Referenced element not found: %s.", this.refQName) } case Some(x) => x } res }.value override lazy val namespace = refQName.namespace override lazy val diagnosticDebugName = "element reference " + refQName override def typeDef = referencedElement.typeDef }
Example 45
Source File: Include.scala From incubator-daffodil with Apache License 2.0 | 5 votes |
package org.apache.daffodil.dsom import scala.xml.Node import org.apache.daffodil.util._ import IIUtils._ import org.apache.daffodil.oolag.OOLAG final class Include(xml: Node, xsd: XMLSchemaDocument, seenArg: IIMap) extends IIBase(xml, xsd, seenArg) { protected final def mapPair = LV('mapPair) { // for an include, the targetNamespace of the schema document that contained us is right. val mp = (targetNamespace, resolvedLocation) mp }.value private lazy val slText = schemaLocationProperty.get // include always has a schemaLocation property lazy val resolvedNamespaceURI = None // include doesn't have a namespace. // include always has a schemaLocation lazy val resolvedLocation = LV('resolvedLocation) { resolvedSchemaLocation match { case Some(rsl) => { val ns = OOLAG.keepGoing( schemaDefinitionError("Unable to determine target namespace.")) { xsd.targetNamespace } log(LogLevel.Debug, "Included schema from %s into namespace %s.", rsl, ns) rsl } case None => schemaDefinitionError("Included schema not found at location %s. %s", slText, whereSearched) } }.value }
Example 46
Source File: GlobalElementDecl.scala From incubator-daffodil with Apache License 2.0 | 5 votes |
package org.apache.daffodil.dsom import scala.xml.Node final class GlobalElementDecl( xmlArg: Node, schemaDocument: SchemaDocument) extends AnnotatedSchemaComponentImpl(xmlArg, schemaDocument) with GlobalElementComponentMixin with ElementDeclMixin with NestingLexicalMixin // Needed to check some properties are // NOT on global element decls such as dfdl:choiceBranchKey with ResolvesLocalProperties { lazy val asRoot = new Root(xml, schemaDocument, namedQName, this) requiredEvaluationsIfActivated(validateChoiceBranchKey) private def validateChoiceBranchKey(): Unit = { // Ensure that the global element decl does not have choiceBranchKey set. val found = findPropertyOption("choiceBranchKey") if (found.isDefined) { SDE("dfdl:choiceBranchKey cannot be specified on a global element declaration") } } }
Example 47
Source File: DFDLAnnotation.scala From incubator-daffodil with Apache License 2.0 | 5 votes |
package org.apache.daffodil.dsom import scala.xml.Node import org.apache.daffodil.util.Misc abstract class DFDLAnnotation(xmlArg: Node, annotatedSCArg: AnnotatedSchemaComponent) extends SchemaComponent with NestingLexicalMixin { final override val xml = xmlArg final override val optLexicalParent = Option(annotatedSCArg) final lazy val annotatedSC = annotatedSCArg override def toString = diagnosticDebugName override lazy val diagnosticDebugName: String = { val cn = Misc.getNameFromClass(this) val n = if (cn.startsWith("DFDL")) { val nn = cn.replaceFirst("DFDL", "") "dfdl:" + Misc.initialLowerCase(nn) } else { cn } n // + "(" + annotatedSC.path + ")" } }
Example 48
Source File: LocalElementDecl.scala From incubator-daffodil with Apache License 2.0 | 5 votes |
package org.apache.daffodil.dsom import scala.xml.Node sealed abstract class LocalElementDeclBase( final override val xml: Node, final override val optLexicalParent: Option[SchemaComponent], final override val position: Int) extends ElementBase with LocalElementComponentMixin with ElementDeclMixin with NestingLexicalMixin { requiredEvaluationsIfActivated(minOccurs) requiredEvaluationsIfActivated(maxOccurs) } class LocalElementDecl( xml: Node, lexicalParent: SchemaComponent, position: Int) extends LocalElementDeclBase(xml, Option(lexicalParent), position) sealed abstract class QuasiElementDeclBase( xml: Node, lexicalParent: SchemaComponent) extends LocalElementDeclBase(xml, Option(lexicalParent), -1) { override lazy val isQuasiElement = true } class PrefixLengthQuasiElementDecl( xml: Node, lexicalParent: SchemaComponent) extends QuasiElementDeclBase(xml, lexicalParent) { } class RepTypeQuasiElementDecl( xml: Node, lexicalParent: SchemaComponent) extends QuasiElementDeclBase(xml, lexicalParent) { }
Example 49
Source File: ComplexTypes.scala From incubator-daffodil with Apache License 2.0 | 5 votes |
package org.apache.daffodil.dsom import scala.xml.Node import org.apache.daffodil.dpath.NodeInfo import org.apache.daffodil.api.WarnID import scala.xml.Text import scala.xml.Comment sealed abstract class ComplexTypeBase(xmlArg: Node, parentArg: SchemaComponent) extends SchemaComponentImpl(xmlArg, parentArg) with TypeBase with NonPrimTypeMixin { final override def optRestriction = None final override def optUnion = None final override def typeNode = NodeInfo.Complex requiredEvaluationsIfActivated(modelGroup) final def group = modelGroup final def sequence = group.asInstanceOf[Sequence] final def choice = group.asInstanceOf[Choice] private lazy val <complexType>{ xmlChildren @ _* }</complexType> = xml final lazy val Seq(modelGroup) = { val s = smg schemaDefinitionUnless(s.length == 1, "A complex type must have exactly one model-group element child which is a sequence, choice, or group reference.") s } private lazy val smg = { childrenForTerms.map { xmlChild => ModelGroupFactory(xmlChild, this, 1, false) } } private lazy val childrenForTerms = { xmlChildren.flatMap { xmlChild => { xmlChild match { case <annotation>{ annotationChildren @ _* }</annotation> => { val dais = annotationChildren.find { ai => ai.attribute("source") match { case Some(n) => n.text.contains("ogf") && n.text.contains("dfdl") case _ => false } } if (dais != None) { this.SDW(WarnID.InvalidAnnotationPoint, "complexType is not a valid annotation point. Annotation ignored.") } None } case textNode: Text => None case _: Comment => None case _ => Some(xmlChild) } } } } } final class GlobalComplexTypeDef( xmlArg: Node, schemaDocumentArg: SchemaDocument) extends ComplexTypeBase(xmlArg, schemaDocumentArg) with GlobalNonElementComponentMixin with NestingLexicalMixin { // Nothing needed here. The base class and mixins are providing all the functionality needed. } final class LocalComplexTypeDef(xmlArg: Node, val elementDecl: ElementDeclMixin) extends ComplexTypeBase(xmlArg, elementDecl) with LocalNonElementComponentMixin with NestingLexicalMixin { // Nothing needed here. The base class and mixins are providing all the functionality needed. }
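ComplexTypeBase leans on the fact that XML literals also work as patterns: <complexType>{ xmlChildren @ _* }</complexType> binds all children in one match. A minimal runnable example in the same style:

import scala.xml.{Comment, Node, Text}

object XmlPatternSketch {
  def children(n: Node): Seq[Node] = n match {
    case <complexType>{ kids @ _* }</complexType> => kids
    case _ => Nil
  }

  def main(args: Array[String]): Unit = {
    val ct = <complexType><sequence/><!-- note -->text</complexType>
    children(ct).foreach {
      case _: Comment => println("comment") // skipped by childrenForTerms above
      case _: Text    => println("text")    // likewise skipped
      case e          => println("element: " + e.label)
    }
  }
}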
Example 50
Source File: TestInputValueCalc.scala From incubator-daffodil with Apache License 2.0 | 5 votes |
package org.apache.daffodil.dsom import org.apache.daffodil.util._ import org.junit.Test import scala.xml.Node class TestInputValueCalc extends Logging { // @Test @Test def testInputValueCalc1(): Unit = { val testSchema = SchemaUtils.dfdlTestSchema( <xs:include schemaLocation="org/apache/daffodil/xsd/DFDLGeneralFormat.dfdl.xsd"/>, <dfdl:format ref="tns:GeneralFormat"/>, <xs:element name="data" type="xs:string" dfdl:inputValueCalc="{ xs:string(42) }" />) val (_, actual) = TestUtils.testString(testSchema, "") val expected: Node = <data>42</data> TestUtils.assertEqualsXMLElements(expected, actual) } // @Test @Test def testInputValueCalcString2(): Unit = { val testSchema = SchemaUtils.dfdlTestSchema( <xs:include schemaLocation="org/apache/daffodil/xsd/DFDLGeneralFormat.dfdl.xsd"/>, <dfdl:format ref="tns:GeneralFormat" encoding="ascii"/>, <xs:element name="data"> <xs:complexType> <xs:sequence> <xs:element name="e1" type="xs:string" dfdl:lengthKind="explicit" dfdl:length="1"/> <xs:element name="e2" type="xs:string" dfdl:inputValueCalc="{ ../tns:e1 }"/> </xs:sequence> </xs:complexType> </xs:element>) val (_, actual) = TestUtils.testString(testSchema, "A") val expected: Node = <data><e1>A</e1><e2>A</e2></data> TestUtils.assertEqualsXMLElements(expected, actual) } // @Test @Test def testInputValueCalcInt3(): Unit = { val testSchema = SchemaUtils.dfdlTestSchema( <xs:include schemaLocation="org/apache/daffodil/xsd/DFDLGeneralFormat.dfdl.xsd"/>, <dfdl:format ref="tns:GeneralFormat" encoding="ascii"/>, <xs:element name="data"> <xs:complexType> <xs:sequence> <xs:element name="e1" type="xs:int" dfdl:lengthKind="explicit" dfdl:length="1"/> <xs:element name="e2" type="xs:int" dfdl:inputValueCalc="{ ../tns:e1 }"/> </xs:sequence> </xs:complexType> </xs:element>) val (_, actual) = TestUtils.testString(testSchema, "8") val expected: Node = <data><e1>8</e1><e2>8</e2></data> TestUtils.assertEqualsXMLElements(expected, actual) } }
Example 51
Source File: TestPropertyScoping.scala From incubator-daffodil with Apache License 2.0 | 5 votes |
package org.apache.daffodil.dsom import scala.xml.Node import org.junit.Test import org.junit.Assert._ import org.apache.daffodil.util.LogLevel import org.apache.daffodil.util.Fakes class HasProps(xml: Node) extends DFDLFormatAnnotation(xml, Fakes.fakeElem) class TestPropertyScoping { val x1 = new HasProps(<fake alignmentUnits="bytes"/>) @Test def test1(): Unit = { LogLevel // println(x1.formatRefs) // println(x1.shortFormProperties) // println(x1.longFormProperties) // println(x1.elementFormProperties) assertTrue(x1.verifyPropValue("alignmentUnits", "bytes")) } }
Example 52
Source File: filter_errorcode.scala From scalabpe with Apache License 2.0 | 5 votes |
package scalabpe.plugin import scala.collection.mutable.HashMap import scala.xml.Node import scalabpe.core.DummyActor import scalabpe.core.HashMapStringAny import scalabpe.core.Logging import scalabpe.core.Request import scalabpe.core.Response import scalabpe.core.ResponseFilter import scalabpe.core.Router class ErrorCodeDefine(val resultCodeName: String, val resultMsgName: String); class ErrorDescResponseFilter(val router: Router, val cfgNode: Node) extends ResponseFilter with Logging { val cfgs = new HashMap[Int, ErrorCodeDefine]() var localCacheServiceId = 0 val dummyActor = new DummyActor() init def init() { var s = (cfgNode \ "@localCacheServiceId").toString if (s != "") localCacheServiceId = s.toInt val serviceNodes = (cfgNode \ "Service") for (p <- serviceNodes) { val serviceId = (p \ "@serviceId").toString.toInt val resultCodeName = (p \ "@resultCodeField").toString val resultMsgName = (p \ "@resultMsgField").toString cfgs.put(serviceId, new ErrorCodeDefine(resultCodeName, resultMsgName)) // log.info("serviceId=%d,resultCodeName=%s,resultMsgName=%s".format(serviceId,resultCodeName,resultMsgName)) } log.info("errorcode response filter created") } def filter(res: Response, req: Request): Unit = { // log.info("error response filter called, res={}",res.toString) val rd = cfgs.getOrElse(res.serviceId, null) if (rd == null) return if (rd.resultCodeName != "") { if (res.body.getOrElse(rd.resultCodeName, null) == null) { res.body.put(rd.resultCodeName, res.code) } } if (res.code == 0) return if (rd.resultMsgName == "") return if (res.body.getOrElse(rd.resultMsgName, null) != null) return val body = new HashMapStringAny() body.put("resultCode", res.code) val req = new Request( res.requestId + ":$", Router.DO_NOT_REPLY, res.sequence, res.encoding, localCacheServiceId, 1, new HashMapStringAny(), body, dummyActor) val invokeResult = router.send(req) if (invokeResult == null) return val resultMsg = invokeResult.s("resultMsg", "") if (resultMsg != "") res.body.put(rd.resultMsgName, resultMsg) } }
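The init method shows the usual scala.xml configuration idiom: \ "@attr" projects an attribute, \ "Child" projects direct child elements. The same reads against an inline config node:

import scala.xml.Node

object CfgReadSketch {
  def main(args: Array[String]): Unit = {
    val cfgNode: Node =
      <ErrorCodeFilter localCacheServiceId="999">
        <Service serviceId="23" resultCodeField="resultCode" resultMsgField="resultMsg"/>
        <Service serviceId="24" resultCodeField="code" resultMsgField="msg"/>
      </ErrorCodeFilter>

    val localCacheServiceId = (cfgNode \ "@localCacheServiceId").toString.toInt
    val services = (cfgNode \ "Service").map { p =>
      ((p \ "@serviceId").toString.toInt, (p \ "@resultCodeField").toString)
    }
    println(localCacheServiceId) // 999
    services.foreach(println)    // (23,resultCode) and (24,code)
  }
}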
Example 53
Source File: regdishook_sample.scala From scalabpe with Apache License 2.0 | 5 votes |
package scalabpe.plugin import scala.xml.Node import scalabpe.core.Closable import scalabpe.core.Logging import scalabpe.core.RegDisHook import scalabpe.core.Router class RegDisHookSample(val router: Router, val cfgNode: Node) extends Logging with RegDisHook with Closable { log.info("regdis hook created") def updateXml(xml: String): String = { log.info("updateXml called") xml } def close(): Unit = { log.info("regdis hook closed") } }
Example 54
Source File: DruidQueriesPage.scala From spark-druid-olap with Apache License 2.0 | 5 votes |
package org.apache.spark.sql.hive.thriftserver.sparklinedata.ui import javax.servlet.http.HttpServletRequest import org.apache.spark.sql.SPLLogging import org.apache.spark.ui.{UIUtils, WebUIPage} import org.sparklinedata.druid.metadata.{DruidQueryExecutionView, DruidQueryHistory} import scala.xml.Node private[ui] class DruidQueriesPage(parent: DruidQueriesTab) extends WebUIPage("") with SPLLogging { def render(request: HttpServletRequest): Seq[Node] = { val content = generateDruidStatsTable() UIUtils.headerSparkPage("Druid Query Details", content, parent, Some(5000)) } private def generateDruidStatsTable(): Seq[Node] = { val numStatement = DruidQueryHistory.getHistory.size val table = if (numStatement > 0) { val headerRow = Seq("stageId", "partitionId", "taskAttemptId", "druidQueryServer", "druidSegIntervals", "startTime", "druidExecTime", "queryExecTime", "numRows", "druidQuery", "sql") val druidContent = DruidQueryHistory.getHistory def generateDataRow(info: DruidQueryExecutionView): Seq[Node] = { var interval = "" for(temp <- info.druidSegIntervals){ interval += temp } val stageLink = "%s/stages/stage?id=%s&attempt=0" .format(UIUtils.prependBaseUri(parent.basePath), info.stageId) <tr> <td><a href={stageLink}> {info.stageId} </a></td> <td> {info.partitionId} </td> <td>{info.taskAttemptId}</td> <td>{info.druidQueryServer}</td> <td>{interval}</td> <td>{info.startTime}</td> <td>{info.druidExecTime}</td> <td>{info.queryExecTime}</td> <td>{info.numRows}</td> <td>{info.druidQuery}</td> <td>{info.sqlStmt.getOrElse("none")}</td> </tr> } Some(UIUtils.listingTable(headerRow, generateDataRow, druidContent, false, None, Seq(null), false)) } else { None } val content = <h5 id="sqlstat">Druid Query Details</h5> ++ <div> <ul class="unstyled"> {table.getOrElse("No queries have been executed yet.")} </ul> </div> content } }
Example 55
Source File: WebCrawler.scala From CSYE7200_Old with MIT License | 5 votes |
package edu.neu.coe.csye7200.asstwc import java.net.URL import scala.concurrent.ExecutionContext.Implicits.global import scala.concurrent._ import scala.concurrent.duration._ import scala.io.Source import scala.language.postfixOps import scala.util._ import scala.xml.Node object WebCrawler extends App { def getURLContent(u: URL): Future[String] = { for { source <- Future(Source.fromURL(u)) } yield source mkString } def wget(u: URL): Future[Seq[URL]] = { // TO BE IMPLEMENTED implement. 16 points. Hint: write as a for-comprehension, using the constructor new URL(URL,String) to get the appropriate URL for relative links def getURLs(ns: Node): Seq[URL] = ??? def getLinks(g: String): Try[Seq[URL]] = for (n <- HTMLParser.parse(g) recoverWith { case f => Failure(new RuntimeException(s"parse problem with URL $u: $f")) }) yield getURLs(n) // TO BE IMPLEMENTED implement. 9 points. Hint: write as a for-comprehension, using getURLContent (above) and getLinks above. You might also need MonadOps.asFuture ??? } def wget(us: Seq[URL]): Future[Seq[Either[Throwable, Seq[URL]]]] = { val us2 = us.distinct take 10 // TO BE IMPLEMENTED implement the rest of this, based on us2 instead of us. 15 points. // Hint: Use wget(URL) (above). MonadOps.sequence and Future.sequence are also available to you to use. ??? } def crawler(depth: Int, args: Seq[URL]): Future[Seq[URL]] = { def inner(urls: Seq[URL], depth: Int, accum: Seq[URL]): Future[Seq[URL]] = if (depth > 0) for (us <- MonadOps.flattenRecover(wget(urls), { x => System.err.println(x) }); r <- inner(us, depth - 1, accum ++: urls)) yield r else Future.successful(accum) inner(args, depth, Nil) } println(s"web reader: ${args.toList}") val urls = for (arg <- args toList) yield Try(new URL(arg)) val s = MonadOps.sequence(urls) s match { case Success(z) => println(s"invoking crawler on $z") val f = crawler(2, z) Await.ready(f, Duration("60 second")) for (x <- f) println(s"Links: $x") case Failure(z) => println(s"failure: $z") } }
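getURLs is left as ??? because this is an assignment file. One plausible shape for it, not the official solution: collect every <a href="..."> under the node and resolve each link against the page URL (passed here explicitly as base; in the original it would close over u):

import java.net.URL
import scala.util.Try
import scala.xml.Node

object GetUrlsSketch {
  def getURLs(base: URL)(ns: Node): Seq[URL] =
    for {
      a    <- ns \\ "a"                              // every anchor element
      href <- a.attribute("href").toSeq.flatten      // its href attribute, if present
      url  <- Try(new URL(base, href.text)).toOption // resolve relative links; drop malformed ones
    } yield url
}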
Example 56
Source File: HistoryPage.scala From multi-tenancy-spark with Apache License 2.0 | 5 votes |
package org.apache.spark.deploy.history import javax.servlet.http.HttpServletRequest import scala.xml.Node import org.apache.spark.ui.{UIUtils, WebUIPage} private[history] class HistoryPage(parent: HistoryServer) extends WebUIPage("") { def render(request: HttpServletRequest): Seq[Node] = { val requestedIncomplete = Option(request.getParameter("showIncomplete")).getOrElse("false").toBoolean val allAppsSize = parent.getApplicationList().count(_.completed != requestedIncomplete) val eventLogsUnderProcessCount = parent.getEventLogsUnderProcess() val lastUpdatedTime = parent.getLastUpdatedTime() val providerConfig = parent.getProviderConfig() val content = <script src={UIUtils.prependBaseUri("/static/historypage-common.js")}></script> <div> <div class="span12"> <ul class="unstyled"> {providerConfig.map { case (k, v) => <li><strong>{k}:</strong> {v}</li> }} </ul> { if (eventLogsUnderProcessCount > 0) { <p>There are {eventLogsUnderProcessCount} event log(s) currently being processed which may result in additional applications getting listed on this page. Refresh the page to view updates. </p> } } { if (lastUpdatedTime > 0) { <p>Last updated: <span id="last-updated">{lastUpdatedTime}</span></p> } } { if (allAppsSize > 0) { <script src={UIUtils.prependBaseUri("/static/dataTables.rowsGroup.js")}></script> ++ <div id="history-summary" class="span12 pagination"></div> ++ <script src={UIUtils.prependBaseUri("/static/utils.js")}></script> ++ <script src={UIUtils.prependBaseUri("/static/historypage.js")}></script> ++ <script>setAppLimit({parent.maxApplications})</script> } else if (requestedIncomplete) { <h4>No incomplete applications found!</h4> } else if (eventLogsUnderProcessCount > 0) { <h4>No completed applications found!</h4> } else { <h4>No completed applications found!</h4> ++ parent.emptyListingHtml } } <a href={makePageLink(!requestedIncomplete)}> { if (requestedIncomplete) { "Back to completed applications" } else { "Show incomplete applications" } } </a> </div> </div> UIUtils.basicSparkPage(content, "History Server", true) } private def makePageLink(showIncomplete: Boolean): String = { UIUtils.prependBaseUri("/?" + "showIncomplete=" + showIncomplete) } }
Example 57
Source File: ExecutorsPage.scala From multi-tenancy-spark with Apache License 2.0 | 5 votes |
package org.apache.spark.ui.exec import javax.servlet.http.HttpServletRequest import scala.xml.Node import org.apache.spark.status.api.v1.ExecutorSummary import org.apache.spark.ui.{UIUtils, WebUIPage} // This isn't even used anymore -- but we need to keep it b/c of a MiMa false positive private[ui] case class ExecutorSummaryInfo( id: String, hostPort: String, rddBlocks: Int, memoryUsed: Long, diskUsed: Long, activeTasks: Int, failedTasks: Int, completedTasks: Int, totalTasks: Int, totalDuration: Long, totalInputBytes: Long, totalShuffleRead: Long, totalShuffleWrite: Long, maxMemory: Long, executorLogs: Map[String, String]) private[ui] class ExecutorsPage( parent: ExecutorsTab, threadDumpEnabled: Boolean) extends WebUIPage("") { private val listener = parent.listener def render(request: HttpServletRequest): Seq[Node] = { val content = <div> { <div id="active-executors"></div> ++ <script src={UIUtils.prependBaseUri("/static/utils.js", sparkUser = parent.sparkUser)}></script> ++ <script src={UIUtils.prependBaseUri("/static/executorspage.js", sparkUser = parent.sparkUser)}></script> ++ <script>setThreadDumpEnabled({threadDumpEnabled})</script> } </div>; UIUtils.headerSparkPage("Executors", content, parent, useDataTables = true) } } private[spark] object ExecutorsPage { def getExecInfo( listener: ExecutorsListener, statusId: Int, isActive: Boolean): ExecutorSummary = { val status = if (isActive) { listener.activeStorageStatusList(statusId) } else { listener.deadStorageStatusList(statusId) } val execId = status.blockManagerId.executorId val hostPort = status.blockManagerId.hostPort val rddBlocks = status.numBlocks val memUsed = status.memUsed val maxMem = status.maxMem val diskUsed = status.diskUsed val taskSummary = listener.executorToTaskSummary.getOrElse(execId, ExecutorTaskSummary(execId)) new ExecutorSummary( execId, hostPort, isActive, rddBlocks, memUsed, diskUsed, taskSummary.totalCores, taskSummary.tasksMax, taskSummary.tasksActive, taskSummary.tasksFailed, taskSummary.tasksComplete, taskSummary.tasksActive + taskSummary.tasksFailed + taskSummary.tasksComplete, taskSummary.duration, taskSummary.jvmGCTime, taskSummary.inputBytes, taskSummary.shuffleRead, taskSummary.shuffleWrite, maxMem, taskSummary.executorLogs ) } }
Example 58
Source File: ExecutorThreadDumpPage.scala From multi-tenancy-spark with Apache License 2.0 | 5 votes |
package org.apache.spark.ui.exec import javax.servlet.http.HttpServletRequest import scala.xml.{Node, Text} import org.apache.spark.ui.{UIUtils, WebUIPage} private[ui] class ExecutorThreadDumpPage(parent: ExecutorsTab) extends WebUIPage("threadDump") { private val sc = parent.sc def render(request: HttpServletRequest): Seq[Node] = { val executorId = Option(request.getParameter("executorId")).map { executorId => UIUtils.decodeURLParameter(executorId) }.getOrElse { throw new IllegalArgumentException(s"Missing executorId parameter") } val time = System.currentTimeMillis() val maybeThreadDump = sc.get.getExecutorThreadDump(executorId) val content = maybeThreadDump.map { threadDump => val dumpRows = threadDump.sortWith { case (threadTrace1, threadTrace2) => val v1 = if (threadTrace1.threadName.contains("Executor task launch")) 1 else 0 val v2 = if (threadTrace2.threadName.contains("Executor task launch")) 1 else 0 if (v1 == v2) { threadTrace1.threadName.toLowerCase < threadTrace2.threadName.toLowerCase } else { v1 > v2 } }.map { thread => val threadId = thread.threadId val blockedBy = thread.blockedByThreadId match { case Some(blockedByThreadId) => <div> Blocked by <a href={s"#${thread.blockedByThreadId}_td_id"}> Thread {thread.blockedByThreadId} {thread.blockedByLock}</a> </div> case None => Text("") } val heldLocks = thread.holdingLocks.mkString(", ") <tr id={s"thread_${threadId}_tr"} class="accordion-heading" onclick={s"toggleThreadStackTrace($threadId, false)"} onmouseover={s"onMouseOverAndOut($threadId)"} onmouseout={s"onMouseOverAndOut($threadId)"}> <td id={s"${threadId}_td_id"}>{threadId}</td> <td id={s"${threadId}_td_name"}>{thread.threadName}</td> <td id={s"${threadId}_td_state"}>{thread.threadState}</td> <td id={s"${threadId}_td_locking"}>{blockedBy}{heldLocks}</td> <td id={s"${threadId}_td_stacktrace"} class="hidden">{thread.stackTrace}</td> </tr> } <div class="row-fluid"> <p>Updated at {UIUtils.formatDate(time)}</p> { // scalastyle:off <p><a class="expandbutton" onClick="expandAllThreadStackTrace(true)"> Expand All </a></p> <p><a class="expandbutton hidden" onClick="collapseAllThreadStackTrace(true)"> Collapse All </a></p> <div class="form-inline"> <div class="bs-example" data-example-id="simple-form-inline"> <div class="form-group"> <div class="input-group"> Search: <input type="text" class="form-control" id="search" oninput="onSearchStringChange()"></input> </div> </div> </div> </div> <p></p> // scalastyle:on } <table class={UIUtils.TABLE_CLASS_STRIPED + " accordion-group" + " sortable"}> <thead> <th onClick="collapseAllThreadStackTrace(false)">Thread ID</th> <th onClick="collapseAllThreadStackTrace(false)">Thread Name</th> <th onClick="collapseAllThreadStackTrace(false)">Thread State</th> <th onClick="collapseAllThreadStackTrace(false)">Thread Locks</th> </thead> <tbody>{dumpRows}</tbody> </table> </div> }.getOrElse(Text("Error fetching thread dump")) UIUtils.headerSparkPage(s"Thread dump for executor $executorId", content, parent) } }
Example 59
Source File: EnvironmentPage.scala From multi-tenancy-spark with Apache License 2.0 | 5 votes |
package org.apache.spark.ui.env import javax.servlet.http.HttpServletRequest import scala.xml.Node import org.apache.spark.ui.{UIUtils, WebUIPage} private[ui] class EnvironmentPage(parent: EnvironmentTab) extends WebUIPage("") { private val listener = parent.listener private def removePass(kv: (String, String)): (String, String) = { if (kv._1.toLowerCase.contains("password") || kv._1.toLowerCase.contains("secret")) { (kv._1, "******") } else kv } def render(request: HttpServletRequest): Seq[Node] = { val runtimeInformationTable = UIUtils.listingTable( propertyHeader, jvmRow, listener.jvmInformation, fixedWidth = true) val sparkPropertiesTable = UIUtils.listingTable( propertyHeader, propertyRow, listener.sparkProperties.map(removePass), fixedWidth = true) val systemPropertiesTable = UIUtils.listingTable( propertyHeader, propertyRow, listener.systemProperties, fixedWidth = true) val classpathEntriesTable = UIUtils.listingTable( classPathHeaders, classPathRow, listener.classpathEntries, fixedWidth = true) val content = <span> <h4>Runtime Information</h4> {runtimeInformationTable} <h4>Spark Properties</h4> {sparkPropertiesTable} <h4>System Properties</h4> {systemPropertiesTable} <h4>Classpath Entries</h4> {classpathEntriesTable} </span> UIUtils.headerSparkPage("Environment", content, parent) } private def propertyHeader = Seq("Name", "Value") private def classPathHeaders = Seq("Resource", "Source") private def jvmRow(kv: (String, String)) = <tr><td>{kv._1}</td><td>{kv._2}</td></tr> private def propertyRow(kv: (String, String)) = <tr><td>{kv._1}</td><td>{kv._2}</td></tr> private def classPathRow(data: (String, String)) = <tr><td>{data._1}</td><td>{data._2}</td></tr> }
Example 60
Source File: PoolTable.scala From multi-tenancy-spark with Apache License 2.0 | 5 votes |
package org.apache.spark.ui.jobs import java.net.URLEncoder import scala.collection.mutable.HashMap import scala.xml.Node import org.apache.spark.scheduler.{Schedulable, StageInfo} import org.apache.spark.ui.UIUtils private[ui] class PoolTable(pools: Seq[Schedulable], parent: StagesTab) { private val listener = parent.progressListener def toNodeSeq: Seq[Node] = { listener.synchronized { poolTable(poolRow, pools) } } private def poolTable( makeRow: (Schedulable, HashMap[String, HashMap[Int, StageInfo]]) => Seq[Node], rows: Seq[Schedulable]): Seq[Node] = { <table class="table table-bordered table-striped table-condensed sortable table-fixed"> <thead> <th>Pool Name</th> <th>Minimum Share</th> <th>Pool Weight</th> <th>Active Stages</th> <th>Running Tasks</th> <th>SchedulingMode</th> </thead> <tbody> {rows.map(r => makeRow(r, listener.poolToActiveStages))} </tbody> </table> } private def poolRow( p: Schedulable, poolToActiveStages: HashMap[String, HashMap[Int, StageInfo]]): Seq[Node] = { val activeStages = poolToActiveStages.get(p.name) match { case Some(stages) => stages.size case None => 0 } val href = "%s/stages/pool?poolname=%s" .format(UIUtils.prependBaseUri(parent.basePath, sparkUser = parent.sparkUser), URLEncoder.encode(p.name, "UTF-8")) <tr> <td> <a href={href}>{p.name}</a> </td> <td>{p.minShare}</td> <td>{p.weight}</td> <td>{activeStages}</td> <td>{p.runningTasks}</td> <td>{p.schedulingMode}</td> </tr> } }
Example 61
Source File: PoolPage.scala From multi-tenancy-spark with Apache License 2.0 | 5 votes |
package org.apache.spark.ui.jobs import javax.servlet.http.HttpServletRequest import scala.xml.Node import org.apache.spark.scheduler.StageInfo import org.apache.spark.ui.{UIUtils, WebUIPage} private[ui] class PoolPage(parent: StagesTab) extends WebUIPage("pool") { private val sc = parent.sc private val listener = parent.progressListener def render(request: HttpServletRequest): Seq[Node] = { listener.synchronized { val poolName = Option(request.getParameter("poolname")).map { poolname => UIUtils.decodeURLParameter(poolname) }.getOrElse { throw new IllegalArgumentException(s"Missing poolname parameter") } val poolToActiveStages = listener.poolToActiveStages val activeStages = poolToActiveStages.get(poolName) match { case Some(s) => s.values.toSeq case None => Seq[StageInfo]() } val shouldShowActiveStages = activeStages.nonEmpty val activeStagesTable = new StageTableBase(request, activeStages, "", "activeStage", parent.basePath, "stages/pool", parent.progressListener, parent.isFairScheduler, parent.killEnabled, isFailedStage = false, parent.sparkUser) // For now, pool information is only accessible in live UIs val pools = sc.map(_.getPoolForName(poolName).getOrElse { throw new IllegalArgumentException(s"Unknown poolname: $poolName") }).toSeq val poolTable = new PoolTable(pools, parent) var content = <h4>Summary </h4> ++ poolTable.toNodeSeq if (shouldShowActiveStages) { content ++= <h4>{activeStages.size} Active Stages</h4> ++ activeStagesTable.toNodeSeq } UIUtils.headerSparkPage("Fair Scheduler Pool: " + poolName, content, parent) } } }
Example 62
Source File: PagedTableSuite.scala From multi-tenancy-spark with Apache License 2.0 | 5 votes |
package org.apache.spark.ui import scala.xml.Node import org.apache.spark.SparkFunSuite class PagedDataSourceSuite extends SparkFunSuite { test("basic") { val dataSource1 = new SeqPagedDataSource[Int](1 to 5, pageSize = 2) assert(dataSource1.pageData(1) === PageData(3, (1 to 2))) val dataSource2 = new SeqPagedDataSource[Int](1 to 5, pageSize = 2) assert(dataSource2.pageData(2) === PageData(3, (3 to 4))) val dataSource3 = new SeqPagedDataSource[Int](1 to 5, pageSize = 2) assert(dataSource3.pageData(3) === PageData(3, Seq(5))) val dataSource4 = new SeqPagedDataSource[Int](1 to 5, pageSize = 2) val e1 = intercept[IndexOutOfBoundsException] { dataSource4.pageData(4) } assert(e1.getMessage === "Page 4 is out of range. Please select a page number between 1 and 3.") val dataSource5 = new SeqPagedDataSource[Int](1 to 5, pageSize = 2) val e2 = intercept[IndexOutOfBoundsException] { dataSource5.pageData(0) } assert(e2.getMessage === "Page 0 is out of range. Please select a page number between 1 and 3.") } } class PagedTableSuite extends SparkFunSuite { test("pageNavigation") { // Create a fake PagedTable to test pageNavigation val pagedTable = new PagedTable[Int] { override def tableId: String = "" override def tableCssClass: String = "" override def dataSource: PagedDataSource[Int] = null override def pageLink(page: Int): String = page.toString override def headers: Seq[Node] = Nil override def row(t: Int): Seq[Node] = Nil override def pageSizeFormField: String = "pageSize" override def prevPageSizeFormField: String = "prevPageSize" override def pageNumberFormField: String = "page" override def goButtonFormPath: String = "" } assert(pagedTable.pageNavigation(1, 10, 1) === Nil) assert( (pagedTable.pageNavigation(1, 10, 2).head \\ "li").map(_.text.trim) === Seq("1", "2", ">")) assert( (pagedTable.pageNavigation(2, 10, 2).head \\ "li").map(_.text.trim) === Seq("<", "1", "2")) assert((pagedTable.pageNavigation(1, 10, 100).head \\ "li").map(_.text.trim) === (1 to 10).map(_.toString) ++ Seq(">", ">>")) assert((pagedTable.pageNavigation(2, 10, 100).head \\ "li").map(_.text.trim) === Seq("<") ++ (1 to 10).map(_.toString) ++ Seq(">", ">>")) assert((pagedTable.pageNavigation(100, 10, 100).head \\ "li").map(_.text.trim) === Seq("<<", "<") ++ (91 to 100).map(_.toString)) assert((pagedTable.pageNavigation(99, 10, 100).head \\ "li").map(_.text.trim) === Seq("<<", "<") ++ (91 to 100).map(_.toString) ++ Seq(">")) assert((pagedTable.pageNavigation(11, 10, 100).head \\ "li").map(_.text.trim) === Seq("<<", "<") ++ (11 to 20).map(_.toString) ++ Seq(">", ">>")) assert((pagedTable.pageNavigation(93, 10, 97).head \\ "li").map(_.text.trim) === Seq("<<", "<") ++ (91 to 97).map(_.toString) ++ Seq(">")) } } private[spark] class SeqPagedDataSource[T](seq: Seq[T], pageSize: Int) extends PagedDataSource[T](pageSize) { override protected def dataSize: Int = seq.size override protected def sliceData(from: Int, to: Int): Seq[T] = seq.slice(from, to) }
Example 63
Source File: StagePageSuite.scala From multi-tenancy-spark with Apache License 2.0 | 5 votes |
package org.apache.spark.ui import javax.servlet.http.HttpServletRequest import scala.xml.Node import org.mockito.Mockito.{RETURNS_SMART_NULLS, mock, when} import org.apache.spark._ import org.apache.spark.executor.TaskMetrics import org.apache.spark.scheduler._ import org.apache.spark.storage.StorageStatusListener import org.apache.spark.ui.exec.ExecutorsListener import org.apache.spark.ui.jobs.{JobProgressListener, StagePage, StagesTab} import org.apache.spark.ui.scope.RDDOperationGraphListener import org.apache.spark.util.Utils class StagePageSuite extends SparkFunSuite with LocalSparkContext { private val peakExecutionMemory = 10 test("peak execution memory should displayed") { val conf = new SparkConf(false) val html = renderStagePage(conf).toString().toLowerCase val targetString = "peak execution memory" assert(html.contains(targetString)) } test("SPARK-10543: peak execution memory should be per-task rather than cumulative") { val conf = new SparkConf(false) val html = renderStagePage(conf).toString().toLowerCase // verify min/25/50/75/max show task value not cumulative values assert(html.contains(s"<td>$peakExecutionMemory.0 b</td>" * 5)) } private def renderStagePage(conf: SparkConf): Seq[Node] = { val jobListener = new JobProgressListener(conf, Utils.getCurrentUserName()) val graphListener = new RDDOperationGraphListener(conf) val executorsListener = new ExecutorsListener(new StorageStatusListener(conf), conf) val tab = mock(classOf[StagesTab], RETURNS_SMART_NULLS) val request = mock(classOf[HttpServletRequest]) when(tab.conf).thenReturn(conf) when(tab.progressListener).thenReturn(jobListener) when(tab.operationGraphListener).thenReturn(graphListener) when(tab.executorsListener).thenReturn(executorsListener) when(tab.appName).thenReturn("testing") when(tab.headerTabs).thenReturn(Seq.empty) when(request.getParameter("id")).thenReturn("0") when(request.getParameter("attempt")).thenReturn("0") val page = new StagePage(tab) // Simulate a stage in job progress listener val stageInfo = new StageInfo(0, 0, "dummy", 1, Seq.empty, Seq.empty, "details") // Simulate two tasks to test PEAK_EXECUTION_MEMORY correctness (1 to 2).foreach { taskId => val taskInfo = new TaskInfo(taskId, taskId, 0, 0, "0", "localhost", TaskLocality.ANY, false) jobListener.onStageSubmitted(SparkListenerStageSubmitted(stageInfo)) jobListener.onTaskStart(SparkListenerTaskStart(0, 0, taskInfo)) taskInfo.markFinished(TaskState.FINISHED) val taskMetrics = TaskMetrics.empty taskMetrics.incPeakExecutionMemory(peakExecutionMemory) jobListener.onTaskEnd( SparkListenerTaskEnd(0, 0, "result", Success, taskInfo, taskMetrics)) } jobListener.onStageCompleted(SparkListenerStageCompleted(stageInfo)) page.render(request) } }
Example 64
Source File: MesosClusterPage.scala From iolap with Apache License 2.0 | 5 votes |
package org.apache.spark.deploy.mesos.ui import javax.servlet.http.HttpServletRequest import scala.xml.Node import org.apache.mesos.Protos.TaskStatus import org.apache.spark.deploy.mesos.MesosDriverDescription import org.apache.spark.scheduler.cluster.mesos.MesosClusterSubmissionState import org.apache.spark.ui.{UIUtils, WebUIPage} private[mesos] class MesosClusterPage(parent: MesosClusterUI) extends WebUIPage("") { def render(request: HttpServletRequest): Seq[Node] = { val state = parent.scheduler.getSchedulerState() val queuedHeaders = Seq("Driver ID", "Submit Date", "Main Class", "Driver Resources") val driverHeaders = queuedHeaders ++ Seq("Start Date", "Mesos Slave ID", "State") val retryHeaders = Seq("Driver ID", "Submit Date", "Description") ++ Seq("Last Failed Status", "Next Retry Time", "Attempt Count") val queuedTable = UIUtils.listingTable(queuedHeaders, queuedRow, state.queuedDrivers) val launchedTable = UIUtils.listingTable(driverHeaders, driverRow, state.launchedDrivers) val finishedTable = UIUtils.listingTable(driverHeaders, driverRow, state.finishedDrivers) val retryTable = UIUtils.listingTable(retryHeaders, retryRow, state.pendingRetryDrivers) val content = <p>Mesos Framework ID: {state.frameworkId}</p> <div class="row-fluid"> <div class="span12"> <h4>Queued Drivers:</h4> {queuedTable} <h4>Launched Drivers:</h4> {launchedTable} <h4>Finished Drivers:</h4> {finishedTable} <h4>Supervise drivers waiting for retry:</h4> {retryTable} </div> </div>; UIUtils.basicSparkPage(content, "Spark Drivers for Mesos cluster") } private def queuedRow(submission: MesosDriverDescription): Seq[Node] = { val id = submission.submissionId <tr> <td><a href={s"driver?id=$id"}>{id}</a></td> <td>{submission.submissionDate}</td> <td>{submission.command.mainClass}</td> <td>cpus: {submission.cores}, mem: {submission.mem}</td> </tr> } private def driverRow(state: MesosClusterSubmissionState): Seq[Node] = { val id = state.driverDescription.submissionId <tr> <td><a href={s"driver?id=$id"}>{id}</a></td> <td>{state.driverDescription.submissionDate}</td> <td>{state.driverDescription.command.mainClass}</td> <td>cpus: {state.driverDescription.cores}, mem: {state.driverDescription.mem}</td> <td>{state.startDate}</td> <td>{state.slaveId.getValue}</td> <td>{stateString(state.mesosTaskStatus)}</td> </tr> } private def retryRow(submission: MesosDriverDescription): Seq[Node] = { val id = submission.submissionId <tr> <td><a href={s"driver?id=$id"}>{id}</a></td> <td>{submission.submissionDate}</td> <td>{submission.command.mainClass}</td> <td>{submission.retryState.get.lastFailureStatus}</td> <td>{submission.retryState.get.nextRetry}</td> <td>{submission.retryState.get.retries}</td> </tr> } private def stateString(status: Option[TaskStatus]): String = { if (status.isEmpty) { return "" } val sb = new StringBuilder val s = status.get sb.append(s"State: ${s.getState}") if (status.get.hasMessage) { sb.append(s", Message: ${s.getMessage}") } if (status.get.hasHealthy) { sb.append(s", Healthy: ${s.getHealthy}") } if (status.get.hasSource) { sb.append(s", Source: ${s.getSource}") } if (status.get.hasReason) { sb.append(s", Reason: ${s.getReason}") } if (status.get.hasTimestamp) { sb.append(s", Time: ${s.getTimestamp}") } sb.toString() } }
Example 65
Source File: HistoryNotFoundPage.scala From iolap with Apache License 2.0 | 5 votes |
package org.apache.spark.deploy.master.ui

import java.net.URLDecoder
import javax.servlet.http.HttpServletRequest

import scala.xml.Node

import org.apache.spark.ui.{UIUtils, WebUIPage}

private[ui] class HistoryNotFoundPage(parent: MasterWebUI)
  extends WebUIPage("history/not-found") {

  def render(request: HttpServletRequest): Seq[Node] = {
    val titleParam = request.getParameter("title")
    val msgParam = request.getParameter("msg")
    val exceptionParam = request.getParameter("exception")

    // If no parameters are specified, assume the user did not enable event logging
    val defaultTitle = "Event logging is not enabled"
    val defaultContent =
      <div class="row-fluid">
        <div class="span12" style="font-size:14px">
          No event logs were found for this application! To
          <a href="http://spark.apache.org/docs/latest/monitoring.html">enable event logging</a>,
          set <span style="font-style:italic">spark.eventLog.enabled</span> to true and
          <span style="font-style:italic">spark.eventLog.dir</span> to the directory to which your
          event logs are written.
        </div>
      </div>

    val title = Option(titleParam).getOrElse(defaultTitle)
    val content = Option(msgParam)
      .map { msg => URLDecoder.decode(msg, "UTF-8") }
      .map { msg =>
        <div class="row-fluid">
          <div class="span12" style="font-size:14px">{msg}</div>
        </div> ++
        Option(exceptionParam)
          .map { e => URLDecoder.decode(e, "UTF-8") }
          .map { e => <pre>{e}</pre> }
          .getOrElse(Seq.empty)
      }.getOrElse(defaultContent)

    UIUtils.basicSparkPage(content, title)
  }
}
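The render method above picks its content with Option: decode the query parameter when present, otherwise fall back to default markup. A small sketch of that fallback idiom (the object and method names are hypothetical):

import java.net.URLDecoder
import scala.xml.Node

object FallbackContentSketch extends App {
  val defaultContent: Seq[Node] = <div>Event logging is not enabled</div>

  // Option(null) is None, so a missing parameter falls through to the default.
  def content(msgParam: String): Seq[Node] =
    Option(msgParam)
      .map(m => URLDecoder.decode(m, "UTF-8"))
      .map(m => <div>{m}</div>)
      .getOrElse(defaultContent)

  println(content(null))         // default markup
  println(content("It%20broke")) // decoded message
}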
Example 66
Source File: ExecutorThreadDumpPage.scala From iolap with Apache License 2.0 | 5 votes |
package org.apache.spark.ui.exec

import java.net.URLDecoder
import javax.servlet.http.HttpServletRequest

import scala.util.Try
import scala.xml.{Text, Node}

import org.apache.spark.ui.{UIUtils, WebUIPage}

private[ui] class ExecutorThreadDumpPage(parent: ExecutorsTab) extends WebUIPage("threadDump") {

  private val sc = parent.sc

  def render(request: HttpServletRequest): Seq[Node] = {
    val executorId = Option(request.getParameter("executorId")).map { executorId =>
      // Due to YARN-2844, "<driver>" in the url will be encoded to "%25253Cdriver%25253E" when
      // running in yarn-cluster mode. `request.getParameter("executorId")` will return
      // "%253Cdriver%253E". Therefore we need to decode it until we get the real id.
      var id = executorId
      var decodedId = URLDecoder.decode(id, "UTF-8")
      while (id != decodedId) {
        id = decodedId
        decodedId = URLDecoder.decode(id, "UTF-8")
      }
      id
    }.getOrElse {
      throw new IllegalArgumentException(s"Missing executorId parameter")
    }
    val time = System.currentTimeMillis()
    val maybeThreadDump = sc.get.getExecutorThreadDump(executorId)

    val content = maybeThreadDump.map { threadDump =>
      val dumpRows = threadDump.map { thread =>
        <div class="accordion-group">
          <div class="accordion-heading" onclick="$(this).next().toggleClass('hidden')">
            <a class="accordion-toggle">
              Thread {thread.threadId}: {thread.threadName} ({thread.threadState})
            </a>
          </div>
          <div class="accordion-body hidden">
            <div class="accordion-inner">
              <pre>{thread.stackTrace}</pre>
            </div>
          </div>
        </div>
      }

      <div class="row-fluid">
        <p>Updated at {UIUtils.formatDate(time)}</p>
        {
          // scalastyle:off
          <p><a class="expandbutton" onClick="$('.accordion-body').removeClass('hidden'); $('.expandbutton').toggleClass('hidden')">
            Expand All
          </a></p>
          <p><a class="expandbutton hidden" onClick="$('.accordion-body').addClass('hidden'); $('.expandbutton').toggleClass('hidden')">
            Collapse All
          </a></p>
          // scalastyle:on
        }
        <div class="accordion">{dumpRows}</div>
      </div>
    }.getOrElse(Text("Error fetching thread dump"))
    UIUtils.headerSparkPage(s"Thread dump for executor $executorId", content, parent)
  }
}
Example 67
Source File: EnvironmentPage.scala From iolap with Apache License 2.0 | 5 votes |
package org.apache.spark.ui.env

import javax.servlet.http.HttpServletRequest

import scala.xml.Node

import org.apache.spark.ui.{UIUtils, WebUIPage}

private[ui] class EnvironmentPage(parent: EnvironmentTab) extends WebUIPage("") {
  private val listener = parent.listener

  def render(request: HttpServletRequest): Seq[Node] = {
    val runtimeInformationTable = UIUtils.listingTable(
      propertyHeader, jvmRow, listener.jvmInformation, fixedWidth = true)
    val sparkPropertiesTable = UIUtils.listingTable(
      propertyHeader, propertyRow, listener.sparkProperties, fixedWidth = true)
    val systemPropertiesTable = UIUtils.listingTable(
      propertyHeader, propertyRow, listener.systemProperties, fixedWidth = true)
    val classpathEntriesTable = UIUtils.listingTable(
      classPathHeaders, classPathRow, listener.classpathEntries, fixedWidth = true)
    val content =
      <span>
        <h4>Runtime Information</h4> {runtimeInformationTable}
        <h4>Spark Properties</h4> {sparkPropertiesTable}
        <h4>System Properties</h4> {systemPropertiesTable}
        <h4>Classpath Entries</h4> {classpathEntriesTable}
      </span>
    UIUtils.headerSparkPage("Environment", content, parent)
  }

  private def propertyHeader = Seq("Name", "Value")
  private def classPathHeaders = Seq("Resource", "Source")
  private def jvmRow(kv: (String, String)) = <tr><td>{kv._1}</td><td>{kv._2}</td></tr>
  private def propertyRow(kv: (String, String)) = <tr><td>{kv._1}</td><td>{kv._2}</td></tr>
  private def classPathRow(data: (String, String)) = <tr><td>{data._1}</td><td>{data._2}</td></tr>
}
Example 68
Source File: StoragePage.scala From iolap with Apache License 2.0 | 5 votes |
package org.apache.spark.ui.storage

import javax.servlet.http.HttpServletRequest

import scala.xml.Node

import org.apache.spark.storage.RDDInfo
import org.apache.spark.ui.{UIUtils, WebUIPage}
import org.apache.spark.util.Utils

// Note: the listing omits the class declaration; it is restored here from the
// upstream Spark source so the snippet's braces and `parent` reference make sense.
private[ui] class StoragePage(parent: StorageTab) extends WebUIPage("") {

  private def rddRow(rdd: RDDInfo): Seq[Node] = {
    // scalastyle:off
    <tr>
      <td>
        <a href={"%s/storage/rdd?id=%s".format(UIUtils.prependBaseUri(parent.basePath), rdd.id)}>
          {rdd.name}
        </a>
      </td>
      <td>{rdd.storageLevel.description}
      </td>
      <td>{rdd.numCachedPartitions}</td>
      <td>{"%.0f%%".format(rdd.numCachedPartitions * 100.0 / rdd.numPartitions)}</td>
      <td sorttable_customkey={rdd.memSize.toString}>{Utils.bytesToString(rdd.memSize)}</td>
      <td sorttable_customkey={rdd.externalBlockStoreSize.toString}>{Utils.bytesToString(rdd.externalBlockStoreSize)}</td>
      <td sorttable_customkey={rdd.diskSize.toString}>{Utils.bytesToString(rdd.diskSize)}</td>
    </tr>
    // scalastyle:on
  }
}
Example 69
Source File: PoolTable.scala From iolap with Apache License 2.0 | 5 votes |
package org.apache.spark.ui.jobs

import scala.collection.mutable.HashMap
import scala.xml.Node

import org.apache.spark.scheduler.{Schedulable, StageInfo}
import org.apache.spark.ui.UIUtils

private[ui] class PoolTable(pools: Seq[Schedulable], parent: StagesTab) {
  private val listener = parent.progressListener

  def toNodeSeq: Seq[Node] = {
    listener.synchronized {
      poolTable(poolRow, pools)
    }
  }

  private def poolTable(
      makeRow: (Schedulable, HashMap[String, HashMap[Int, StageInfo]]) => Seq[Node],
      rows: Seq[Schedulable]): Seq[Node] = {
    <table class="table table-bordered table-striped table-condensed sortable table-fixed">
      <thead>
        <th>Pool Name</th>
        <th>Minimum Share</th>
        <th>Pool Weight</th>
        <th>Active Stages</th>
        <th>Running Tasks</th>
        <th>SchedulingMode</th>
      </thead>
      <tbody>
        {rows.map(r => makeRow(r, listener.poolToActiveStages))}
      </tbody>
    </table>
  }

  private def poolRow(
      p: Schedulable,
      poolToActiveStages: HashMap[String, HashMap[Int, StageInfo]]): Seq[Node] = {
    val activeStages = poolToActiveStages.get(p.name) match {
      case Some(stages) => stages.size
      case None => 0
    }
    val href = "%s/stages/pool?poolname=%s"
      .format(UIUtils.prependBaseUri(parent.basePath), p.name)
    <tr>
      <td><a href={href}>{p.name}</a></td>
      <td>{p.minShare}</td>
      <td>{p.weight}</td>
      <td>{activeStages}</td>
      <td>{p.runningTasks}</td>
      <td>{p.schedulingMode}</td>
    </tr>
  }
}
Example 70
Source File: PoolPage.scala From iolap with Apache License 2.0 | 5 votes |
package org.apache.spark.ui.jobs

import javax.servlet.http.HttpServletRequest

import scala.xml.Node

import org.apache.spark.scheduler.StageInfo
import org.apache.spark.ui.{WebUIPage, UIUtils}

private[ui] class PoolPage(parent: StagesTab) extends WebUIPage("pool") {
  private val sc = parent.sc
  private val listener = parent.progressListener

  def render(request: HttpServletRequest): Seq[Node] = {
    listener.synchronized {
      val poolName = request.getParameter("poolname")
      require(poolName != null && poolName.nonEmpty, "Missing poolname parameter")

      val poolToActiveStages = listener.poolToActiveStages
      val activeStages = poolToActiveStages.get(poolName) match {
        case Some(s) => s.values.toSeq
        case None => Seq[StageInfo]()
      }
      val activeStagesTable = new StageTableBase(activeStages.sortBy(_.submissionTime).reverse,
        parent.basePath, parent.progressListener, isFairScheduler = parent.isFairScheduler,
        killEnabled = parent.killEnabled)

      // For now, pool information is only accessible in live UIs
      val pools = sc.map(_.getPoolForName(poolName).get).toSeq
      val poolTable = new PoolTable(pools, parent)

      val content =
        <h4>Summary </h4> ++ poolTable.toNodeSeq ++
        <h4>{activeStages.size} Active Stages</h4> ++ activeStagesTable.toNodeSeq

      UIUtils.headerSparkPage("Fair Scheduler Pool: " + poolName, content, parent)
    }
  }
}
Example 71
Source File: MesosClusterPage.scala From spark1.52 with Apache License 2.0 | 5 votes |
package org.apache.spark.deploy.mesos.ui

import javax.servlet.http.HttpServletRequest

import scala.xml.Node

import org.apache.mesos.Protos.TaskStatus

import org.apache.spark.deploy.mesos.MesosDriverDescription
import org.apache.spark.scheduler.cluster.mesos.MesosClusterSubmissionState
import org.apache.spark.ui.{UIUtils, WebUIPage}

private[mesos] class MesosClusterPage(parent: MesosClusterUI) extends WebUIPage("") {
  def render(request: HttpServletRequest): Seq[Node] = {
    val state = parent.scheduler.getSchedulerState()
    val queuedHeaders = Seq("Driver ID", "Submit Date", "Main Class", "Driver Resources")
    val driverHeaders = queuedHeaders ++
      Seq("Start Date", "Mesos Slave ID", "State")
    val retryHeaders = Seq("Driver ID", "Submit Date", "Description") ++
      Seq("Last Failed Status", "Next Retry Time", "Attempt Count")
    val queuedTable = UIUtils.listingTable(queuedHeaders, queuedRow, state.queuedDrivers)
    val launchedTable = UIUtils.listingTable(driverHeaders, driverRow, state.launchedDrivers)
    val finishedTable = UIUtils.listingTable(driverHeaders, driverRow, state.finishedDrivers)
    val retryTable = UIUtils.listingTable(retryHeaders, retryRow, state.pendingRetryDrivers)
    val content =
      <p>Mesos Framework ID: {state.frameworkId}</p>
      <div class="row-fluid">
        <div class="span12">
          <h4>Queued Drivers:</h4>
          {queuedTable}
          <h4>Launched Drivers:</h4>
          {launchedTable}
          <h4>Finished Drivers:</h4>
          {finishedTable}
          <h4>Supervise drivers waiting for retry:</h4>
          {retryTable}
        </div>
      </div>;
    UIUtils.basicSparkPage(content, "Spark Drivers for Mesos cluster")
  }

  private def queuedRow(submission: MesosDriverDescription): Seq[Node] = {
    val id = submission.submissionId
    <tr>
      <td><a href={s"driver?id=$id"}>{id}</a></td>
      <td>{submission.submissionDate}</td>
      <td>{submission.command.mainClass}</td>
      <td>cpus: {submission.cores}, mem: {submission.mem}</td>
    </tr>
  }

  private def driverRow(state: MesosClusterSubmissionState): Seq[Node] = {
    val id = state.driverDescription.submissionId
    <tr>
      <td><a href={s"driver?id=$id"}>{id}</a></td>
      <td>{state.driverDescription.submissionDate}</td>
      <td>{state.driverDescription.command.mainClass}</td>
      <td>cpus: {state.driverDescription.cores}, mem: {state.driverDescription.mem}</td>
      <td>{state.startDate}</td>
      <td>{state.slaveId.getValue}</td>
      <td>{stateString(state.mesosTaskStatus)}</td>
    </tr>
  }

  private def retryRow(submission: MesosDriverDescription): Seq[Node] = {
    val id = submission.submissionId
    <tr>
      <td><a href={s"driver?id=$id"}>{id}</a></td>
      <td>{submission.submissionDate}</td>
      <td>{submission.command.mainClass}</td>
      <td>{submission.retryState.get.lastFailureStatus}</td>
      <td>{submission.retryState.get.nextRetry}</td>
      <td>{submission.retryState.get.retries}</td>
    </tr>
  }

  private def stateString(status: Option[TaskStatus]): String = {
    if (status.isEmpty) {
      return ""
    }
    val sb = new StringBuilder
    val s = status.get
    sb.append(s"State: ${s.getState}")
    if (status.get.hasMessage) { sb.append(s", Message: ${s.getMessage}") }
    if (status.get.hasHealthy) { sb.append(s", Healthy: ${s.getHealthy}") }
    if (status.get.hasSource) { sb.append(s", Source: ${s.getSource}") }
    if (status.get.hasReason) { sb.append(s", Reason: ${s.getReason}") }
    if (status.get.hasTimestamp) { sb.append(s", Time: ${s.getTimestamp}") }
    sb.toString()
  }
}
Example 72
Source File: HistoryNotFoundPage.scala From spark1.52 with Apache License 2.0 | 5 votes |
package org.apache.spark.deploy.master.ui

import java.net.URLDecoder
import javax.servlet.http.HttpServletRequest

import scala.xml.Node

import org.apache.spark.ui.{UIUtils, WebUIPage}

private[ui] class HistoryNotFoundPage(parent: MasterWebUI)
  extends WebUIPage("history/not-found") {

  def render(request: HttpServletRequest): Seq[Node] = {
    val titleParam = request.getParameter("title")
    val msgParam = request.getParameter("msg")
    val exceptionParam = request.getParameter("exception")

    // If no parameters are specified, assume the user did not enable event logging
    val defaultTitle = "Event logging is not enabled"
    val defaultContent =
      <div class="row-fluid">
        <div class="span12" style="font-size:14px">
          No event logs were found for this application! To
          <a href="http://spark.apache.org/docs/latest/monitoring.html">enable event logging</a>,
          set <span style="font-style:italic">spark.eventLog.enabled</span> to true and
          <span style="font-style:italic">spark.eventLog.dir</span> to the directory to which your
          event logs are written.
        </div>
      </div>

    val title = Option(titleParam).getOrElse(defaultTitle)
    val content = Option(msgParam)
      .map { msg => URLDecoder.decode(msg, "UTF-8") }
      .map { msg =>
        <div class="row-fluid">
          <div class="span12" style="font-size:14px">{msg}</div>
        </div> ++
        Option(exceptionParam)
          .map { e => URLDecoder.decode(e, "UTF-8") }
          .map { e => <pre>{e}</pre> }
          .getOrElse(Seq.empty)
      }.getOrElse(defaultContent)

    UIUtils.basicSparkPage(content, title)
  }
}
Example 73
Source File: ApplicationPage.scala From spark1.52 with Apache License 2.0 | 5 votes |
package org.apache.spark.deploy.master.ui

import javax.servlet.http.HttpServletRequest

import scala.xml.Node

import org.apache.spark.deploy.ExecutorState
import org.apache.spark.deploy.DeployMessages.{MasterStateResponse, RequestMasterState}
import org.apache.spark.deploy.master.ExecutorDesc
import org.apache.spark.ui.{UIUtils, WebUIPage}
import org.apache.spark.util.Utils

private[ui] class ApplicationPage(parent: MasterWebUI) extends WebUIPage("app") {
  private val master = parent.masterEndpointRef

  def render(request: HttpServletRequest): Seq[Node] = {
    val appId = request.getParameter("appId")
    val state = master.askWithRetry[MasterStateResponse](RequestMasterState)
    val app = state.activeApps.find(_.id == appId).getOrElse({
      state.completedApps.find(_.id == appId).getOrElse(null)
    })
    if (app == null) {
      val msg = <div class="row-fluid">No running application with ID {appId}</div>
      return UIUtils.basicSparkPage(msg, "Not Found")
    }

    val executorHeaders = Seq("ExecutorID", "Worker", "Cores", "Memory", "State", "Logs")
    val allExecutors = (app.executors.values ++ app.removedExecutors).toSet.toSeq
    // This includes executors that are either still running or have exited cleanly
    val executors = allExecutors.filter { exec =>
      !ExecutorState.isFinished(exec.state) || exec.state == ExecutorState.EXITED
    }
    val removedExecutors = allExecutors.diff(executors)
    val executorsTable = UIUtils.listingTable(executorHeaders, executorRow, executors)
    val removedExecutorsTable = UIUtils.listingTable(executorHeaders, executorRow, removedExecutors)

    val content =
      <div class="row-fluid">
        <div class="span12">
          <ul class="unstyled">
            <li><strong>ID:</strong> {app.id}</li>
            <li><strong>Name:</strong> {app.desc.name}</li>
            <li><strong>User:</strong> {app.desc.user}</li>
            <li><strong>Cores:</strong>
            {
              if (app.desc.maxCores.isEmpty) {
                "Unlimited (%s granted)".format(app.coresGranted)
              } else {
                "%s (%s granted, %s left)".format(
                  app.desc.maxCores.get, app.coresGranted, app.coresLeft)
              }
            }
            </li>
            <li>
              <strong>Executor Memory:</strong>
              {Utils.megabytesToString(app.desc.memoryPerExecutorMB)}
            </li>
            <li><strong>Submit Date:</strong> {app.submitDate}</li>
            <li><strong>State:</strong> {app.state}</li>
            <li><strong><a href={app.desc.appUiUrl}>Application Detail UI</a></strong></li>
          </ul>
        </div>
      </div>

      <div class="row-fluid"> <!-- Executors -->
        <div class="span12">
          <h4> Executor Summary </h4>
          {executorsTable}
          {
            if (removedExecutors.nonEmpty) {
              <h4> Removed Executors </h4> ++
              removedExecutorsTable
            }
          }
        </div>
      </div>;
    UIUtils.basicSparkPage(content, "Application: " + app.desc.name)
  }

  private def executorRow(executor: ExecutorDesc): Seq[Node] = {
    <tr>
      <td>{executor.id}</td>
      <td>
        <a href={executor.worker.webUiAddress}>{executor.worker.id}</a>
      </td>
      <td>{executor.cores}</td>
      <td>{executor.memory}</td>
      <td>{executor.state}</td>
      <td>
        <a href={"%s/logPage?appId=%s&executorId=%s&logType=stdout"
          .format(executor.worker.webUiAddress, executor.application.id, executor.id)}>stdout</a>
        <a href={"%s/logPage?appId=%s&executorId=%s&logType=stderr"
          .format(executor.worker.webUiAddress, executor.application.id, executor.id)}>stderr</a>
      </td>
    </tr>
  }
}
Example 74
Source File: ExecutorThreadDumpPage.scala From spark1.52 with Apache License 2.0 | 5 votes |
package org.apache.spark.ui.exec

import java.net.URLDecoder
import javax.servlet.http.HttpServletRequest

import scala.util.Try
import scala.xml.{Text, Node}

import org.apache.spark.ui.{UIUtils, WebUIPage}

private[ui] class ExecutorThreadDumpPage(parent: ExecutorsTab) extends WebUIPage("threadDump") {

  private val sc = parent.sc

  def render(request: HttpServletRequest): Seq[Node] = {
    val executorId = Option(request.getParameter("executorId")).map { executorId =>
      // Due to YARN-2844, "<driver>" in the url will be encoded to "%25253Cdriver%25253E" when
      // running in yarn-cluster mode. `request.getParameter("executorId")` will return
      // "%253Cdriver%253E". Therefore we need to decode it until we get the real id.
      var id = executorId
      var decodedId = URLDecoder.decode(id, "UTF-8")
      while (id != decodedId) {
        id = decodedId
        decodedId = URLDecoder.decode(id, "UTF-8")
      }
      id
    }.getOrElse {
      throw new IllegalArgumentException(s"Missing executorId parameter")
    }
    val time = System.currentTimeMillis()
    val maybeThreadDump = sc.get.getExecutorThreadDump(executorId)

    val content = maybeThreadDump.map { threadDump =>
      val dumpRows = threadDump.sortWith {
        case (threadTrace1, threadTrace2) => {
          val v1 = if (threadTrace1.threadName.contains("Executor task launch")) 1 else 0
          val v2 = if (threadTrace2.threadName.contains("Executor task launch")) 1 else 0
          if (v1 == v2) {
            threadTrace1.threadName.toLowerCase < threadTrace2.threadName.toLowerCase
          } else {
            v1 > v2
          }
        }
      }.map { thread =>
        val threadName = thread.threadName
        val className = "accordion-heading " + {
          if (threadName.contains("Executor task launch")) {
            "executor-thread"
          } else {
            "non-executor-thread"
          }
        }
        <div class="accordion-group">
          <div class={className} onclick="$(this).next().toggleClass('hidden')">
            <a class="accordion-toggle">
              Thread {thread.threadId}: {threadName} ({thread.threadState})
            </a>
          </div>
          <div class="accordion-body hidden">
            <div class="accordion-inner">
              <pre>{thread.stackTrace}</pre>
            </div>
          </div>
        </div>
      }

      <div class="row-fluid">
        <p>Updated at {UIUtils.formatDate(time)}</p>
        {
          // scalastyle:off
          <p><a class="expandbutton" onClick="$('.accordion-body').removeClass('hidden'); $('.expandbutton').toggleClass('hidden')">
            Expand All
          </a></p>
          <p><a class="expandbutton hidden" onClick="$('.accordion-body').addClass('hidden'); $('.expandbutton').toggleClass('hidden')">
            Collapse All
          </a></p>
          // scalastyle:on
        }
        <div class="accordion">{dumpRows}</div>
      </div>
    }.getOrElse(Text("Error fetching thread dump"))
    UIUtils.headerSparkPage(s"Thread dump for executor $executorId", content, parent)
  }
}
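The decode-until-stable loop above is worth isolating: it repeatedly URL-decodes the executor id until the value stops changing, which undoes the double encoding described in the YARN-2844 comment. A standalone sketch of just that loop (the object name is illustrative):

import java.net.URLDecoder

object DecodeUntilStableSketch extends App {
  // Keep decoding until a fixed point is reached; a singly- or multiply-encoded
  // id ends up fully decoded, and an already-plain id passes through unchanged.
  def fullyDecode(s: String): String = {
    var id = s
    var decoded = URLDecoder.decode(id, "UTF-8")
    while (id != decoded) {
      id = decoded
      decoded = URLDecoder.decode(id, "UTF-8")
    }
    id
  }

  assert(fullyDecode("%25253Cdriver%25253E") == "<driver>")
  assert(fullyDecode("executor-1") == "executor-1")
}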
Example 75
Source File: EnvironmentPage.scala From spark1.52 with Apache License 2.0 | 5 votes |
package org.apache.spark.ui.env

import javax.servlet.http.HttpServletRequest

import scala.xml.Node

import org.apache.spark.ui.{UIUtils, WebUIPage}

private[ui] class EnvironmentPage(parent: EnvironmentTab) extends WebUIPage("") {
  private val listener = parent.listener

  def render(request: HttpServletRequest): Seq[Node] = {
    val runtimeInformationTable = UIUtils.listingTable(
      propertyHeader, jvmRow, listener.jvmInformation, fixedWidth = true)
    val sparkPropertiesTable = UIUtils.listingTable(
      propertyHeader, propertyRow, listener.sparkProperties, fixedWidth = true)
    val systemPropertiesTable = UIUtils.listingTable(
      propertyHeader, propertyRow, listener.systemProperties, fixedWidth = true)
    val classpathEntriesTable = UIUtils.listingTable(
      classPathHeaders, classPathRow, listener.classpathEntries, fixedWidth = true)
    val content =
      <span>
        <h4>Runtime Information</h4> {runtimeInformationTable}
        <h4>Spark Properties</h4> {sparkPropertiesTable}
        <h4>System Properties</h4> {systemPropertiesTable}
        <h4>Classpath Entries</h4> {classpathEntriesTable}
      </span>
    UIUtils.headerSparkPage("Environment", content, parent)
  }

  private def propertyHeader = Seq("Name", "Value")
  private def classPathHeaders = Seq("Resource", "Source")
  private def jvmRow(kv: (String, String)) = <tr><td>{kv._1}</td><td>{kv._2}</td></tr>
  private def propertyRow(kv: (String, String)) = <tr><td>{kv._1}</td><td>{kv._2}</td></tr>
  private def classPathRow(data: (String, String)) = <tr><td>{data._1}</td><td>{data._2}</td></tr>
}
Example 76
Source File: PoolTable.scala From spark1.52 with Apache License 2.0 | 5 votes |
package org.apache.spark.ui.jobs

import scala.collection.mutable.HashMap
import scala.xml.Node

import org.apache.spark.scheduler.{Schedulable, StageInfo}
import org.apache.spark.ui.UIUtils

private[ui] class PoolTable(pools: Seq[Schedulable], parent: StagesTab) {
  private val listener = parent.progressListener

  def toNodeSeq: Seq[Node] = {
    listener.synchronized {
      poolTable(poolRow, pools)
    }
  }

  private def poolTable(
      makeRow: (Schedulable, HashMap[String, HashMap[Int, StageInfo]]) => Seq[Node],
      rows: Seq[Schedulable]): Seq[Node] = {
    <table class="table table-bordered table-striped table-condensed sortable table-fixed">
      <thead>
        <th>Pool Name</th>
        <th>Minimum Share</th>
        <th>Pool Weight</th>
        <th>Active Stages</th>
        <th>Running Tasks</th>
        <th>SchedulingMode</th>
      </thead>
      <tbody>
        {rows.map(r => makeRow(r, listener.poolToActiveStages))}
      </tbody>
    </table>
  }

  private def poolRow(
      p: Schedulable,
      poolToActiveStages: HashMap[String, HashMap[Int, StageInfo]]): Seq[Node] = {
    val activeStages = poolToActiveStages.get(p.name) match {
      case Some(stages) => stages.size
      case None => 0
    }
    val href = "%s/stages/pool?poolname=%s"
      .format(UIUtils.prependBaseUri(parent.basePath), p.name)
    <tr>
      <td><a href={href}>{p.name}</a></td>
      <td>{p.minShare}</td>
      <td>{p.weight}</td>
      <td>{activeStages}</td>
      <td>{p.runningTasks}</td>
      <td>{p.schedulingMode}</td>
    </tr>
  }
}
Example 77
Source File: PoolPage.scala From spark1.52 with Apache License 2.0 | 5 votes |
package org.apache.spark.ui.jobs

import javax.servlet.http.HttpServletRequest

import scala.xml.Node

import org.apache.spark.scheduler.StageInfo
import org.apache.spark.ui.{WebUIPage, UIUtils}

private[ui] class PoolPage(parent: StagesTab) extends WebUIPage("pool") {
  private val sc = parent.sc
  private val listener = parent.progressListener

  def render(request: HttpServletRequest): Seq[Node] = {
    listener.synchronized {
      val poolName = request.getParameter("poolname")
      require(poolName != null && poolName.nonEmpty, "Missing poolname parameter")

      val poolToActiveStages = listener.poolToActiveStages
      val activeStages = poolToActiveStages.get(poolName) match {
        case Some(s) => s.values.toSeq
        case None => Seq[StageInfo]()
      }
      val activeStagesTable = new StageTableBase(activeStages.sortBy(_.submissionTime).reverse,
        parent.basePath, parent.progressListener, isFairScheduler = parent.isFairScheduler,
        killEnabled = parent.killEnabled)

      // For now, pool information is only accessible in live UIs
      val pools = sc.map(_.getPoolForName(poolName).get).toSeq
      val poolTable = new PoolTable(pools, parent)

      val content =
        <h4>Summary </h4> ++ poolTable.toNodeSeq ++
        <h4>{activeStages.size} Active Stages</h4> ++ activeStagesTable.toNodeSeq

      UIUtils.headerSparkPage("Fair Scheduler Pool: " + poolName, content, parent)
    }
  }
}
Example 78
Source File: PagedTableSuite.scala From spark1.52 with Apache License 2.0 | 5 votes |
package org.apache.spark.ui

import scala.xml.Node

import org.apache.spark.SparkFunSuite

class PagedDataSourceSuite extends SparkFunSuite {

  test("basic") {
    val dataSource1 = new SeqPagedDataSource[Int](1 to 5, pageSize = 2)
    assert(dataSource1.pageData(1) === PageData(3, (1 to 2)))

    val dataSource2 = new SeqPagedDataSource[Int](1 to 5, pageSize = 2)
    assert(dataSource2.pageData(2) === PageData(3, (3 to 4)))

    val dataSource3 = new SeqPagedDataSource[Int](1 to 5, pageSize = 2)
    assert(dataSource3.pageData(3) === PageData(3, Seq(5)))

    val dataSource4 = new SeqPagedDataSource[Int](1 to 5, pageSize = 2)
    val e1 = intercept[IndexOutOfBoundsException] {
      dataSource4.pageData(4)
    }
    assert(e1.getMessage === "Page 4 is out of range. Please select a page number between 1 and 3.")

    val dataSource5 = new SeqPagedDataSource[Int](1 to 5, pageSize = 2)
    val e2 = intercept[IndexOutOfBoundsException] {
      dataSource5.pageData(0)
    }
    assert(e2.getMessage === "Page 0 is out of range. Please select a page number between 1 and 3.")
  }
}

class PagedTableSuite extends SparkFunSuite {

  test("pageNavigation") {
    // Create a fake PagedTable to test pageNavigation
    val pagedTable = new PagedTable[Int] {
      override def tableId: String = ""
      override def tableCssClass: String = ""
      override def dataSource: PagedDataSource[Int] = null
      override def pageLink(page: Int): String = page.toString
      override def headers: Seq[Node] = Nil
      override def row(t: Int): Seq[Node] = Nil
      override def goButtonJavascriptFunction: (String, String) = ("", "")
    }

    assert(pagedTable.pageNavigation(1, 10, 1) === Nil)
    assert(
      (pagedTable.pageNavigation(1, 10, 2).head \\ "li").map(_.text.trim) === Seq("1", "2", ">"))
    assert(
      (pagedTable.pageNavigation(2, 10, 2).head \\ "li").map(_.text.trim) === Seq("<", "1", "2"))

    assert((pagedTable.pageNavigation(1, 10, 100).head \\ "li").map(_.text.trim) ===
      (1 to 10).map(_.toString) ++ Seq(">", ">>"))
    assert((pagedTable.pageNavigation(2, 10, 100).head \\ "li").map(_.text.trim) ===
      Seq("<") ++ (1 to 10).map(_.toString) ++ Seq(">", ">>"))

    assert((pagedTable.pageNavigation(100, 10, 100).head \\ "li").map(_.text.trim) ===
      Seq("<<", "<") ++ (91 to 100).map(_.toString))
    assert((pagedTable.pageNavigation(99, 10, 100).head \\ "li").map(_.text.trim) ===
      Seq("<<", "<") ++ (91 to 100).map(_.toString) ++ Seq(">"))

    assert((pagedTable.pageNavigation(11, 10, 100).head \\ "li").map(_.text.trim) ===
      Seq("<<", "<") ++ (11 to 20).map(_.toString) ++ Seq(">", ">>"))
    assert((pagedTable.pageNavigation(93, 10, 97).head \\ "li").map(_.text.trim) ===
      Seq("<<", "<") ++ (91 to 97).map(_.toString) ++ Seq(">"))
  }
}

private[spark] class SeqPagedDataSource[T](seq: Seq[T], pageSize: Int)
  extends PagedDataSource[T](pageSize) {

  override protected def dataSize: Int = seq.size

  override protected def sliceData(from: Int, to: Int): Seq[T] = seq.slice(from, to)
}
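The paging contract exercised above is plain arithmetic: pages are 1-based, the page count is the ceiling of size divided by pageSize, and out-of-range pages fail fast with the exact message asserted on. A minimal sketch of that contract (not Spark's PagedDataSource, just the same arithmetic in a standalone form):

object PagingSketch extends App {
  // Returns (totalPages, items on the requested 1-based page).
  def pageData[T](seq: Seq[T], pageSize: Int, page: Int): (Int, Seq[T]) = {
    val totalPages = (seq.size + pageSize - 1) / pageSize  // ceiling division
    if (page <= 0 || page > totalPages) {
      throw new IndexOutOfBoundsException(
        s"Page $page is out of range. Please select a page number between 1 and $totalPages.")
    }
    (totalPages, seq.slice((page - 1) * pageSize, page * pageSize))
  }

  assert(pageData(1 to 5, 2, 3) == (3, Seq(5)))
}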
Example 79
Source File: StagePageSuite.scala From spark1.52 with Apache License 2.0 | 5 votes |
package org.apache.spark.ui

import javax.servlet.http.HttpServletRequest

import scala.xml.Node

import org.mockito.Mockito.{mock, when, RETURNS_SMART_NULLS}

import org.apache.spark._
import org.apache.spark.executor.TaskMetrics
import org.apache.spark.scheduler._
import org.apache.spark.ui.jobs.{JobProgressListener, StagePage, StagesTab}
import org.apache.spark.ui.scope.RDDOperationGraphListener

class StagePageSuite extends SparkFunSuite with LocalSparkContext {

  // The peak execution memory value is only displayed when unsafe is enabled
  test("peak execution memory only displayed if unsafe is enabled") {
    val unsafeConf = "spark.sql.unsafe.enabled"
    val conf = new SparkConf(false).set(unsafeConf, "true")
    val html = renderStagePage(conf).toString().toLowerCase
    println("====" + html)
    val targetString = "peak execution memory"
    assert(html.contains(targetString))
    // Disable unsafe and make sure it's not there
    val conf2 = new SparkConf(false).set(unsafeConf, "false")
    val html2 = renderStagePage(conf2).toString().toLowerCase
    assert(!html2.contains(targetString))
    // Avoid setting anything; it should be displayed by default
    val conf3 = new SparkConf(false)
    val html3 = renderStagePage(conf3).toString().toLowerCase
    assert(html3.contains(targetString))
  }

  test("SPARK-10543: peak execution memory should be per-task rather than cumulative") {
    val unsafeConf = "spark.sql.unsafe.enabled"
    val conf = new SparkConf(false).set(unsafeConf, "true")
    val html = renderStagePage(conf).toString().toLowerCase
    // verify min/25/50/75/max show task value not cumulative values
    assert(html.contains("<td>10.0 b</td>" * 5))
  }

  private def renderStagePage(conf: SparkConf): Seq[Node] = {
    val jobListener = new JobProgressListener(conf)
    val graphListener = new RDDOperationGraphListener(conf)
    val tab = mock(classOf[StagesTab], RETURNS_SMART_NULLS)
    val request = mock(classOf[HttpServletRequest])
    when(tab.conf).thenReturn(conf)
    when(tab.progressListener).thenReturn(jobListener)
    when(tab.operationGraphListener).thenReturn(graphListener)
    when(tab.appName).thenReturn("testing")
    when(tab.headerTabs).thenReturn(Seq.empty)
    when(request.getParameter("id")).thenReturn("0")
    when(request.getParameter("attempt")).thenReturn("0")
    val page = new StagePage(tab)

    // Simulate a stage in job progress listener
    val stageInfo = new StageInfo(0, 0, "dummy", 1, Seq.empty, Seq.empty, "details")
    // Simulate two tasks to test PEAK_EXECUTION_MEMORY correctness
    (1 to 2).foreach { taskId =>
      val taskInfo = new TaskInfo(taskId, taskId, 0, 0, "0", "localhost", TaskLocality.ANY, false)
      val peakExecutionMemory = 10
      taskInfo.accumulables += new AccumulableInfo(0, InternalAccumulator.PEAK_EXECUTION_MEMORY,
        Some(peakExecutionMemory.toString), (peakExecutionMemory * taskId).toString, true)
      jobListener.onStageSubmitted(SparkListenerStageSubmitted(stageInfo))
      jobListener.onTaskStart(SparkListenerTaskStart(0, 0, taskInfo))
      taskInfo.markSuccessful()
      jobListener.onTaskEnd(
        SparkListenerTaskEnd(0, 0, "result", Success, taskInfo, TaskMetrics.empty))
    }
    jobListener.onStageCompleted(SparkListenerStageCompleted(stageInfo))
    page.render(request)
  }
}
Example 80
Source File: WebCrawler.scala From CSYE7200 with MIT License | 5 votes |
package edu.neu.coe.csye7200.asstwc

import java.net.URL

import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent._
import scala.concurrent.duration._
import scala.io.{BufferedSource, Source}
import scala.language.postfixOps
import scala.util._
import scala.util.control.NonFatal
import scala.xml.Node

object WebCrawler extends App {

  def getURLContent(u: URL): Future[String] =
    for {
      s <- MonadOps.asFuture(SourceFromURL(u))
      w <- MonadOps.asFuture(sourceToString(s, s"Cannot read from source at $u"))
    } yield w

  def wget(u: URL): Future[Seq[URL]] = {
    // Hint: write as a for-comprehension, using the method createURL(Option[URL], String)
    // to get the appropriate URL for relative links.
    // 16 points.
    def getURLs(ns: Node): Seq[Try[URL]] = ??? // TO BE IMPLEMENTED

    def getLinks(g: String): Try[Seq[URL]] = {
      val ny = HTMLParser.parse(g) recoverWith {
        case f => Failure(new RuntimeException(s"parse problem with URL $u: $f"))
      }
      for (n <- ny; z <- MonadOps.sequence(getURLs(n))) yield z
    }

    // Hint: write as a for-comprehension, using getURLContent (above) and getLinks (above).
    // You might also need MonadOps.asFuture.
    // 9 points.
    ??? // TO BE IMPLEMENTED
  }

  def wget(us: Seq[URL]): Future[Seq[Either[Throwable, Seq[URL]]]] = {
    val us2 = us.distinct take 10
    // Hint: Use wget(URL) (above). MonadOps.sequence and Future.sequence are also
    // available to you to use.
    // 15 points. Implement the rest of this, based on us2 instead of us.
    ??? // TO BE IMPLEMENTED
  }

  def crawler(depth: Int, us: Seq[URL]): Future[Seq[URL]] = {
    def inner(urls: Seq[URL], depth: Int, accum: Seq[URL]): Future[Seq[URL]] =
      if (depth > 0)
        for (us <- MonadOps.flattenRecover(wget(urls), { x =>
               System.err.println(
                 s"""crawler: ignoring exception $x ${if (x.getCause != null) " with cause " + x.getCause else ""}""")
             });
             r <- inner(us, depth - 1, accum ++: urls)) yield r
      else Future.successful(accum)

    inner(us, depth, Nil)
  }

  println(s"web reader: ${args.toList}")
  val uys = for (arg <- args toList) yield getURL(arg)
  val s = MonadOps.sequence(uys)
  s match {
    case Success(z) =>
      println(s"invoking crawler on $z")
      val f = crawler(2, z)
      Await.ready(f, Duration("60 second"))
      for (x <- f) println(s"Links: $x")
    case Failure(z) => println(s"failure: $z")
  }

  private def sourceToString(source: BufferedSource, errorMsg: String): Try[String] =
    try Success(source mkString)
    catch {
      case NonFatal(e) => Failure(WebCrawlerException(errorMsg, e))
    }

  private def getURL(resource: String): Try[URL] = createURL(null, resource)

  private def createURL(context: Option[URL], resource: String): Try[URL] =
    try Success(new URL(context.orNull, resource))
    catch {
      case NonFatal(e) =>
        val message: String =
          s"""Bad URL: ${if (context.isDefined) "context: " + context else ""} resource=$resource"""
        Failure(WebCrawlerException(message, e))
    }

  private def SourceFromURL(resource: URL): Try[BufferedSource] =
    try Success(Source.fromURL(resource))
    catch {
      case NonFatal(e) => Failure(WebCrawlerException(s"""Cannot get source from URL: $resource""", e))
    }
}

case class WebCrawlerException(url: String, cause: Throwable)
  extends Exception(s"Web Crawler could not decode URL: $url", cause)
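The getURLs placeholder calls for a scala.xml.Node traversal. The following generic illustration shows the standard descendant (\\) and attribute (\ "@...") selectors such a traversal builds on; it is not the assignment's required solution, and the object name is illustrative:

import scala.xml.Node

object HrefExtractionSketch extends App {
  val doc: Node =
    <html><body><a href="a.html">A</a><p><a href="b.html">B</a></p></body></html>

  // \\ selects all descendant <a> elements; \ "@href" reads each one's href.
  val hrefs: Seq[String] = (doc \\ "a").flatMap(a => (a \ "@href").map(_.text))

  assert(hrefs == Seq("a.html", "b.html"))
}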
Example 81
Source File: HistoryPage.scala From Spark-2.3.1 with Apache License 2.0 | 5 votes |
package org.apache.spark.deploy.history

import javax.servlet.http.HttpServletRequest

import scala.xml.Node

import org.apache.spark.status.api.v1.ApplicationInfo
import org.apache.spark.ui.{UIUtils, WebUIPage}

private[history] class HistoryPage(parent: HistoryServer) extends WebUIPage("") {

  def render(request: HttpServletRequest): Seq[Node] = {
    // stripXSS is called first to remove suspicious characters used in XSS attacks
    val requestedIncomplete =
      Option(UIUtils.stripXSS(request.getParameter("showIncomplete"))).getOrElse("false").toBoolean

    val allAppsSize = parent.getApplicationList()
      .count(isApplicationCompleted(_) != requestedIncomplete)
    val eventLogsUnderProcessCount = parent.getEventLogsUnderProcess()
    val lastUpdatedTime = parent.getLastUpdatedTime()
    val providerConfig = parent.getProviderConfig()
    val content =
      <script src={UIUtils.prependBaseUri("/static/historypage-common.js")}></script> ++
      <script src={UIUtils.prependBaseUri("/static/utils.js")}></script>
      <div>
        <div class="container-fluid">
          <ul class="unstyled">
            {providerConfig.map { case (k, v) => <li><strong>{k}:</strong> {v}</li> }}
          </ul>
          {
          if (eventLogsUnderProcessCount > 0) {
            <p>There are {eventLogsUnderProcessCount} event log(s) currently being
              processed which may result in additional applications getting listed on this page.
              Refresh the page to view updates. </p>
          }
          }
          {
          if (lastUpdatedTime > 0) {
            <p>Last updated: <span id="last-updated">{lastUpdatedTime}</span></p>
          }
          }
          {
          <p>Client local time zone: <span id="time-zone"></span></p>
          }
          {
          if (allAppsSize > 0) {
            <script src={UIUtils.prependBaseUri("/static/dataTables.rowsGroup.js")}></script> ++
              <div id="history-summary" class="row-fluid"></div> ++
              <script src={UIUtils.prependBaseUri("/static/historypage.js")}></script> ++
              <script>setAppLimit({parent.maxApplications})</script>
          } else if (requestedIncomplete) {
            <h4>No incomplete applications found!</h4>
          } else if (eventLogsUnderProcessCount > 0) {
            <h4>No completed applications found!</h4>
          } else {
            <h4>No completed applications found!</h4> ++ parent.emptyListingHtml
          }
          }
          <a href={makePageLink(!requestedIncomplete)}>
            {
            if (requestedIncomplete) {
              "Back to completed applications"
            } else {
              "Show incomplete applications"
            }
            }
          </a>
        </div>
      </div>
    UIUtils.basicSparkPage(content, "History Server", true)
  }

  private def makePageLink(showIncomplete: Boolean): String = {
    UIUtils.prependBaseUri("/?" + "showIncomplete=" + showIncomplete)
  }

  private def isApplicationCompleted(appInfo: ApplicationInfo): Boolean = {
    appInfo.attempts.nonEmpty && appInfo.attempts.head.completed
  }
}
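The blocks embedded in the XML literal above are plain Scala if/else expressions whose branches contribute NodeSeq values to the surrounding markup. A compact sketch of that idiom (names are illustrative):

import scala.xml.{Node, NodeSeq}

object ConditionalMarkupSketch extends App {
  // An if without a matching markup branch can yield NodeSeq.Empty,
  // so the enclosing element renders cleanly either way.
  def banner(count: Int): Seq[Node] =
    <div>{
      if (count > 0) <p>There are {count} event log(s) being processed.</p>
      else NodeSeq.Empty
    }</div>

  println(banner(2))
  println(banner(0))
}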
Example 82
Source File: ExecutorsTab.scala From Spark-2.3.1 with Apache License 2.0 | 5 votes |
package org.apache.spark.ui.exec

import javax.servlet.http.HttpServletRequest

import scala.xml.Node

import org.apache.spark.ui.{SparkUI, SparkUITab, UIUtils, WebUIPage}

private[ui] class ExecutorsTab(parent: SparkUI) extends SparkUITab(parent, "executors") {

  init()

  private def init(): Unit = {
    val threadDumpEnabled =
      parent.sc.isDefined && parent.conf.getBoolean("spark.ui.threadDumpsEnabled", true)

    attachPage(new ExecutorsPage(this, threadDumpEnabled))
    if (threadDumpEnabled) {
      attachPage(new ExecutorThreadDumpPage(this, parent.sc))
    }
  }
}

private[ui] class ExecutorsPage(
    parent: SparkUITab,
    threadDumpEnabled: Boolean)
  extends WebUIPage("") {

  def render(request: HttpServletRequest): Seq[Node] = {
    val content =
      <div>
        {
          <div id="active-executors" class="row-fluid"></div> ++
          <script src={UIUtils.prependBaseUri("/static/utils.js")}></script> ++
          <script src={UIUtils.prependBaseUri("/static/executorspage.js")}></script> ++
          <script>setThreadDumpEnabled({threadDumpEnabled})</script>
        }
      </div>

    UIUtils.headerSparkPage("Executors", content, parent, useDataTables = true)
  }
}
Example 83
Source File: ExecutorThreadDumpPage.scala From Spark-2.3.1 with Apache License 2.0 | 5 votes |
package org.apache.spark.ui.exec

import java.util.Locale
import javax.servlet.http.HttpServletRequest

import scala.xml.{Node, Text}

import org.apache.spark.SparkContext
import org.apache.spark.ui.{SparkUITab, UIUtils, WebUIPage}

private[ui] class ExecutorThreadDumpPage(
    parent: SparkUITab,
    sc: Option[SparkContext]) extends WebUIPage("threadDump") {

  // stripXSS is called first to remove suspicious characters used in XSS attacks
  def render(request: HttpServletRequest): Seq[Node] = {
    val executorId =
      Option(UIUtils.stripXSS(request.getParameter("executorId"))).map { executorId =>
        UIUtils.decodeURLParameter(executorId)
      }.getOrElse {
        throw new IllegalArgumentException(s"Missing executorId parameter")
      }
    val time = System.currentTimeMillis()
    val maybeThreadDump = sc.get.getExecutorThreadDump(executorId)

    val content = maybeThreadDump.map { threadDump =>
      val dumpRows = threadDump.sortWith {
        case (threadTrace1, threadTrace2) =>
          val v1 = if (threadTrace1.threadName.contains("Executor task launch")) 1 else 0
          val v2 = if (threadTrace2.threadName.contains("Executor task launch")) 1 else 0
          if (v1 == v2) {
            threadTrace1.threadName.toLowerCase(Locale.ROOT) <
              threadTrace2.threadName.toLowerCase(Locale.ROOT)
          } else {
            v1 > v2
          }
      }.map { thread =>
        val threadId = thread.threadId
        val blockedBy = thread.blockedByThreadId match {
          case Some(_) =>
            <div>
              Blocked by <a href={s"#${thread.blockedByThreadId}_td_id"}>
              Thread {thread.blockedByThreadId} {thread.blockedByLock}</a>
            </div>
          case None => Text("")
        }
        val heldLocks = thread.holdingLocks.mkString(", ")

        <tr id={s"thread_${threadId}_tr"} class="accordion-heading"
            onclick={s"toggleThreadStackTrace($threadId, false)"}
            onmouseover={s"onMouseOverAndOut($threadId)"}
            onmouseout={s"onMouseOverAndOut($threadId)"}>
          <td id={s"${threadId}_td_id"}>{threadId}</td>
          <td id={s"${threadId}_td_name"}>{thread.threadName}</td>
          <td id={s"${threadId}_td_state"}>{thread.threadState}</td>
          <td id={s"${threadId}_td_locking"}>{blockedBy}{heldLocks}</td>
          <td id={s"${threadId}_td_stacktrace"} class="hidden">{thread.stackTrace}</td>
        </tr>
      }

      <div class="row-fluid">
        <p>Updated at {UIUtils.formatDate(time)}</p>
        {
          // scalastyle:off
          <p><a class="expandbutton" onClick="expandAllThreadStackTrace(true)">
            Expand All
          </a></p>
          <p><a class="expandbutton hidden" onClick="collapseAllThreadStackTrace(true)">
            Collapse All
          </a></p>
          <div class="form-inline">
            <div class="bs-example" data-example-id="simple-form-inline">
              <div class="form-group">
                <div class="input-group">
                  Search: <input type="text" class="form-control" id="search" oninput="onSearchStringChange()"></input>
                </div>
              </div>
            </div>
          </div>
          <p></p>
          // scalastyle:on
        }
        <table class={UIUtils.TABLE_CLASS_STRIPED + " accordion-group" + " sortable"}>
          <thead>
            <th onClick="collapseAllThreadStackTrace(false)">Thread ID</th>
            <th onClick="collapseAllThreadStackTrace(false)">Thread Name</th>
            <th onClick="collapseAllThreadStackTrace(false)">Thread State</th>
            <th onClick="collapseAllThreadStackTrace(false)">Thread Locks</th>
          </thead>
          <tbody>{dumpRows}</tbody>
        </table>
      </div>
    }.getOrElse(Text("Error fetching thread dump"))
    UIUtils.headerSparkPage(s"Thread dump for executor $executorId", content, parent)
  }
}
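The blockedBy value above shows a useful scala.xml idiom: pattern-match an Option into either real markup or an empty Text node, so the result is always a Node and embeds cleanly in the row. A standalone sketch (names are illustrative):

import scala.xml.{Node, Text}

object OptionalMarkupSketch extends App {
  // Both branches are Nodes: an Elem when there is something to show,
  // an empty Text node otherwise.
  def blockedBy(id: Option[Long]): Node = id match {
    case Some(t) => <div>Blocked by <a href={s"#${t}_td_id"}>Thread {t}</a></div>
    case None => Text("")
  }

  println(blockedBy(Some(42L)))
  println(blockedBy(None))
}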
Example 84
Source File: EnvironmentPage.scala From Spark-2.3.1 with Apache License 2.0 | 5 votes |
package org.apache.spark.ui.env

import javax.servlet.http.HttpServletRequest

import scala.xml.Node

import org.apache.spark.SparkConf
import org.apache.spark.status.AppStatusStore
import org.apache.spark.ui._
import org.apache.spark.util.Utils

private[ui] class EnvironmentPage(
    parent: EnvironmentTab,
    conf: SparkConf,
    store: AppStatusStore) extends WebUIPage("") {

  def render(request: HttpServletRequest): Seq[Node] = {
    val appEnv = store.environmentInfo()
    val jvmInformation = Map(
      "Java Version" -> appEnv.runtime.javaVersion,
      "Java Home" -> appEnv.runtime.javaHome,
      "Scala Version" -> appEnv.runtime.scalaVersion)

    val runtimeInformationTable = UIUtils.listingTable(
      propertyHeader, jvmRow, jvmInformation, fixedWidth = true)
    val sparkPropertiesTable = UIUtils.listingTable(propertyHeader, propertyRow,
      Utils.redact(conf, appEnv.sparkProperties.toSeq), fixedWidth = true)
    val systemPropertiesTable = UIUtils.listingTable(
      propertyHeader, propertyRow, appEnv.systemProperties, fixedWidth = true)
    val classpathEntriesTable = UIUtils.listingTable(
      classPathHeaders, classPathRow, appEnv.classpathEntries, fixedWidth = true)
    val content =
      <span>
        <h4>Runtime Information</h4> {runtimeInformationTable}
        <h4>Spark Properties</h4> {sparkPropertiesTable}
        <h4>System Properties</h4> {systemPropertiesTable}
        <h4>Classpath Entries</h4> {classpathEntriesTable}
      </span>
    UIUtils.headerSparkPage("Environment", content, parent)
  }

  private def propertyHeader = Seq("Name", "Value")
  private def classPathHeaders = Seq("Resource", "Source")
  private def jvmRow(kv: (String, String)) = <tr><td>{kv._1}</td><td>{kv._2}</td></tr>
  private def propertyRow(kv: (String, String)) = <tr><td>{kv._1}</td><td>{kv._2}</td></tr>
  private def classPathRow(data: (String, String)) = <tr><td>{data._1}</td><td>{data._2}</td></tr>
}

private[ui] class EnvironmentTab(
    parent: SparkUI,
    store: AppStatusStore) extends SparkUITab(parent, "environment") {
  attachPage(new EnvironmentPage(this, parent.conf, store))
}
Example 85
Source File: PoolTable.scala From Spark-2.3.1 with Apache License 2.0 | 5 votes |
package org.apache.spark.ui.jobs

import java.net.URLEncoder

import scala.xml.Node

import org.apache.spark.scheduler.Schedulable
import org.apache.spark.status.PoolData
import org.apache.spark.ui.UIUtils

private[ui] class PoolTable(pools: Map[Schedulable, PoolData], parent: StagesTab) {

  def toNodeSeq: Seq[Node] = {
    <table class="table table-bordered table-striped table-condensed sortable table-fixed">
      <thead>
        <th>Pool Name</th>
        <th>Minimum Share</th>
        <th>Pool Weight</th>
        <th>Active Stages</th>
        <th>Running Tasks</th>
        <th>SchedulingMode</th>
      </thead>
      <tbody>
        {pools.map { case (s, p) => poolRow(s, p) }}
      </tbody>
    </table>
  }

  private def poolRow(s: Schedulable, p: PoolData): Seq[Node] = {
    val activeStages = p.stageIds.size
    val href = "%s/stages/pool?poolname=%s"
      .format(UIUtils.prependBaseUri(parent.basePath), URLEncoder.encode(p.name, "UTF-8"))
    <tr>
      <td><a href={href}>{p.name}</a></td>
      <td>{s.minShare}</td>
      <td>{s.weight}</td>
      <td>{activeStages}</td>
      <td>{s.runningTasks}</td>
      <td>{s.schedulingMode}</td>
    </tr>
  }
}
Example 86
Source File: PoolPage.scala From Spark-2.3.1 with Apache License 2.0 | 5 votes |
package org.apache.spark.ui.jobs

import javax.servlet.http.HttpServletRequest

import scala.xml.Node

import org.apache.spark.status.PoolData
import org.apache.spark.status.api.v1._
import org.apache.spark.ui.{UIUtils, WebUIPage}

private[ui] class PoolPage(parent: StagesTab) extends WebUIPage("pool") {

  def render(request: HttpServletRequest): Seq[Node] = {
    // stripXSS is called first to remove suspicious characters used in XSS attacks
    val poolName = Option(UIUtils.stripXSS(request.getParameter("poolname"))).map { poolname =>
      UIUtils.decodeURLParameter(poolname)
    }.getOrElse {
      throw new IllegalArgumentException(s"Missing poolname parameter")
    }

    // For now, pool information is only accessible in live UIs
    val pool = parent.sc.flatMap(_.getPoolForName(poolName)).getOrElse {
      throw new IllegalArgumentException(s"Unknown pool: $poolName")
    }
    val uiPool = parent.store.asOption(parent.store.pool(poolName)).getOrElse(
      new PoolData(poolName, Set()))

    val activeStages = uiPool.stageIds.toSeq.map(parent.store.lastStageAttempt(_))
    val activeStagesTable = new StageTableBase(parent.store, request, activeStages, "",
      "activeStage", parent.basePath, "stages/pool", parent.isFairScheduler, parent.killEnabled,
      false)

    val poolTable = new PoolTable(Map(pool -> uiPool), parent)
    var content = <h4>Summary </h4> ++ poolTable.toNodeSeq
    if (activeStages.nonEmpty) {
      content ++= <h4>Active Stages ({activeStages.size})</h4> ++ activeStagesTable.toNodeSeq
    }

    UIUtils.headerSparkPage("Fair Scheduler Pool: " + poolName, content, parent)
  }
}
Example 87
Source File: PagedTableSuite.scala From Spark-2.3.1 with Apache License 2.0 | 5 votes |
package org.apache.spark.ui

import scala.xml.Node

import org.apache.spark.SparkFunSuite

class PagedDataSourceSuite extends SparkFunSuite {

  test("basic") {
    val dataSource1 = new SeqPagedDataSource[Int](1 to 5, pageSize = 2)
    assert(dataSource1.pageData(1) === PageData(3, (1 to 2)))

    val dataSource2 = new SeqPagedDataSource[Int](1 to 5, pageSize = 2)
    assert(dataSource2.pageData(2) === PageData(3, (3 to 4)))

    val dataSource3 = new SeqPagedDataSource[Int](1 to 5, pageSize = 2)
    assert(dataSource3.pageData(3) === PageData(3, Seq(5)))

    val dataSource4 = new SeqPagedDataSource[Int](1 to 5, pageSize = 2)
    val e1 = intercept[IndexOutOfBoundsException] {
      dataSource4.pageData(4)
    }
    assert(e1.getMessage === "Page 4 is out of range. Please select a page number between 1 and 3.")

    val dataSource5 = new SeqPagedDataSource[Int](1 to 5, pageSize = 2)
    val e2 = intercept[IndexOutOfBoundsException] {
      dataSource5.pageData(0)
    }
    assert(e2.getMessage === "Page 0 is out of range. Please select a page number between 1 and 3.")
  }
}

class PagedTableSuite extends SparkFunSuite {

  test("pageNavigation") {
    // Create a fake PagedTable to test pageNavigation
    val pagedTable = new PagedTable[Int] {
      override def tableId: String = ""
      override def tableCssClass: String = ""
      override def dataSource: PagedDataSource[Int] = null
      override def pageLink(page: Int): String = page.toString
      override def headers: Seq[Node] = Nil
      override def row(t: Int): Seq[Node] = Nil
      override def pageSizeFormField: String = "pageSize"
      override def prevPageSizeFormField: String = "prevPageSize"
      override def pageNumberFormField: String = "page"
      override def goButtonFormPath: String = ""
    }

    assert(pagedTable.pageNavigation(1, 10, 1) === Nil)
    assert(
      (pagedTable.pageNavigation(1, 10, 2).head \\ "li").map(_.text.trim) === Seq("1", "2", ">"))
    assert(
      (pagedTable.pageNavigation(2, 10, 2).head \\ "li").map(_.text.trim) === Seq("<", "1", "2"))

    assert((pagedTable.pageNavigation(1, 10, 100).head \\ "li").map(_.text.trim) ===
      (1 to 10).map(_.toString) ++ Seq(">", ">>"))
    assert((pagedTable.pageNavigation(2, 10, 100).head \\ "li").map(_.text.trim) ===
      Seq("<") ++ (1 to 10).map(_.toString) ++ Seq(">", ">>"))

    assert((pagedTable.pageNavigation(100, 10, 100).head \\ "li").map(_.text.trim) ===
      Seq("<<", "<") ++ (91 to 100).map(_.toString))
    assert((pagedTable.pageNavigation(99, 10, 100).head \\ "li").map(_.text.trim) ===
      Seq("<<", "<") ++ (91 to 100).map(_.toString) ++ Seq(">"))

    assert((pagedTable.pageNavigation(11, 10, 100).head \\ "li").map(_.text.trim) ===
      Seq("<<", "<") ++ (11 to 20).map(_.toString) ++ Seq(">", ">>"))
    assert((pagedTable.pageNavigation(93, 10, 97).head \\ "li").map(_.text.trim) ===
      Seq("<<", "<") ++ (91 to 97).map(_.toString) ++ Seq(">"))
  }
}

private[spark] class SeqPagedDataSource[T](seq: Seq[T], pageSize: Int)
  extends PagedDataSource[T](pageSize) {

  override protected def dataSize: Int = seq.size

  override protected def sliceData(from: Int, to: Int): Seq[T] = seq.slice(from, to)
}
Example 88
Source File: MesosClusterPage.scala From BigDatalog with Apache License 2.0 | 5 votes |
package org.apache.spark.deploy.mesos.ui

import javax.servlet.http.HttpServletRequest

import scala.xml.Node

import org.apache.mesos.Protos.TaskStatus

import org.apache.spark.deploy.mesos.MesosDriverDescription
import org.apache.spark.scheduler.cluster.mesos.MesosClusterSubmissionState
import org.apache.spark.ui.{UIUtils, WebUIPage}

private[mesos] class MesosClusterPage(parent: MesosClusterUI) extends WebUIPage("") {
  def render(request: HttpServletRequest): Seq[Node] = {
    val state = parent.scheduler.getSchedulerState()
    val queuedHeaders = Seq("Driver ID", "Submit Date", "Main Class", "Driver Resources")
    val driverHeaders = queuedHeaders ++
      Seq("Start Date", "Mesos Slave ID", "State")
    val retryHeaders = Seq("Driver ID", "Submit Date", "Description") ++
      Seq("Last Failed Status", "Next Retry Time", "Attempt Count")
    val queuedTable = UIUtils.listingTable(queuedHeaders, queuedRow, state.queuedDrivers)
    val launchedTable = UIUtils.listingTable(driverHeaders, driverRow, state.launchedDrivers)
    val finishedTable = UIUtils.listingTable(driverHeaders, driverRow, state.finishedDrivers)
    val retryTable = UIUtils.listingTable(retryHeaders, retryRow, state.pendingRetryDrivers)
    val content =
      <p>Mesos Framework ID: {state.frameworkId}</p>
      <div class="row-fluid">
        <div class="span12">
          <h4>Queued Drivers:</h4>
          {queuedTable}
          <h4>Launched Drivers:</h4>
          {launchedTable}
          <h4>Finished Drivers:</h4>
          {finishedTable}
          <h4>Supervise drivers waiting for retry:</h4>
          {retryTable}
        </div>
      </div>;
    UIUtils.basicSparkPage(content, "Spark Drivers for Mesos cluster")
  }

  private def queuedRow(submission: MesosDriverDescription): Seq[Node] = {
    val id = submission.submissionId
    <tr>
      <td><a href={s"driver?id=$id"}>{id}</a></td>
      <td>{submission.submissionDate}</td>
      <td>{submission.command.mainClass}</td>
      <td>cpus: {submission.cores}, mem: {submission.mem}</td>
    </tr>
  }

  private def driverRow(state: MesosClusterSubmissionState): Seq[Node] = {
    val id = state.driverDescription.submissionId
    <tr>
      <td><a href={s"driver?id=$id"}>{id}</a></td>
      <td>{state.driverDescription.submissionDate}</td>
      <td>{state.driverDescription.command.mainClass}</td>
      <td>cpus: {state.driverDescription.cores}, mem: {state.driverDescription.mem}</td>
      <td>{state.startDate}</td>
      <td>{state.slaveId.getValue}</td>
      <td>{stateString(state.mesosTaskStatus)}</td>
    </tr>
  }

  private def retryRow(submission: MesosDriverDescription): Seq[Node] = {
    val id = submission.submissionId
    <tr>
      <td><a href={s"driver?id=$id"}>{id}</a></td>
      <td>{submission.submissionDate}</td>
      <td>{submission.command.mainClass}</td>
      <td>{submission.retryState.get.lastFailureStatus}</td>
      <td>{submission.retryState.get.nextRetry}</td>
      <td>{submission.retryState.get.retries}</td>
    </tr>
  }

  private def stateString(status: Option[TaskStatus]): String = {
    if (status.isEmpty) {
      return ""
    }
    val sb = new StringBuilder
    val s = status.get
    sb.append(s"State: ${s.getState}")
    if (status.get.hasMessage) { sb.append(s", Message: ${s.getMessage}") }
    if (status.get.hasHealthy) { sb.append(s", Healthy: ${s.getHealthy}") }
    if (status.get.hasSource) { sb.append(s", Source: ${s.getSource}") }
    if (status.get.hasReason) { sb.append(s", Reason: ${s.getReason}") }
    if (status.get.hasTimestamp) { sb.append(s", Time: ${s.getTimestamp}") }
    sb.toString()
  }
}
Example 89
Source File: HistoryNotFoundPage.scala From BigDatalog with Apache License 2.0 | 5 votes |
package org.apache.spark.deploy.master.ui

import java.net.URLDecoder
import javax.servlet.http.HttpServletRequest

import scala.xml.Node

import org.apache.spark.ui.{UIUtils, WebUIPage}

private[ui] class HistoryNotFoundPage(parent: MasterWebUI)
  extends WebUIPage("history/not-found") {

  def render(request: HttpServletRequest): Seq[Node] = {
    val titleParam = request.getParameter("title")
    val msgParam = request.getParameter("msg")
    val exceptionParam = request.getParameter("exception")

    // If no parameters are specified, assume the user did not enable event logging
    val defaultTitle = "Event logging is not enabled"
    val defaultContent =
      <div class="row-fluid">
        <div class="span12" style="font-size:14px">
          No event logs were found for this application! To
          <a href="http://spark.apache.org/docs/latest/monitoring.html">enable event logging</a>,
          set <span style="font-style:italic">spark.eventLog.enabled</span> to true and
          <span style="font-style:italic">spark.eventLog.dir</span> to the directory to which your
          event logs are written.
        </div>
      </div>

    val title = Option(titleParam).getOrElse(defaultTitle)
    val content = Option(msgParam)
      .map { msg => URLDecoder.decode(msg, "UTF-8") }
      .map { msg =>
        <div class="row-fluid">
          <div class="span12" style="font-size:14px">{msg}</div>
        </div> ++
        Option(exceptionParam)
          .map { e => URLDecoder.decode(e, "UTF-8") }
          .map { e => <pre>{e}</pre> }
          .getOrElse(Seq.empty)
      }.getOrElse(defaultContent)

    UIUtils.basicSparkPage(content, title)
  }
}
Example 90
Source File: ApplicationPage.scala From BigDatalog with Apache License 2.0 | 5 votes |
package org.apache.spark.deploy.master.ui

import javax.servlet.http.HttpServletRequest

import scala.xml.Node

import org.apache.spark.deploy.ExecutorState
import org.apache.spark.deploy.DeployMessages.{MasterStateResponse, RequestMasterState}
import org.apache.spark.deploy.master.ExecutorDesc
import org.apache.spark.ui.{UIUtils, WebUIPage}
import org.apache.spark.util.Utils

private[ui] class ApplicationPage(parent: MasterWebUI) extends WebUIPage("app") {
  private val master = parent.masterEndpointRef

  def render(request: HttpServletRequest): Seq[Node] = {
    val appId = request.getParameter("appId")
    val state = master.askWithRetry[MasterStateResponse](RequestMasterState)
    val app = state.activeApps.find(_.id == appId).getOrElse({
      state.completedApps.find(_.id == appId).getOrElse(null)
    })
    if (app == null) {
      val msg = <div class="row-fluid">No running application with ID {appId}</div>
      return UIUtils.basicSparkPage(msg, "Not Found")
    }

    val executorHeaders = Seq("ExecutorID", "Worker", "Cores", "Memory", "State", "Logs")
    val allExecutors = (app.executors.values ++ app.removedExecutors).toSet.toSeq
    // This includes executors that are either still running or have exited cleanly
    val executors = allExecutors.filter { exec =>
      !ExecutorState.isFinished(exec.state) || exec.state == ExecutorState.EXITED
    }
    val removedExecutors = allExecutors.diff(executors)
    val executorsTable = UIUtils.listingTable(executorHeaders, executorRow, executors)
    val removedExecutorsTable = UIUtils.listingTable(executorHeaders, executorRow, removedExecutors)

    val content =
      <div class="row-fluid">
        <div class="span12">
          <ul class="unstyled">
            <li><strong>ID:</strong> {app.id}</li>
            <li><strong>Name:</strong> {app.desc.name}</li>
            <li><strong>User:</strong> {app.desc.user}</li>
            <li><strong>Cores:</strong>
            {
              if (app.desc.maxCores.isEmpty) {
                "Unlimited (%s granted)".format(app.coresGranted)
              } else {
                "%s (%s granted, %s left)".format(
                  app.desc.maxCores.get, app.coresGranted, app.coresLeft)
              }
            }
            </li>
            <li>
              <strong>Executor Memory:</strong>
              {Utils.megabytesToString(app.desc.memoryPerExecutorMB)}
            </li>
            <li><strong>Submit Date:</strong> {app.submitDate}</li>
            <li><strong>State:</strong> {app.state}</li>
            <li><strong><a href={app.curAppUIUrl}>Application Detail UI</a></strong></li>
          </ul>
        </div>
      </div>

      <div class="row-fluid"> <!-- Executors -->
        <div class="span12">
          <h4> Executor Summary </h4>
          {executorsTable}
          {
            if (removedExecutors.nonEmpty) {
              <h4> Removed Executors </h4> ++
              removedExecutorsTable
            }
          }
        </div>
      </div>;
    UIUtils.basicSparkPage(content, "Application: " + app.desc.name)
  }

  private def executorRow(executor: ExecutorDesc): Seq[Node] = {
    <tr>
      <td>{executor.id}</td>
      <td>
        <a href={executor.worker.webUiAddress}>{executor.worker.id}</a>
      </td>
      <td>{executor.cores}</td>
      <td>{executor.memory}</td>
      <td>{executor.state}</td>
      <td>
        <a href={"%s/logPage?appId=%s&executorId=%s&logType=stdout"
          .format(executor.worker.webUiAddress, executor.application.id, executor.id)}>stdout</a>
        <a href={"%s/logPage?appId=%s&executorId=%s&logType=stderr"
          .format(executor.worker.webUiAddress, executor.application.id, executor.id)}>stderr</a>
      </td>
    </tr>
  }
}
Example 91
Source File: ExecutorThreadDumpPage.scala From BigDatalog with Apache License 2.0 | 5 votes |
package org.apache.spark.ui.exec

import javax.servlet.http.HttpServletRequest

import scala.util.Try
import scala.xml.{Text, Node}

import org.apache.spark.ui.{UIUtils, WebUIPage}

private[ui] class ExecutorThreadDumpPage(parent: ExecutorsTab) extends WebUIPage("threadDump") {

  private val sc = parent.sc

  def render(request: HttpServletRequest): Seq[Node] = {
    val executorId = Option(request.getParameter("executorId")).map { executorId =>
      UIUtils.decodeURLParameter(executorId)
    }.getOrElse {
      throw new IllegalArgumentException(s"Missing executorId parameter")
    }
    val time = System.currentTimeMillis()
    val maybeThreadDump = sc.get.getExecutorThreadDump(executorId)

    val content = maybeThreadDump.map { threadDump =>
      val dumpRows = threadDump.sortWith {
        case (threadTrace1, threadTrace2) => {
          val v1 = if (threadTrace1.threadName.contains("Executor task launch")) 1 else 0
          val v2 = if (threadTrace2.threadName.contains("Executor task launch")) 1 else 0
          if (v1 == v2) {
            threadTrace1.threadName.toLowerCase < threadTrace2.threadName.toLowerCase
          } else {
            v1 > v2
          }
        }
      }.map { thread =>
        val threadName = thread.threadName
        val className = "accordion-heading " + {
          if (threadName.contains("Executor task launch")) {
            "executor-thread"
          } else {
            "non-executor-thread"
          }
        }
        <div class="accordion-group">
          <div class={className} onclick="$(this).next().toggleClass('hidden')">
            <a class="accordion-toggle">
              Thread {thread.threadId}: {threadName} ({thread.threadState})
            </a>
          </div>
          <div class="accordion-body hidden">
            <div class="accordion-inner">
              <pre>{thread.stackTrace}</pre>
            </div>
          </div>
        </div>
      }

      <div class="row-fluid">
        <p>Updated at {UIUtils.formatDate(time)}</p>
        {
          // scalastyle:off
          <p><a class="expandbutton" onClick="$('.accordion-body').removeClass('hidden'); $('.expandbutton').toggleClass('hidden')">
            Expand All
          </a></p>
          <p><a class="expandbutton hidden" onClick="$('.accordion-body').addClass('hidden'); $('.expandbutton').toggleClass('hidden')">
            Collapse All
          </a></p>
          // scalastyle:on
        }
        <div class="accordion">{dumpRows}</div>
      </div>
    }.getOrElse(Text("Error fetching thread dump"))

    UIUtils.headerSparkPage(s"Thread dump for executor $executorId", content, parent)
  }
}
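The sortWith comparator above is the interesting part: it floats "Executor task launch" threads to the top and orders everything else case-insensitively by name. A standalone sketch of the same ordering over plain strings (the thread names below are made up for illustration):

object ThreadOrderingSketch extends App {
  val names = Seq(
    "signal dispatcher",
    "Executor task launch worker-1",
    "main",
    "Executor task launch worker-0")

  val sorted = names.sortWith { (a, b) =>
    // executor task threads rank above everything else...
    val v1 = if (a.contains("Executor task launch")) 1 else 0
    val v2 = if (b.contains("Executor task launch")) 1 else 0
    // ...and ties fall back to a case-insensitive name comparison
    if (v1 == v2) a.toLowerCase < b.toLowerCase else v1 > v2
  }

  sorted.foreach(println)
  // Executor task launch worker-0
  // Executor task launch worker-1
  // main
  // signal dispatcher
}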
Example 92
Source File: EnvironmentPage.scala From BigDatalog with Apache License 2.0 | 5 votes |
package org.apache.spark.ui.env

import javax.servlet.http.HttpServletRequest

import scala.xml.Node

import org.apache.spark.ui.{UIUtils, WebUIPage}

private[ui] class EnvironmentPage(parent: EnvironmentTab) extends WebUIPage("") {
  private val listener = parent.listener

  def render(request: HttpServletRequest): Seq[Node] = {
    val runtimeInformationTable = UIUtils.listingTable(
      propertyHeader, jvmRow, listener.jvmInformation, fixedWidth = true)
    val sparkPropertiesTable = UIUtils.listingTable(
      propertyHeader, propertyRow, listener.sparkProperties, fixedWidth = true)
    val systemPropertiesTable = UIUtils.listingTable(
      propertyHeader, propertyRow, listener.systemProperties, fixedWidth = true)
    val classpathEntriesTable = UIUtils.listingTable(
      classPathHeaders, classPathRow, listener.classpathEntries, fixedWidth = true)
    val content =
      <span>
        <h4>Runtime Information</h4> {runtimeInformationTable}
        <h4>Spark Properties</h4> {sparkPropertiesTable}
        <h4>System Properties</h4> {systemPropertiesTable}
        <h4>Classpath Entries</h4> {classpathEntriesTable}
      </span>
    UIUtils.headerSparkPage("Environment", content, parent)
  }

  private def propertyHeader = Seq("Name", "Value")
  private def classPathHeaders = Seq("Resource", "Source")
  private def jvmRow(kv: (String, String)) = <tr><td>{kv._1}</td><td>{kv._2}</td></tr>
  private def propertyRow(kv: (String, String)) = <tr><td>{kv._1}</td><td>{kv._2}</td></tr>
  private def classPathRow(data: (String, String)) = <tr><td>{data._1}</td><td>{data._2}</td></tr>
}
Example 93
Source File: PoolTable.scala From BigDatalog with Apache License 2.0 | 5 votes |
package org.apache.spark.ui.jobs

import java.net.URLEncoder

import scala.collection.mutable.HashMap
import scala.xml.Node

import org.apache.spark.scheduler.{Schedulable, StageInfo}
import org.apache.spark.ui.UIUtils

private[ui] class PoolTable(pools: Seq[Schedulable], parent: StagesTab) {
  private val listener = parent.progressListener

  def toNodeSeq: Seq[Node] = {
    listener.synchronized {
      poolTable(poolRow, pools)
    }
  }

  private def poolTable(
      makeRow: (Schedulable, HashMap[String, HashMap[Int, StageInfo]]) => Seq[Node],
      rows: Seq[Schedulable]): Seq[Node] = {
    <table class="table table-bordered table-striped table-condensed sortable table-fixed">
      <thead>
        <th>Pool Name</th>
        <th>Minimum Share</th>
        <th>Pool Weight</th>
        <th>Active Stages</th>
        <th>Running Tasks</th>
        <th>SchedulingMode</th>
      </thead>
      <tbody>
        {rows.map(r => makeRow(r, listener.poolToActiveStages))}
      </tbody>
    </table>
  }

  private def poolRow(
      p: Schedulable,
      poolToActiveStages: HashMap[String, HashMap[Int, StageInfo]]): Seq[Node] = {
    val activeStages = poolToActiveStages.get(p.name) match {
      case Some(stages) => stages.size
      case None => 0
    }
    val href = "%s/stages/pool?poolname=%s"
      .format(UIUtils.prependBaseUri(parent.basePath), URLEncoder.encode(p.name, "UTF-8"))
    <tr>
      <td>
        <a href={href}>{p.name}</a>
      </td>
      <td>{p.minShare}</td>
      <td>{p.weight}</td>
      <td>{activeStages}</td>
      <td>{p.runningTasks}</td>
      <td>{p.schedulingMode}</td>
    </tr>
  }
}
Example 94
Source File: PoolPage.scala From BigDatalog with Apache License 2.0 | 5 votes |
package org.apache.spark.ui.jobs

import javax.servlet.http.HttpServletRequest

import scala.xml.Node

import org.apache.spark.scheduler.StageInfo
import org.apache.spark.ui.{WebUIPage, UIUtils}

private[ui] class PoolPage(parent: StagesTab) extends WebUIPage("pool") {
  private val sc = parent.sc
  private val listener = parent.progressListener

  def render(request: HttpServletRequest): Seq[Node] = {
    listener.synchronized {
      val poolName = Option(request.getParameter("poolname")).map { poolname =>
        UIUtils.decodeURLParameter(poolname)
      }.getOrElse {
        throw new IllegalArgumentException(s"Missing poolname parameter")
      }

      val poolToActiveStages = listener.poolToActiveStages
      val activeStages = poolToActiveStages.get(poolName) match {
        case Some(s) => s.values.toSeq
        case None => Seq[StageInfo]()
      }
      val activeStagesTable = new StageTableBase(activeStages.sortBy(_.submissionTime).reverse,
        parent.basePath, parent.progressListener, isFairScheduler = parent.isFairScheduler,
        killEnabled = parent.killEnabled)

      // For now, pool information is only accessible in live UIs
      val pools = sc.map(_.getPoolForName(poolName).getOrElse {
        throw new IllegalArgumentException(s"Unknown poolname: $poolName")
      }).toSeq
      val poolTable = new PoolTable(pools, parent)

      val content =
        <h4>Summary </h4> ++ poolTable.toNodeSeq ++
        <h4>{activeStages.size} Active Stages</h4> ++ activeStagesTable.toNodeSeq

      UIUtils.headerSparkPage("Fair Scheduler Pool: " + poolName, content, parent)
    }
  }
}
Example 95
Source File: PagedTableSuite.scala From BigDatalog with Apache License 2.0 | 5 votes |
package org.apache.spark.ui

import scala.xml.Node

import org.apache.spark.SparkFunSuite

class PagedDataSourceSuite extends SparkFunSuite {

  test("basic") {
    val dataSource1 = new SeqPagedDataSource[Int](1 to 5, pageSize = 2)
    assert(dataSource1.pageData(1) === PageData(3, (1 to 2)))

    val dataSource2 = new SeqPagedDataSource[Int](1 to 5, pageSize = 2)
    assert(dataSource2.pageData(2) === PageData(3, (3 to 4)))

    val dataSource3 = new SeqPagedDataSource[Int](1 to 5, pageSize = 2)
    assert(dataSource3.pageData(3) === PageData(3, Seq(5)))

    val dataSource4 = new SeqPagedDataSource[Int](1 to 5, pageSize = 2)
    val e1 = intercept[IndexOutOfBoundsException] {
      dataSource4.pageData(4)
    }
    assert(e1.getMessage === "Page 4 is out of range. Please select a page number between 1 and 3.")

    val dataSource5 = new SeqPagedDataSource[Int](1 to 5, pageSize = 2)
    val e2 = intercept[IndexOutOfBoundsException] {
      dataSource5.pageData(0)
    }
    assert(e2.getMessage === "Page 0 is out of range. Please select a page number between 1 and 3.")
  }
}

class PagedTableSuite extends SparkFunSuite {

  test("pageNavigation") {
    // Create a fake PagedTable to test pageNavigation
    val pagedTable = new PagedTable[Int] {
      override def tableId: String = ""
      override def tableCssClass: String = ""
      override def dataSource: PagedDataSource[Int] = null
      override def pageLink(page: Int): String = page.toString
      override def headers: Seq[Node] = Nil
      override def row(t: Int): Seq[Node] = Nil
      override def goButtonJavascriptFunction: (String, String) = ("", "")
    }

    assert(pagedTable.pageNavigation(1, 10, 1) === Nil)
    assert(
      (pagedTable.pageNavigation(1, 10, 2).head \\ "li").map(_.text.trim) === Seq("1", "2", ">"))
    assert(
      (pagedTable.pageNavigation(2, 10, 2).head \\ "li").map(_.text.trim) === Seq("<", "1", "2"))

    assert((pagedTable.pageNavigation(1, 10, 100).head \\ "li").map(_.text.trim) ===
      (1 to 10).map(_.toString) ++ Seq(">", ">>"))
    assert((pagedTable.pageNavigation(2, 10, 100).head \\ "li").map(_.text.trim) ===
      Seq("<") ++ (1 to 10).map(_.toString) ++ Seq(">", ">>"))

    assert((pagedTable.pageNavigation(100, 10, 100).head \\ "li").map(_.text.trim) ===
      Seq("<<", "<") ++ (91 to 100).map(_.toString))
    assert((pagedTable.pageNavigation(99, 10, 100).head \\ "li").map(_.text.trim) ===
      Seq("<<", "<") ++ (91 to 100).map(_.toString) ++ Seq(">"))

    assert((pagedTable.pageNavigation(11, 10, 100).head \\ "li").map(_.text.trim) ===
      Seq("<<", "<") ++ (11 to 20).map(_.toString) ++ Seq(">", ">>"))
    assert((pagedTable.pageNavigation(93, 10, 97).head \\ "li").map(_.text.trim) ===
      Seq("<<", "<") ++ (91 to 97).map(_.toString) ++ Seq(">"))
  }
}

private[spark] class SeqPagedDataSource[T](seq: Seq[T], pageSize: Int)
  extends PagedDataSource[T](pageSize) {

  override protected def dataSize: Int = seq.size

  override protected def sliceData(from: Int, to: Int): Seq[T] = seq.slice(from, to)
}
Example 96
Source File: StagePageSuite.scala From BigDatalog with Apache License 2.0 | 5 votes |
package org.apache.spark.ui

import javax.servlet.http.HttpServletRequest

import scala.xml.Node

import org.mockito.Mockito.{mock, when, RETURNS_SMART_NULLS}

import org.apache.spark._
import org.apache.spark.executor.TaskMetrics
import org.apache.spark.scheduler._
import org.apache.spark.ui.jobs.{JobProgressListener, StagePage, StagesTab}
import org.apache.spark.ui.scope.RDDOperationGraphListener

class StagePageSuite extends SparkFunSuite with LocalSparkContext {

  test("peak execution memory only displayed if unsafe is enabled") {
    val unsafeConf = "spark.sql.unsafe.enabled"
    val conf = new SparkConf(false).set(unsafeConf, "true")
    val html = renderStagePage(conf).toString().toLowerCase
    val targetString = "peak execution memory"
    assert(html.contains(targetString))
    // Disable unsafe and make sure it's not there
    val conf2 = new SparkConf(false).set(unsafeConf, "false")
    val html2 = renderStagePage(conf2).toString().toLowerCase
    assert(!html2.contains(targetString))
    // Avoid setting anything; it should be displayed by default
    val conf3 = new SparkConf(false)
    val html3 = renderStagePage(conf3).toString().toLowerCase
    assert(html3.contains(targetString))
  }

  test("SPARK-10543: peak execution memory should be per-task rather than cumulative") {
    val unsafeConf = "spark.sql.unsafe.enabled"
    val conf = new SparkConf(false).set(unsafeConf, "true")
    val html = renderStagePage(conf).toString().toLowerCase
    // verify min/25/50/75/max show task value not cumulative values
    assert(html.contains("<td>10.0 b</td>" * 5))
  }

  private def renderStagePage(conf: SparkConf): Seq[Node] = {
    val jobListener = new JobProgressListener(conf)
    val graphListener = new RDDOperationGraphListener(conf)
    val tab = mock(classOf[StagesTab], RETURNS_SMART_NULLS)
    val request = mock(classOf[HttpServletRequest])
    when(tab.conf).thenReturn(conf)
    when(tab.progressListener).thenReturn(jobListener)
    when(tab.operationGraphListener).thenReturn(graphListener)
    when(tab.appName).thenReturn("testing")
    when(tab.headerTabs).thenReturn(Seq.empty)
    when(request.getParameter("id")).thenReturn("0")
    when(request.getParameter("attempt")).thenReturn("0")
    val page = new StagePage(tab)

    // Simulate a stage in job progress listener
    val stageInfo = new StageInfo(0, 0, "dummy", 1, Seq.empty, Seq.empty, "details")
    // Simulate two tasks to test PEAK_EXECUTION_MEMORY correctness
    (1 to 2).foreach { taskId =>
      val taskInfo = new TaskInfo(taskId, taskId, 0, 0, "0", "localhost", TaskLocality.ANY, false)
      val peakExecutionMemory = 10
      taskInfo.accumulables += new AccumulableInfo(0, InternalAccumulator.PEAK_EXECUTION_MEMORY,
        Some(peakExecutionMemory.toString), (peakExecutionMemory * taskId).toString, true)
      jobListener.onStageSubmitted(SparkListenerStageSubmitted(stageInfo))
      jobListener.onTaskStart(SparkListenerTaskStart(0, 0, taskInfo))
      taskInfo.markSuccessful()
      jobListener.onTaskEnd(
        SparkListenerTaskEnd(0, 0, "result", Success, taskInfo, TaskMetrics.empty))
    }
    jobListener.onStageCompleted(SparkListenerStageCompleted(stageInfo))
    page.render(request)
  }
}
Example 97
Source File: BasicElemFormats.scala From xmlconfect with Apache License 2.0 | 5 votes |
package com.mthaler.xmlconfect

import scala.reflect._
import scala.xml.{ Node, Null, Text }

object BasicElemFormats {

  implicit object BooleanXmlElemFormat extends SimpleXmlElemFormat[Boolean] {
    protected def readElem(node: Node, name: String = ""): Boolean = node.text.toBoolean
  }

  implicit object ByteXmlElemFormat extends SimpleXmlElemFormat[Byte] {
    protected def readElem(node: Node, name: String = ""): Byte = node.text.toByte
  }

  implicit object ShortXmlElemFormat extends SimpleXmlElemFormat[Short] {
    protected def readElem(node: Node, name: String = ""): Short = node.text.toShort
  }

  implicit object IntXmlElemFormat extends SimpleXmlElemFormat[Int] {
    protected def readElem(node: Node, name: String = ""): Int = node.text.toInt
  }

  implicit object LongXmlElemFormat extends SimpleXmlElemFormat[Long] {
    protected def readElem(node: Node, name: String = ""): Long = node.text.toLong
  }

  implicit object FloatXmlElemFormat extends SimpleXmlElemFormat[Float] {
    protected def readElem(node: Node, name: String = ""): Float = node.text.toFloat
  }

  implicit object DoubleXmlElemFormat extends SimpleXmlElemFormat[Double] {
    protected def readElem(node: Node, name: String = ""): Double = node.text.toDouble
  }

  implicit object StringXmlElemFormat extends SimpleXmlElemFormat[String] {
    protected def readElem(node: Node, name: String = ""): String = node.text
  }

  implicit object CharXmlElemFormat extends SimpleXmlElemFormat[Char] {
    protected def readElem(node: Node, name: String = ""): Char = {
      val txt = node.text
      if (txt.length == 1) txt.charAt(0)
      else deserializationError("Expected Char as single-character string, but got " + txt)
    }
  }

  implicit object SymbolXmlElemFormat extends SimpleXmlElemFormat[Symbol] {
    protected def readElem(node: Node, name: String = ""): Symbol = Symbol(node.text)
    protected override def writeElem(obj: Symbol, name: String = ""): Node =
      elem(name, Null, Seq(Text(obj.name)))
  }

  implicit object BigIntXmlElemFormat extends SimpleXmlElemFormat[BigInt] {
    protected def readElem(node: Node, name: String = ""): BigInt = BigInt(node.text)
  }

  implicit object BigDecimalXmlElemFormat extends SimpleXmlElemFormat[BigDecimal] {
    protected def readElem(node: Node, name: String = ""): BigDecimal = BigDecimal(node.text)
  }

  implicit def enumFormat[T <: Enum[T]: ClassTag] = new SimpleXmlElemFormat[T] {
    protected def readElem(node: Node, name: String = ""): T = {
      val c = classTag[T].runtimeClass.asInstanceOf[Class[T]]
      Enum.valueOf(c, node.text)
    }
  }
}
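Each of these formats just converts an element's text to a primitive. The round trip itself needs nothing beyond scala.xml, so here is a minimal sketch of what the readElem/writeElem pair boils down to for Int; the element name is arbitrary, and this is not the library's public API (that goes through the SimpleXmlElemFormat plumbing):

import scala.xml.{Elem, Node, Null, Text, TopScope}

object IntElemSketch extends App {
  // writing: wrap the value's string form in an element
  def writeInt(value: Int, name: String): Node =
    Elem(null, name, Null, TopScope, minimizeEmpty = true, Text(value.toString))

  // reading: recover the value from the element text, as IntXmlElemFormat.readElem does
  def readInt(node: Node): Int = node.text.toInt

  val node = writeInt(42, "count")
  println(node)          // <count>42</count>
  println(readInt(node)) // 42
}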
Example 98
Source File: BasicTextFormats.scala From xmlconfect with Apache License 2.0 | 5 votes |
package com.mthaler.xmlconfect

import scala.reflect._
import scala.xml.{ Null, Node, Text }

object BasicTextFormats {

  implicit object BooleanXmlTextFormat extends SimpleXmlTextFormat[Boolean] {
    protected def readText(text: Text, name: String = ""): Boolean = text.text.toBoolean
  }

  implicit object ByteXmlTextFormat extends SimpleXmlTextFormat[Byte] {
    protected def readText(text: Text, name: String = ""): Byte = text.text.toByte
  }

  implicit object ShortXmlTextFormat extends SimpleXmlTextFormat[Short] {
    protected def readText(text: Text, name: String = ""): Short = text.text.toShort
  }

  implicit object IntXmlTextFormat extends SimpleXmlTextFormat[Int] {
    protected def readText(text: Text, name: String = ""): Int = text.text.toInt
  }

  implicit object LongXmlTextFormat extends SimpleXmlTextFormat[Long] {
    protected def readText(text: Text, name: String = ""): Long = text.text.toLong
  }

  implicit object FloatXmlTextFormat extends SimpleXmlTextFormat[Float] {
    protected def readText(text: Text, name: String = ""): Float = text.text.toFloat
  }

  implicit object DoubleXmlTextFormat extends SimpleXmlTextFormat[Double] {
    protected def readText(text: Text, name: String = ""): Double = text.text.toDouble
  }

  implicit object StringXmlTextFormat extends SimpleXmlTextFormat[String] {
    protected def readText(text: Text, name: String = ""): String = text.text
  }

  implicit object CharXmlTextFormat extends SimpleXmlTextFormat[Char] {
    protected def readText(text: Text, name: String = ""): Char = {
      val txt = text.text
      if (txt.length == 1) txt.charAt(0)
      else deserializationError("Expected Char as single-character string, but got " + txt)
    }
  }

  implicit object SymbolXmlTextFormat extends SimpleXmlTextFormat[Symbol] {
    protected def readText(text: Text, name: String = ""): Symbol = Symbol(text.text)
    protected override def writeElem(obj: Symbol, name: String = ""): Node = Text(obj.name)
  }

  implicit object BigIntXmlTextFormat extends SimpleXmlTextFormat[BigInt] {
    protected def readText(text: Text, name: String = ""): BigInt = BigInt(text.text)
  }

  implicit object BigDecimalXmlTextFormat extends SimpleXmlTextFormat[BigDecimal] {
    protected def readText(text: Text, name: String = ""): BigDecimal = BigDecimal(text.text)
  }

  implicit def enumFormat[T <: Enum[T]: ClassTag] = new SimpleXmlTextFormat[T] {
    protected def readText(text: Text, name: String = ""): T = {
      val c = classTag[T].runtimeClass.asInstanceOf[Class[T]]
      Enum.valueOf(c, text.text)
    }
  }
}
Example 99
Source File: SQLConnectorParser.scala From fusion-data with Apache License 2.0 | 5 votes |
package mass.connector.sql

import java.util.Properties

import helloscala.common.Configuration
import mass.connector.{ ConnectorParser, ConnectorSetting }
import mass.core.XmlUtils

import scala.xml.Node

class SQLConnectorParser extends ConnectorParser {
  import mass.core.XmlUtils.XmlRich

  override val `type` = "jdbc"

  def parseSettingFromXML(node: Node): ConnectorSetting = {
    val props = new Properties()
    val id = node.attr("name")
    props.put("poolName", id)
    (node \\ "props" \\ "prop").foreach { prop =>
      val key = (prop \\ "@key").text
      val value = getText(prop)
      props.put(key, value)
    }
    ConnectorSetting(Configuration.load(props))
  }

  override def parseFromXML(node: Node): SQLConnector = {
    val setting = parseSettingFromXML(node)
    SQLConnector(node.attr("name"), setting)
  }

  @inline private def getText(prop: Node): String =
    prop.getAttr("value").getOrElse(XmlUtils.text(prop \ "value"))
}
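For context, here is a sketch of the kind of connector XML such a parser consumes, read back with plain scala.xml queries. The element layout and key names below are illustrative assumptions, and the standard \ "@name" query stands in for the project's XmlRich attr/getAttr helpers:

import scala.xml.XML

object ConnectorXmlSketch extends App {
  val node = XML.loadString(
    """<connector type="jdbc" name="orders-db">
      |  <props>
      |    <prop key="jdbcUrl" value="jdbc:postgresql://localhost/orders"/>
      |    <prop key="username" value="app"/>
      |  </props>
      |</connector>""".stripMargin)

  println((node \ "@name").text) // orders-db, what node.attr("name") would return
  (node \\ "props" \\ "prop").foreach { prop =>
    println((prop \\ "@key").text + " = " + (prop \\ "@value").text)
  }
}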
Example 100
Source File: Poets.scala From Scalaprof with GNU General Public License v2.0 | 5 votes |
package edu.neu.coe.csye._7200.poets

import scala.xml.{Node, NodeSeq, XML}

case class Name(name: String, language: String) {
  def toXML = <name language={language}>{name}</name>
}

case class Poet(names: Seq[Name]) {
  def toXML = <poet>{names map (_.toXML)}</poet>
}

object Poet {
  def fromXML(node: Node) = Poet(Name.fromXML(node \ "name"))
}

object Name {
  def getLanguage(x: Option[Seq[Node]]) = x match { case Some(Seq(y)) => y.text; case _ => "" }

  def fromXML(nodes: NodeSeq): Seq[Name] =
    for {
      node <- nodes
    } yield Name(node.text, getLanguage(node.attribute("language")))
}

object Poets extends App {
  import spray.json._

  type PoetSeq = Seq[Poet]

  def toXML(poets: PoetSeq) = poets map { _ toXML }

  val xml = XML.loadFile("poets.xml")
  val poets: PoetSeq = for (poet <- xml \\ "poet") yield Poet.fromXML(poet)

  case class Poets(poets: PoetSeq)

  object PoetsJsonProtocol extends DefaultJsonProtocol {
    implicit val nameFormat = jsonFormat2(Name.apply)
    implicit val poetFormat = ??? // TODO 5 points
    implicit val poetsFormat = jsonFormat1(Poets)
  }

  ??? // TODO 25 points. Write poets out as Json. Show the Json in the console...
  // ...Read the Json file back as poets1 and write that out as XML. Show it on console.
  // Show the comparison of the XML file you produced with the poets.xml file (as part of your submission).
}
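The ??? placeholders above are graded exercises, so the original deliberately leaves them open. One plausible completion, assuming spray-json's DefaultJsonProtocol and the case classes above (a sketch, not the course's reference solution):

// inside PoetsJsonProtocol, the missing format for the one-field case class:
//   implicit val poetFormat = jsonFormat1(Poet.apply)

// and in place of the final ???:
import PoetsJsonProtocol._

val json = Poets(poets).toJson.prettyPrint      // write poets out as Json
println(json)                                   // show the Json in the console
val poets1 = json.parseJson.convertTo[Poets].poets // read the Json back
toXML(poets1) foreach println                   // write poets1 out again as XML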
Example 101
Source File: WebCrawler.scala From Scalaprof with GNU General Public License v2.0 | 5 votes |
package edu.neu.coe.csye._7200.crawler

import java.net.URL

import edu.neu.coe.csye._7200.MonadOps

import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent._
import scala.concurrent.duration._
import scala.io.Source
import scala.util._
import scala.xml.Node

object WebCrawler extends App {

  def getURLContent(u: URL): Future[String] = {
    println(s"url: $u")
    for {
      source <- Future(Source.fromURL(u))
    } yield source mkString
  }

  //  def countWords(sy: Try[String]) =
  //    for { gD <- sy; n <- HTMLParser.parse(gD); gN <- Try(n.text); l <- Try(gN.split(" ").length) } yield l

  def wget(u: URL): Future[Seq[URL]] = {
    // TODO implement. 16 points. Hint: write as a for-comprehension, using the constructor
    // new URL(URL,String) to get the appropriate URL for relative links
    def getURLs(ns: Node): Seq[URL] = ???

    def getLinks(g: String): Try[Seq[URL]] =
      for (n <- HTMLParser.parse(g) recoverWith { case f =>
        Failure(new RuntimeException(s"parse problem with URL $u: $f"))
      }) yield getURLs(n)

    // TODO implement. 9 points. Hint: write as a for-comprehension, using getURLContent (above)
    // and getLinks above. You might also need MonadOps.asFuture
    ???
  }

  def wget(us: Seq[URL]): Future[Seq[Either[Throwable, Seq[URL]]]] = {
    val us2 = us.distinct take 10
    // TODO implement the rest of this, based on us2 instead of us. 15 points.
    // Hint: Use wget(URL) (above). MonadOps.sequence and Future.sequence are also available to you to use.
    ???
  }

  def crawler(depth: Int, args: Seq[URL]): Future[Seq[URL]] = {
    def inner(urls: Seq[URL], depth: Int, accum: Seq[URL]): Future[Seq[URL]] =
      if (depth > 0)
        for (us <- MonadOps.flattenRecover(wget(urls), { x => System.err.println(x) });
             r <- inner(us, depth - 1, accum ++: urls)) yield r
      else
        Future.successful(accum)
    inner(args, depth, Nil)
  }

  println(s"web reader: ${args.toList}")
  val urls = for (arg <- args toList) yield Try(new URL(arg))
  //  println(urls)
  val s = MonadOps.sequence(urls)
  s match {
    case Success(z) =>
      //      println(s"invoking crawler on $z")
      val f = crawler(2, z)
      Await.ready(f, Duration("60 second"))
      for (x <- f) println(s"Links: $x")
    case Failure(z) => println(s"failure: $z")
  }
}
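The TODO hints name the ingredients: scala.xml attribute queries for anchors, and the two-argument URL constructor for resolving relative links. One way the getURLs hole might be filled, as a sketch rather than the graded answer (it relies on u being in scope inside wget, and on MonadOps.asFuture lifting a Try into a Future, as the hint suggests):

// inside wget(u: URL), replacing the first ???:
def getURLs(ns: Node): Seq[URL] =
  for {
    anchor <- (ns \\ "a").toSeq                    // every <a> element in the page
    href <- anchor.attribute("href").toSeq.flatten // its href attribute, if present
  } yield new URL(u, href.text)                    // resolve relative to the page URL

// and the body of wget(u) itself might then read:
//   for {
//     content <- getURLContent(u)
//     links <- MonadOps.asFuture(getLinks(content))
//   } yield links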
Example 102
Source File: Helpers.scala From xmlrpc with MIT License | 4 votes |
package xmlrpc.protocol

import xmlrpc.protocol.Deserializer.{DeserializationError, Deserialized}

import scala.reflect.ClassTag
import scala.util.{Failure, Success, Try}
import scala.xml.transform.{RewriteRule, RuleTransformer}
import scala.xml.{Elem, Node, NodeSeq}

import scalaz.Scalaz._

trait Helpers {
  implicit class PimpedNode(node: NodeSeq) {
    def inValue = <value>{node}</value>
    def inParam = <param>{node}</param>
  }

  object FromRequestToResponse extends RewriteRule {
    override def transform(n: Node): Seq[Node] = n match {
      case e: Elem if e.label == "methodCall" =>
        e.copy(label = "methodResponse", child = e.child.tail.tail)
      case _ => n
    }
  }

  object ToResponse extends RuleTransformer(FromRequestToResponse)

  implicit class RequestTransformer(request: Node) {
    val asResponse: Node = ToResponse(request)
  }

  def makeNumericConversion[T: Datatype: ClassTag](f: String => T, input: String): Deserialized[T] =
    Try(f(input)) match {
      case Success(convertedValue) => convertedValue.success
      case Failure(e) => DeserializationError(
        s"The value $input couldn't be converted to a ${implicitly[ClassTag[T]].runtimeClass.getSimpleName}",
        Some(e)).failures
    }
}
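RuleTransformer applies the rewrite rule at every node it visits, which is what lets asResponse relabel the root and drop its first two children. A self-contained demonstration of that rewrite (the <x/> placeholder stands in for whatever child node a parsed request would carry in that position):

import scala.xml.{Elem, Node}
import scala.xml.transform.{RewriteRule, RuleTransformer}

object RewriteSketch extends App {
  val rule = new RewriteRule {
    override def transform(n: Node): Seq[Node] = n match {
      case e: Elem if e.label == "methodCall" =>
        // relabel the element and drop its first two child nodes
        e.copy(label = "methodResponse", child = e.child.tail.tail)
      case _ => n
    }
  }

  val request: Node =
    <methodCall><methodName>add</methodName><x/><params><param>1</param></params></methodCall>

  println(new RuleTransformer(rule)(request))
  // <methodResponse><params><param>1</param></params></methodResponse>
}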
Example 103
Source File: XmlTransformer.scala From nexus with Apache License 2.0 | 4 votes |
import sbt._
import sbt.librarymanagement.ModuleFilter

import scala.xml.transform.{RewriteRule, RuleTransformer}
import scala.xml.{Elem, Node, NodeSeq}

object XmlTransformer {

  def transformer(blacklist: ModuleFilter): RuleTransformer =
    new RuleTransformer(new RewriteRule {
      override def transform(node: Node): NodeSeq =
        node match {
          case e: Elem if e.label == "dependency" =>
            val organization = e.child.filter(_.label == "groupId").flatMap(_.text).mkString
            val artifact = e.child.filter(_.label == "artifactId").flatMap(_.text).mkString
            val version = e.child.filter(_.label == "version").flatMap(_.text).mkString
            if (blacklist(organization % artifact % version)) NodeSeq.Empty
            else node
          case _ => node
        }
    })
}
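In an sbt build this transformer is typically wired into pomPostProcess, so matching dependencies never reach the published POM; a usage sketch, assuming sbt's moduleFilter DSL (the organization below is an example, not taken from the project):

// build.sbt
pomPostProcess := { node =>
  // drop every com.example dependency from the generated pom.xml
  XmlTransformer.transformer(moduleFilter(organization = "com.example"))(node)
}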
Example 104
Source File: XMLUtils.scala From infinispan-spark with Apache License 2.0 | 4 votes |
package org.infinispan.spark.test

import java.io.File

import scala.xml.transform.{RewriteRule, RuleTransformer}
import scala.xml.{Elem, Node, XML}
import scala.language.postfixOps

object XMLUtils {

  private def addChildToNode(element: Node,
                             elementName: String,
                             attributeName: String,
                             attributeValue: String,
                             elementToAdd: Node) = {
    object Rule extends RewriteRule {
      override def transform(n: Node): Seq[Node] = n match {
        case Elem(prefix, en, att, scope, child@_*)
          if en == elementName && att.asAttrMap.exists(t => t._1 == attributeName && t._2 == attributeValue) =>
          Elem(prefix, en, att, scope, child.isEmpty, elementToAdd ++ child: _*)
        case other => other
      }
    }
    object Transform extends RuleTransformer(Rule)
    Transform(element)
  }

  def addCacheTemplate(cacheContainer: String, configFile: File): Unit = {
    val xmlFile = XML.loadFile(configFile)
    val exists = ((xmlFile \\ "cache-container").filter(n => n.attributes.asAttrMap.exists {
      case (k, v) => k.equals("name") && v.equals(cacheContainer)
    }) \ "replicated-cache-configuration" \ "@name" text) == "replicated"

    val cacheConfig = <replicated-cache-configuration name="replicated"/>

    if (!exists) {
      val newXML = XMLUtils.addChildToNode(xmlFile, "cache-container", "name", cacheContainer, cacheConfig)
      XML.save(configFile.getAbsolutePath, newXML, "UTF-8")
    }
  }
}
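Typical use is patching a server configuration file during test setup, before the server boots; a minimal call sketch (the container name and file path are assumptions for illustration):

import java.io.File

// Adds <replicated-cache-configuration name="replicated"/> under the named
// cache container unless it is already present, rewriting the file in place.
XMLUtils.addCacheTemplate("clustered", new File("infinispan-custom.xml"))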