javax.ws.rs.GET Scala Examples

The following examples show how to use javax.ws.rs.GET. Each example is taken from an open-source project; the source file, project, and license are noted above each listing.
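Before the project examples, here is a minimal sketch of the annotation in use: a hypothetical PingResource exposed at /ping that returns plain text. It assumes a JAX-RS runtime (Jersey, RESTEasy, or similar) scans and registers the class; the package, class, and path names are illustrative and do not come from the projects below.

package example

import javax.ws.rs.{GET, Path, Produces}
import javax.ws.rs.core.MediaType

// A GET request to /ping produces a text/plain "pong" response.
// In Scala, array-valued annotation parameters such as @Produces take an Array(...).
@Path("/ping")
class PingResource {

  @GET
  @Produces(Array(MediaType.TEXT_PLAIN))
  def ping(): String = "pong"
}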
Example 1
Source File: OneJobResource.scala    From spark1.52   with Apache License 2.0
package org.apache.spark.status.api.v1

import javax.ws.rs.{PathParam, GET, Produces}
import javax.ws.rs.core.MediaType

import org.apache.spark.JobExecutionStatus
import org.apache.spark.ui.SparkUI
import org.apache.spark.ui.jobs.UIData.JobUIData
// The @Produces annotation declares the media type(s) of the method's response entity; one or more types can be specified
@Produces(Array(MediaType.APPLICATION_JSON))
private[v1] class OneJobResource(ui: SparkUI) {

  @GET
  def oneJob(@PathParam("jobId") jobId: Int): JobData = {
    val statusToJobs: Seq[(JobExecutionStatus, Seq[JobUIData])] =
      AllJobsResource.getStatusToJobs(ui)
    val jobOpt = statusToJobs.map {_._2} .flatten.find { jobInfo => jobInfo.jobId == jobId}
    jobOpt.map { job =>
      AllJobsResource.convertJobData(job, ui.jobProgressListener, false)
    }.getOrElse {
      throw new NotFoundException("unknown job: " + jobId)
    }
  }

} 
Example 2
Source File: ExecutorListResource.scala    From multi-tenancy-spark   with Apache License 2.0
package org.apache.spark.status.api.v1

import javax.ws.rs.{GET, PathParam, Produces}
import javax.ws.rs.core.MediaType

import org.apache.spark.ui.SparkUI
import org.apache.spark.ui.exec.ExecutorsPage

@Produces(Array(MediaType.APPLICATION_JSON))
private[v1] class ExecutorListResource(ui: SparkUI) {

  @GET
  def executorList(): Seq[ExecutorSummary] = {
    val listener = ui.executorsListener
    listener.synchronized {
      // The following code should be protected by `listener` to make sure no executors will be
      // removed before we query their status. See SPARK-12784.
      val storageStatusList = listener.activeStorageStatusList
      (0 until storageStatusList.size).map { statusId =>
        ExecutorsPage.getExecInfo(listener, statusId, isActive = true)
      }
    }
  }
} 
Example 3
Source File: AllRDDResource.scala    From iolap   with Apache License 2.0
package org.apache.spark.status.api.v1

import javax.ws.rs.{GET, Produces}
import javax.ws.rs.core.MediaType

import org.apache.spark.storage.{RDDInfo, StorageStatus, StorageUtils}
import org.apache.spark.ui.SparkUI
import org.apache.spark.ui.storage.StorageListener

@Produces(Array(MediaType.APPLICATION_JSON))
private[v1] class AllRDDResource(ui: SparkUI) {

  @GET
  def rddList(): Seq[RDDStorageInfo] = {
    val storageStatusList = ui.storageListener.storageStatusList
    val rddInfos = ui.storageListener.rddInfoList
    rddInfos.map{rddInfo =>
      AllRDDResource.getRDDStorageInfo(rddInfo.id, rddInfo, storageStatusList,
        includeDetails = false)
    }
  }

}

private[spark] object AllRDDResource {

  def getRDDStorageInfo(
      rddId: Int,
      listener: StorageListener,
      includeDetails: Boolean): Option[RDDStorageInfo] = {
    val storageStatusList = listener.storageStatusList
    listener.rddInfoList.find { _.id == rddId }.map { rddInfo =>
      getRDDStorageInfo(rddId, rddInfo, storageStatusList, includeDetails)
    }
  }

  def getRDDStorageInfo(
      rddId: Int,
      rddInfo: RDDInfo,
      storageStatusList: Seq[StorageStatus],
      includeDetails: Boolean): RDDStorageInfo = {
    val workers = storageStatusList.map { (rddId, _) }
    val blockLocations = StorageUtils.getRddBlockLocations(rddId, storageStatusList)
    val blocks = storageStatusList
      .flatMap { _.rddBlocksById(rddId) }
      .sortWith { _._1.name < _._1.name }
      .map { case (blockId, status) =>
        (blockId, status, blockLocations.get(blockId).getOrElse(Seq[String]("Unknown")))
      }

    val dataDistribution = if (includeDetails) {
      Some(storageStatusList.map { status =>
        new RDDDataDistribution(
          address = status.blockManagerId.hostPort,
          memoryUsed = status.memUsedByRdd(rddId),
          memoryRemaining = status.memRemaining,
          diskUsed = status.diskUsedByRdd(rddId)
        ) } )
    } else {
      None
    }
    val partitions = if (includeDetails) {
      Some(blocks.map { case (id, block, locations) =>
        new RDDPartitionInfo(
          blockName = id.name,
          storageLevel = block.storageLevel.description,
          memoryUsed = block.memSize,
          diskUsed = block.diskSize,
          executors = locations
        )
      } )
    } else {
      None
    }

    new RDDStorageInfo(
      id = rddId,
      name = rddInfo.name,
      numPartitions = rddInfo.numPartitions,
      numCachedPartitions = rddInfo.numCachedPartitions,
      storageLevel = rddInfo.storageLevel.description,
      memoryUsed = rddInfo.memSize,
      diskUsed = rddInfo.diskSize,
      dataDistribution = dataDistribution,
      partitions = partitions
    )
  }
} 
Example 4
Source File: ApplicationListResource.scala    From iolap   with Apache License 2.0
package org.apache.spark.status.api.v1

import java.util.{Arrays, Date, List => JList}
import javax.ws.rs.{DefaultValue, GET, Produces, QueryParam}
import javax.ws.rs.core.MediaType

import org.apache.spark.deploy.history.ApplicationHistoryInfo
import org.apache.spark.deploy.master.{ApplicationInfo => InternalApplicationInfo}

@Produces(Array(MediaType.APPLICATION_JSON))
private[v1] class ApplicationListResource(uiRoot: UIRoot) {

  @GET
  def appList(
      @QueryParam("status") status: JList[ApplicationStatus],
      @DefaultValue("2010-01-01") @QueryParam("minDate") minDate: SimpleDateParam,
      @DefaultValue("3000-01-01") @QueryParam("maxDate") maxDate: SimpleDateParam)
  : Iterator[ApplicationInfo] = {
    val allApps = uiRoot.getApplicationInfoList
    val adjStatus = {
      if (status.isEmpty) {
        Arrays.asList(ApplicationStatus.values(): _*)
      } else {
        status
      }
    }
    val includeCompleted = adjStatus.contains(ApplicationStatus.COMPLETED)
    val includeRunning = adjStatus.contains(ApplicationStatus.RUNNING)
    allApps.filter { app =>
      val anyRunning = app.attempts.exists(!_.completed)
      // if any attempt is still running, we consider the app to also still be running
      val statusOk = (!anyRunning && includeCompleted) ||
        (anyRunning && includeRunning)
      // keep the app if *any* attempts fall in the right time window
      val dateOk = app.attempts.exists { attempt =>
        attempt.startTime.getTime >= minDate.timestamp &&
          attempt.startTime.getTime <= maxDate.timestamp
      }
      statusOk && dateOk
    }
  }
}

private[spark] object ApplicationsListResource {
  def appHistoryInfoToPublicAppInfo(app: ApplicationHistoryInfo): ApplicationInfo = {
    new ApplicationInfo(
      id = app.id,
      name = app.name,
      attempts = app.attempts.map { internalAttemptInfo =>
        new ApplicationAttemptInfo(
          attemptId = internalAttemptInfo.attemptId,
          startTime = new Date(internalAttemptInfo.startTime),
          endTime = new Date(internalAttemptInfo.endTime),
          sparkUser = internalAttemptInfo.sparkUser,
          completed = internalAttemptInfo.completed
        )
      }
    )
  }

  def convertApplicationInfo(
      internal: InternalApplicationInfo,
      completed: Boolean): ApplicationInfo = {
    // standalone application info always has just one attempt
    new ApplicationInfo(
      id = internal.id,
      name = internal.desc.name,
      attempts = Seq(new ApplicationAttemptInfo(
        attemptId = None,
        startTime = new Date(internal.startTime),
        endTime = new Date(internal.endTime),
        sparkUser = internal.desc.user,
        completed = completed
      ))
    )
  }

} 
Example 5
Source File: OneJobResource.scala    From iolap   with Apache License 2.0
package org.apache.spark.status.api.v1

import javax.ws.rs.{PathParam, GET, Produces}
import javax.ws.rs.core.MediaType

import org.apache.spark.JobExecutionStatus
import org.apache.spark.ui.SparkUI
import org.apache.spark.ui.jobs.UIData.JobUIData

@Produces(Array(MediaType.APPLICATION_JSON))
private[v1] class OneJobResource(ui: SparkUI) {

  @GET
  def oneJob(@PathParam("jobId") jobId: Int): JobData = {
    val statusToJobs: Seq[(JobExecutionStatus, Seq[JobUIData])] =
      AllJobsResource.getStatusToJobs(ui)
    val jobOpt = statusToJobs.map {_._2} .flatten.find { jobInfo => jobInfo.jobId == jobId}
    jobOpt.map { job =>
      AllJobsResource.convertJobData(job, ui.jobProgressListener, false)
    }.getOrElse {
      throw new NotFoundException("unknown job: " + jobId)
    }
  }

} 
Example 6
Source File: ExecutorListResource.scala    From iolap   with Apache License 2.0
package org.apache.spark.status.api.v1

import javax.ws.rs.{GET, PathParam, Produces}
import javax.ws.rs.core.MediaType

import org.apache.spark.ui.SparkUI
import org.apache.spark.ui.exec.ExecutorsPage

@Produces(Array(MediaType.APPLICATION_JSON))
private[v1] class ExecutorListResource(ui: SparkUI) {

  @GET
  def executorList(): Seq[ExecutorSummary] = {
    val listener = ui.executorsListener
    val storageStatusList = listener.storageStatusList
    (0 until storageStatusList.size).map { statusId =>
      ExecutorsPage.getExecInfo(listener, statusId)
    }
  }
} 
Example 7
Source File: AllRDDResource.scala    From spark1.52   with Apache License 2.0
package org.apache.spark.status.api.v1

import javax.ws.rs.{GET, Produces}
import javax.ws.rs.core.MediaType

import org.apache.spark.storage.{RDDInfo, StorageStatus, StorageUtils}
import org.apache.spark.ui.SparkUI
import org.apache.spark.ui.storage.StorageListener

@Produces(Array(MediaType.APPLICATION_JSON))
private[v1] class AllRDDResource(ui: SparkUI) {

  @GET
  def rddList(): Seq[RDDStorageInfo] = {
    val storageStatusList = ui.storageListener.storageStatusList
    val rddInfos = ui.storageListener.rddInfoList
    rddInfos.map{rddInfo =>
      AllRDDResource.getRDDStorageInfo(rddInfo.id, rddInfo, storageStatusList,
        includeDetails = false)
    }
  }

}

private[spark] object AllRDDResource {

  def getRDDStorageInfo(
      rddId: Int,
      listener: StorageListener,
      includeDetails: Boolean): Option[RDDStorageInfo] = {
    val storageStatusList = listener.storageStatusList
    listener.rddInfoList.find { _.id == rddId }.map { rddInfo =>
      getRDDStorageInfo(rddId, rddInfo, storageStatusList, includeDetails)
    }
  }

  def getRDDStorageInfo(
      rddId: Int,
      rddInfo: RDDInfo,
      storageStatusList: Seq[StorageStatus],
      includeDetails: Boolean): RDDStorageInfo = {
    val workers = storageStatusList.map { (rddId, _) }
    val blockLocations = StorageUtils.getRddBlockLocations(rddId, storageStatusList)
    val blocks = storageStatusList
      .flatMap { _.rddBlocksById(rddId) }
      .sortWith { _._1.name < _._1.name }
      .map { case (blockId, status) =>
        (blockId, status, blockLocations.get(blockId).getOrElse(Seq[String]("Unknown")))
      }

    val dataDistribution = if (includeDetails) {
      Some(storageStatusList.map { status =>
        new RDDDataDistribution(
          address = status.blockManagerId.hostPort,
          memoryUsed = status.memUsedByRdd(rddId),
          memoryRemaining = status.memRemaining,
          diskUsed = status.diskUsedByRdd(rddId)
        ) } )
    } else {
      None
    }
    val partitions = if (includeDetails) {
      Some(blocks.map { case (id, block, locations) =>
        new RDDPartitionInfo(
          blockName = id.name,
          storageLevel = block.storageLevel.description,
          memoryUsed = block.memSize,
          diskUsed = block.diskSize,
          executors = locations
        )
      } )
    } else {
      None
    }

    new RDDStorageInfo(
      id = rddId,
      name = rddInfo.name,
      numPartitions = rddInfo.numPartitions,
      numCachedPartitions = rddInfo.numCachedPartitions,
      storageLevel = rddInfo.storageLevel.description,
      memoryUsed = rddInfo.memSize,
      diskUsed = rddInfo.diskSize,
      dataDistribution = dataDistribution,
      partitions = partitions
    )
  }
} 
Example 8
Source File: EventLogDownloadResource.scala    From spark1.52   with Apache License 2.0
package org.apache.spark.status.api.v1

import java.io.OutputStream
import java.util.zip.ZipOutputStream
import javax.ws.rs.{GET, Produces}
import javax.ws.rs.core.{MediaType, Response, StreamingOutput}

import scala.util.control.NonFatal

import org.apache.spark.{Logging, SparkConf}
import org.apache.spark.deploy.SparkHadoopUtil

@Produces(Array(MediaType.APPLICATION_OCTET_STREAM))
private[v1] class EventLogDownloadResource(
    val uIRoot: UIRoot,
    val appId: String,
    val attemptId: Option[String]) extends Logging {
  val conf = SparkHadoopUtil.get.newConfiguration(new SparkConf)

  @GET
  def getEventLogs(): Response = {
    try {
      val fileName = {
        attemptId match {
          case Some(id) => s"eventLogs-$appId-$id.zip"
          case None => s"eventLogs-$appId.zip"
        }
      }
      // Implement the StreamingOutput interface
      val stream = new StreamingOutput {
        override def write(output: OutputStream): Unit = {
          // ZipOutputStream packages the event logs into a zip archive
          val zipStream = new ZipOutputStream(output)
          try {
            uIRoot.writeEventLogs(appId, attemptId, zipStream)
          } finally {
            zipStream.close()
          }

        }
      }

      Response.ok(stream)
        .header("Content-Disposition", s"attachment; filename=$fileName")
        .header("Content-Type", MediaType.APPLICATION_OCTET_STREAM)
        .build()
    } catch {
      case NonFatal(e) =>
        Response.serverError()
          .entity(s"Event logs are not available for app: $appId.")
          .status(Response.Status.SERVICE_UNAVAILABLE)
          .build()
    }
  }
} 
Example 9
Source File: ApplicationListResource.scala    From spark1.52   with Apache License 2.0
package org.apache.spark.status.api.v1

import java.util.{Arrays, Date, List => JList}
import javax.ws.rs.{DefaultValue, GET, Produces, QueryParam}
import javax.ws.rs.core.MediaType

import org.apache.spark.deploy.history.ApplicationHistoryInfo
import org.apache.spark.deploy.master.{ApplicationInfo => InternalApplicationInfo}

@Produces(Array(MediaType.APPLICATION_JSON))
private[v1] class ApplicationListResource(uiRoot: UIRoot) {

  @GET
  def appList(
      @QueryParam("status") status: JList[ApplicationStatus],
      @DefaultValue("2010-01-01") @QueryParam("minDate") minDate: SimpleDateParam,
      @DefaultValue("3000-01-01") @QueryParam("maxDate") maxDate: SimpleDateParam)
  : Iterator[ApplicationInfo] = {
    val allApps = uiRoot.getApplicationInfoList
    val adjStatus = {
      if (status.isEmpty) {
        // A Range, collection, or array cannot be passed directly as varargs; it must be expanded with :_*
        Arrays.asList(ApplicationStatus.values(): _*)
      } else {
        status
      }
    }
    val includeCompleted = adjStatus.contains(ApplicationStatus.COMPLETED)
    val includeRunning = adjStatus.contains(ApplicationStatus.RUNNING)
    allApps.filter { app =>
      val anyRunning = app.attempts.exists(!_.completed)
      // if any attempt is still running, we consider the app to also still be running
      val statusOk = (!anyRunning && includeCompleted) ||
        (anyRunning && includeRunning)
      // keep the app if *any* attempts fall in the right time window
      val dateOk = app.attempts.exists { attempt =>
        attempt.startTime.getTime >= minDate.timestamp &&
          attempt.startTime.getTime <= maxDate.timestamp
      }
      statusOk && dateOk
    }
  }
}

private[spark] object ApplicationsListResource {
  def appHistoryInfoToPublicAppInfo(app: ApplicationHistoryInfo): ApplicationInfo = {
    new ApplicationInfo(
      id = app.id,
      name = app.name,
      attempts = app.attempts.map { internalAttemptInfo =>
        new ApplicationAttemptInfo(
          attemptId = internalAttemptInfo.attemptId,
          startTime = new Date(internalAttemptInfo.startTime),
          endTime = new Date(internalAttemptInfo.endTime),
          sparkUser = internalAttemptInfo.sparkUser,
          completed = internalAttemptInfo.completed
        )
      }
    )
  }

  def convertApplicationInfo(
      internal: InternalApplicationInfo,
      completed: Boolean): ApplicationInfo = {
    // standalone application info always has just one attempt
    new ApplicationInfo(
      id = internal.id,
      name = internal.desc.name,
      attempts = Seq(new ApplicationAttemptInfo(
        attemptId = None,
        startTime = new Date(internal.startTime),
        endTime = new Date(internal.endTime),
        sparkUser = internal.desc.user,
        completed = completed
      ))
    )
  }

} 
Example 10
Source File: OneRDDResource.scala    From spark1.52   with Apache License 2.0
package org.apache.spark.status.api.v1

import javax.ws.rs.{PathParam, GET, Produces}
import javax.ws.rs.core.MediaType

import org.apache.spark.ui.SparkUI
// The @Produces annotation declares the media type(s) of the method's response entity; one or more types can be specified
@Produces(Array(MediaType.APPLICATION_JSON))
private[v1] class OneRDDResource(ui: SparkUI) {

  @GET
  def rddData(@PathParam("rddId") rddId: Int): RDDStorageInfo = {
    AllRDDResource.getRDDStorageInfo(rddId, ui.storageListener, true).getOrElse(
      throw new NotFoundException(s"no rdd found w/ id $rddId")
    )
  }

} 
Example 11
Source File: AllExecutorListResource.scala    From multi-tenancy-spark   with Apache License 2.0
package org.apache.spark.status.api.v1

import javax.ws.rs.{GET, Produces}
import javax.ws.rs.core.MediaType

import org.apache.spark.ui.SparkUI
import org.apache.spark.ui.exec.ExecutorsPage

@Produces(Array(MediaType.APPLICATION_JSON))
private[v1] class AllExecutorListResource(ui: SparkUI) {

  @GET
  def executorList(): Seq[ExecutorSummary] = {
    val listener = ui.executorsListener
    listener.synchronized {
      // The following code should be protected by `listener` to make sure no executors will be
      // removed before we query their status. See SPARK-12784.
      (0 until listener.activeStorageStatusList.size).map { statusId =>
        ExecutorsPage.getExecInfo(listener, statusId, isActive = true)
      } ++ (0 until listener.deadStorageStatusList.size).map { statusId =>
        ExecutorsPage.getExecInfo(listener, statusId, isActive = false)
      }
    }
  }
} 
Example 12
Source File: ExecutorListResource.scala    From spark1.52   with Apache License 2.0
package org.apache.spark.status.api.v1

import javax.ws.rs.{GET, PathParam, Produces}
import javax.ws.rs.core.MediaType

import org.apache.spark.ui.SparkUI
import org.apache.spark.ui.exec.ExecutorsPage

@Produces(Array(MediaType.APPLICATION_JSON))
private[v1] class ExecutorListResource(ui: SparkUI) {

  @GET
  def executorList(): Seq[ExecutorSummary] = {
    val listener = ui.executorsListener
    listener.synchronized {
      // The following code should be protected by `listener` to make sure no executors will be
      // removed before we query their status. See SPARK-12784.
      val storageStatusList = listener.storageStatusList
      (0 until storageStatusList.size).map { statusId =>
        ExecutorsPage.getExecInfo(listener, statusId)
      }
    }
  }
} 
Example 13
Source File: ApplicationListResource.scala    From Spark-2.3.1   with Apache License 2.0
package org.apache.spark.status.api.v1

import java.util.{Date, List => JList}
import javax.ws.rs.{DefaultValue, GET, Produces, QueryParam}
import javax.ws.rs.core.MediaType

@Produces(Array(MediaType.APPLICATION_JSON))
private[v1] class ApplicationListResource extends ApiRequestContext {

  @GET
  def appList(
      @QueryParam("status") status: JList[ApplicationStatus],
      @DefaultValue("2010-01-01") @QueryParam("minDate") minDate: SimpleDateParam,
      @DefaultValue("3000-01-01") @QueryParam("maxDate") maxDate: SimpleDateParam,
      @DefaultValue("2010-01-01") @QueryParam("minEndDate") minEndDate: SimpleDateParam,
      @DefaultValue("3000-01-01") @QueryParam("maxEndDate") maxEndDate: SimpleDateParam,
      @QueryParam("limit") limit: Integer)
  : Iterator[ApplicationInfo] = {

    val numApps = Option(limit).map(_.toInt).getOrElse(Integer.MAX_VALUE)
    val includeCompleted = status.isEmpty || status.contains(ApplicationStatus.COMPLETED)
    val includeRunning = status.isEmpty || status.contains(ApplicationStatus.RUNNING)

    uiRoot.getApplicationInfoList.filter { app =>
      val anyRunning = app.attempts.exists(!_.completed)
      // if any attempt is still running, we consider the app to also still be running;
      // keep the app if *any* attempts fall in the right time window
      ((!anyRunning && includeCompleted) || (anyRunning && includeRunning)) &&
      app.attempts.exists { attempt =>
        isAttemptInRange(attempt, minDate, maxDate, minEndDate, maxEndDate, anyRunning)
      }
    }.take(numApps)
  }

  private def isAttemptInRange(
      attempt: ApplicationAttemptInfo,
      minStartDate: SimpleDateParam,
      maxStartDate: SimpleDateParam,
      minEndDate: SimpleDateParam,
      maxEndDate: SimpleDateParam,
      anyRunning: Boolean): Boolean = {
    val startTimeOk = attempt.startTime.getTime >= minStartDate.timestamp &&
      attempt.startTime.getTime <= maxStartDate.timestamp
    // If the maxEndDate is in the past, exclude all running apps.
    val endTimeOkForRunning = anyRunning && (maxEndDate.timestamp > System.currentTimeMillis())
    val endTimeOkForCompleted = !anyRunning && (attempt.endTime.getTime >= minEndDate.timestamp &&
      attempt.endTime.getTime <= maxEndDate.timestamp)
    val endTimeOk = endTimeOkForRunning || endTimeOkForCompleted
    startTimeOk && endTimeOk
  }
} 
Example 14
Source File: AllRDDResource.scala    From BigDatalog   with Apache License 2.0
package org.apache.spark.status.api.v1

import javax.ws.rs.{GET, Produces}
import javax.ws.rs.core.MediaType

import org.apache.spark.storage.{RDDInfo, StorageStatus, StorageUtils}
import org.apache.spark.ui.SparkUI
import org.apache.spark.ui.storage.StorageListener

@Produces(Array(MediaType.APPLICATION_JSON))
private[v1] class AllRDDResource(ui: SparkUI) {

  @GET
  def rddList(): Seq[RDDStorageInfo] = {
    val storageStatusList = ui.storageListener.storageStatusList
    val rddInfos = ui.storageListener.rddInfoList
    rddInfos.map{rddInfo =>
      AllRDDResource.getRDDStorageInfo(rddInfo.id, rddInfo, storageStatusList,
        includeDetails = false)
    }
  }

}

private[spark] object AllRDDResource {

  def getRDDStorageInfo(
      rddId: Int,
      listener: StorageListener,
      includeDetails: Boolean): Option[RDDStorageInfo] = {
    val storageStatusList = listener.storageStatusList
    listener.rddInfoList.find { _.id == rddId }.map { rddInfo =>
      getRDDStorageInfo(rddId, rddInfo, storageStatusList, includeDetails)
    }
  }

  def getRDDStorageInfo(
      rddId: Int,
      rddInfo: RDDInfo,
      storageStatusList: Seq[StorageStatus],
      includeDetails: Boolean): RDDStorageInfo = {
    val workers = storageStatusList.map { (rddId, _) }
    val blockLocations = StorageUtils.getRddBlockLocations(rddId, storageStatusList)
    val blocks = storageStatusList
      .flatMap { _.rddBlocksById(rddId) }
      .sortWith { _._1.name < _._1.name }
      .map { case (blockId, status) =>
        (blockId, status, blockLocations.get(blockId).getOrElse(Seq[String]("Unknown")))
      }

    val dataDistribution = if (includeDetails) {
      Some(storageStatusList.map { status =>
        new RDDDataDistribution(
          address = status.blockManagerId.hostPort,
          memoryUsed = status.memUsedByRdd(rddId),
          memoryRemaining = status.memRemaining,
          diskUsed = status.diskUsedByRdd(rddId)
        ) } )
    } else {
      None
    }
    val partitions = if (includeDetails) {
      Some(blocks.map { case (id, block, locations) =>
        new RDDPartitionInfo(
          blockName = id.name,
          storageLevel = block.storageLevel.description,
          memoryUsed = block.memSize,
          diskUsed = block.diskSize,
          executors = locations
        )
      } )
    } else {
      None
    }

    new RDDStorageInfo(
      id = rddId,
      name = rddInfo.name,
      numPartitions = rddInfo.numPartitions,
      numCachedPartitions = rddInfo.numCachedPartitions,
      storageLevel = rddInfo.storageLevel.description,
      memoryUsed = rddInfo.memSize,
      diskUsed = rddInfo.diskSize,
      dataDistribution = dataDistribution,
      partitions = partitions
    )
  }
} 
Example 15
Source File: EventLogDownloadResource.scala    From BigDatalog   with Apache License 2.0
package org.apache.spark.status.api.v1

import java.io.OutputStream
import java.util.zip.ZipOutputStream
import javax.ws.rs.{GET, Produces}
import javax.ws.rs.core.{MediaType, Response, StreamingOutput}

import scala.util.control.NonFatal

import org.apache.spark.{Logging, SparkConf}
import org.apache.spark.deploy.SparkHadoopUtil

@Produces(Array(MediaType.APPLICATION_OCTET_STREAM))
private[v1] class EventLogDownloadResource(
    val uIRoot: UIRoot,
    val appId: String,
    val attemptId: Option[String]) extends Logging {
  val conf = SparkHadoopUtil.get.newConfiguration(new SparkConf)

  @GET
  def getEventLogs(): Response = {
    try {
      val fileName = {
        attemptId match {
          case Some(id) => s"eventLogs-$appId-$id.zip"
          case None => s"eventLogs-$appId.zip"
        }
      }

      val stream = new StreamingOutput {
        override def write(output: OutputStream): Unit = {
          val zipStream = new ZipOutputStream(output)
          try {
            uIRoot.writeEventLogs(appId, attemptId, zipStream)
          } finally {
            zipStream.close()
          }

        }
      }

      Response.ok(stream)
        .header("Content-Disposition", s"attachment; filename=$fileName")
        .header("Content-Type", MediaType.APPLICATION_OCTET_STREAM)
        .build()
    } catch {
      case NonFatal(e) =>
        Response.serverError()
          .entity(s"Event logs are not available for app: $appId.")
          .status(Response.Status.SERVICE_UNAVAILABLE)
          .build()
    }
  }
} 
Example 16
Source File: ApplicationListResource.scala    From BigDatalog   with Apache License 2.0
package org.apache.spark.status.api.v1

import java.util.{Arrays, Date, List => JList}
import javax.ws.rs.{DefaultValue, GET, Produces, QueryParam}
import javax.ws.rs.core.MediaType

import org.apache.spark.deploy.history.ApplicationHistoryInfo
import org.apache.spark.deploy.master.{ApplicationInfo => InternalApplicationInfo}

@Produces(Array(MediaType.APPLICATION_JSON))
private[v1] class ApplicationListResource(uiRoot: UIRoot) {

  @GET
  def appList(
      @QueryParam("status") status: JList[ApplicationStatus],
      @DefaultValue("2010-01-01") @QueryParam("minDate") minDate: SimpleDateParam,
      @DefaultValue("3000-01-01") @QueryParam("maxDate") maxDate: SimpleDateParam)
  : Iterator[ApplicationInfo] = {
    val allApps = uiRoot.getApplicationInfoList
    val adjStatus = {
      if (status.isEmpty) {
        Arrays.asList(ApplicationStatus.values(): _*)
      } else {
        status
      }
    }
    val includeCompleted = adjStatus.contains(ApplicationStatus.COMPLETED)
    val includeRunning = adjStatus.contains(ApplicationStatus.RUNNING)
    allApps.filter { app =>
      val anyRunning = app.attempts.exists(!_.completed)
      // if any attempt is still running, we consider the app to also still be running
      val statusOk = (!anyRunning && includeCompleted) ||
        (anyRunning && includeRunning)
      // keep the app if *any* attempts fall in the right time window
      val dateOk = app.attempts.exists { attempt =>
        attempt.startTime.getTime >= minDate.timestamp &&
          attempt.startTime.getTime <= maxDate.timestamp
      }
      statusOk && dateOk
    }
  }
}

private[spark] object ApplicationsListResource {
  def appHistoryInfoToPublicAppInfo(app: ApplicationHistoryInfo): ApplicationInfo = {
    new ApplicationInfo(
      id = app.id,
      name = app.name,
      coresGranted = None,
      maxCores = None,
      coresPerExecutor = None,
      memoryPerExecutorMB = None,
      attempts = app.attempts.map { internalAttemptInfo =>
        new ApplicationAttemptInfo(
          attemptId = internalAttemptInfo.attemptId,
          startTime = new Date(internalAttemptInfo.startTime),
          endTime = new Date(internalAttemptInfo.endTime),
          sparkUser = internalAttemptInfo.sparkUser,
          completed = internalAttemptInfo.completed
        )
      }
    )
  }

  def convertApplicationInfo(
      internal: InternalApplicationInfo,
      completed: Boolean): ApplicationInfo = {
    // standalone application info always has just one attempt
    new ApplicationInfo(
      id = internal.id,
      name = internal.desc.name,
      coresGranted = Some(internal.coresGranted),
      maxCores = internal.desc.maxCores,
      coresPerExecutor = internal.desc.coresPerExecutor,
      memoryPerExecutorMB = Some(internal.desc.memoryPerExecutorMB),
      attempts = Seq(new ApplicationAttemptInfo(
        attemptId = None,
        startTime = new Date(internal.startTime),
        endTime = new Date(internal.endTime),
        sparkUser = internal.desc.user,
        completed = completed
      ))
    )
  }

} 
Example 17
Source File: OneJobResource.scala    From BigDatalog   with Apache License 2.0
package org.apache.spark.status.api.v1

import javax.ws.rs.{PathParam, GET, Produces}
import javax.ws.rs.core.MediaType

import org.apache.spark.JobExecutionStatus
import org.apache.spark.ui.SparkUI
import org.apache.spark.ui.jobs.UIData.JobUIData

@Produces(Array(MediaType.APPLICATION_JSON))
private[v1] class OneJobResource(ui: SparkUI) {

  @GET
  def oneJob(@PathParam("jobId") jobId: Int): JobData = {
    val statusToJobs: Seq[(JobExecutionStatus, Seq[JobUIData])] =
      AllJobsResource.getStatusToJobs(ui)
    val jobOpt = statusToJobs.map {_._2} .flatten.find { jobInfo => jobInfo.jobId == jobId}
    jobOpt.map { job =>
      AllJobsResource.convertJobData(job, ui.jobProgressListener, false)
    }.getOrElse {
      throw new NotFoundException("unknown job: " + jobId)
    }
  }

} 
Example 18
Source File: ExecutorListResource.scala    From BigDatalog   with Apache License 2.0
package org.apache.spark.status.api.v1

import javax.ws.rs.{GET, PathParam, Produces}
import javax.ws.rs.core.MediaType

import org.apache.spark.ui.SparkUI
import org.apache.spark.ui.exec.ExecutorsPage

@Produces(Array(MediaType.APPLICATION_JSON))
private[v1] class ExecutorListResource(ui: SparkUI) {

  @GET
  def executorList(): Seq[ExecutorSummary] = {
    val listener = ui.executorsListener
    listener.synchronized {
      // The following code should be protected by `listener` to make sure no executors will be
      // removed before we query their status. See SPARK-12784.
      val storageStatusList = listener.storageStatusList
      (0 until storageStatusList.size).map { statusId =>
        ExecutorsPage.getExecInfo(listener, statusId)
      }
    }
  }
} 
Example 19
Source File: HelloService.scala    From swagger-akka-http-sample   with Apache License 2.0
package com.example.akka.hello

import javax.ws.rs.{GET, Path, Produces}
import akka.actor.ActorRef
import akka.http.scaladsl.server.{Directives, Route}
import akka.pattern.ask
import akka.util.Timeout
import com.example.akka.DefaultJsonFormats
import com.example.akka.hello.HelloActor._
import io.swagger.v3.oas.annotations.enums.ParameterIn
import io.swagger.v3.oas.annotations.media.{Content, Schema}
import io.swagger.v3.oas.annotations.responses.ApiResponse
import io.swagger.v3.oas.annotations.{Operation, Parameter}
import javax.ws.rs.core.MediaType
import spray.json.RootJsonFormat

import scala.concurrent.ExecutionContext
import scala.concurrent.duration.DurationInt

@Path("/hello")
class HelloService(hello: ActorRef)(implicit executionContext: ExecutionContext)
  extends Directives with DefaultJsonFormats {

  implicit val timeout: Timeout = Timeout(2.seconds)
  implicit val greetingFormat: RootJsonFormat[Greeting] = jsonFormat1(Greeting)

  val route: Route =
    getHello ~
    getHelloSegment

  @GET
  @Produces(Array(MediaType.APPLICATION_JSON))
  @Operation(summary = "Return Hello greeting (anonymous)", description = "Return Hello greeting for anonymous request",
    responses = Array(
      new ApiResponse(responseCode = "200", description = "Hello response",
        content = Array(new Content(schema = new Schema(implementation = classOf[Greeting])))),
      new ApiResponse(responseCode = "500", description = "Internal server error"))
  )
  def getHello: Route =
    path("hello") {
      get {
        complete { (hello ? AnonymousHello).mapTo[Greeting] }
      }
    }

  @GET
  @Produces(Array(MediaType.APPLICATION_JSON))
  @Operation(summary = "Return Hello greeting", description = "Return Hello greeting for named user",
    parameters = Array(new Parameter(name = "name", in = ParameterIn.PATH, description = "user name")),
    responses = Array(
      new ApiResponse(responseCode = "200", description = "Hello response",
        content = Array(new Content(schema = new Schema(implementation = classOf[Greeting])))),
      new ApiResponse(responseCode = "500", description = "Internal server error"))
  )
  def getHelloSegment: Route =
    path("hello" / Segment) { name =>
      get {
        complete { (hello ? Hello(name)).mapTo[Greeting] }
      }
    }
} 
Example 20
Source File: HealthCheckResource.scala    From orders-aws   with Apache License 2.0
package works.weave.socks.aws.orders.presentation.resource

import java.net.URI
import java.util.Calendar;
import java.util.Date;
import javax.ws.rs.GET
import javax.ws.rs.Path
import javax.ws.rs.core.Response

import org.slf4j.LoggerFactory
import org.springframework.stereotype.Component

import works.weave.socks.aws.orders.presentation.resource.HealthCheckResource._
import works.weave.socks.spring.aws.DynamoConfiguration

@Component
@Path("health")
class HealthCheckResource(dynamoConnection : DynamoConfiguration) {

  @GET
  def getHealthCheck() : Response = {
    Log.info("health check requested")
    val dateNow = Calendar.getInstance().getTime();

    val ordersHealth = Map(
      "service" -> "orders-aws",
      "status" -> "OK",
      "time" -> dateNow)
    val dynamoDBHealth = scala.collection.mutable.Map(
      "service" -> "orders-aws-dynamodb",
      "status" -> "OK",
      "time" -> dateNow)

    try {
      val table = dynamoConnection.client.describeTable("orders")
    } catch {
      case unknown : Throwable => dynamoDBHealth("status") = "err"
    }

    val map = Map("health" -> Array(ordersHealth, dynamoDBHealth))
    Log.info("health check completed")
    Response.created(new URI("http://tbd")).entity(map).build()
  }

}
object HealthCheckResource {
  val Log = LoggerFactory.getLogger(classOf[HealthCheckResource])
} 
Example 21
Source File: EventLogDownloadResource.scala    From sparkoscope   with Apache License 2.0
package org.apache.spark.status.api.v1

import java.io.OutputStream
import java.util.zip.ZipOutputStream
import javax.ws.rs.{GET, Produces}
import javax.ws.rs.core.{MediaType, Response, StreamingOutput}

import scala.util.control.NonFatal

import org.apache.spark.SparkConf
import org.apache.spark.deploy.SparkHadoopUtil
import org.apache.spark.internal.Logging

@Produces(Array(MediaType.APPLICATION_OCTET_STREAM))
private[v1] class EventLogDownloadResource(
    val uIRoot: UIRoot,
    val appId: String,
    val attemptId: Option[String]) extends Logging {
  val conf = SparkHadoopUtil.get.newConfiguration(new SparkConf)

  @GET
  def getEventLogs(): Response = {
    try {
      val fileName = {
        attemptId match {
          case Some(id) => s"eventLogs-$appId-$id.zip"
          case None => s"eventLogs-$appId.zip"
        }
      }

      val stream = new StreamingOutput {
        override def write(output: OutputStream): Unit = {
          val zipStream = new ZipOutputStream(output)
          try {
            uIRoot.writeEventLogs(appId, attemptId, zipStream)
          } finally {
            zipStream.close()
          }

        }
      }

      Response.ok(stream)
        .header("Content-Disposition", s"attachment; filename=$fileName")
        .header("Content-Type", MediaType.APPLICATION_OCTET_STREAM)
        .build()
    } catch {
      case NonFatal(e) =>
        Response.serverError()
          .entity(s"Event logs are not available for app: $appId.")
          .status(Response.Status.SERVICE_UNAVAILABLE)
          .build()
    }
  }
} 
Example 22
Source File: EventLogDownloadResource.scala    From drizzle-spark   with Apache License 2.0
package org.apache.spark.status.api.v1

import java.io.OutputStream
import java.util.zip.ZipOutputStream
import javax.ws.rs.{GET, Produces}
import javax.ws.rs.core.{MediaType, Response, StreamingOutput}

import scala.util.control.NonFatal

import org.apache.spark.SparkConf
import org.apache.spark.deploy.SparkHadoopUtil
import org.apache.spark.internal.Logging

@Produces(Array(MediaType.APPLICATION_OCTET_STREAM))
private[v1] class EventLogDownloadResource(
    val uIRoot: UIRoot,
    val appId: String,
    val attemptId: Option[String]) extends Logging {
  val conf = SparkHadoopUtil.get.newConfiguration(new SparkConf)

  @GET
  def getEventLogs(): Response = {
    try {
      val fileName = {
        attemptId match {
          case Some(id) => s"eventLogs-$appId-$id.zip"
          case None => s"eventLogs-$appId.zip"
        }
      }

      val stream = new StreamingOutput {
        override def write(output: OutputStream): Unit = {
          val zipStream = new ZipOutputStream(output)
          try {
            uIRoot.writeEventLogs(appId, attemptId, zipStream)
          } finally {
            zipStream.close()
          }

        }
      }

      Response.ok(stream)
        .header("Content-Disposition", s"attachment; filename=$fileName")
        .header("Content-Type", MediaType.APPLICATION_OCTET_STREAM)
        .build()
    } catch {
      case NonFatal(e) =>
        Response.serverError()
          .entity(s"Event logs are not available for app: $appId.")
          .status(Response.Status.SERVICE_UNAVAILABLE)
          .build()
    }
  }
} 
Example 23
Source File: ApplicationListResource.scala    From drizzle-spark   with Apache License 2.0
package org.apache.spark.status.api.v1

import java.util.{Date, List => JList}
import javax.ws.rs.{DefaultValue, GET, Produces, QueryParam}
import javax.ws.rs.core.MediaType

import org.apache.spark.deploy.history.ApplicationHistoryInfo

@Produces(Array(MediaType.APPLICATION_JSON))
private[v1] class ApplicationListResource(uiRoot: UIRoot) {

  @GET
  def appList(
      @QueryParam("status") status: JList[ApplicationStatus],
      @DefaultValue("2010-01-01") @QueryParam("minDate") minDate: SimpleDateParam,
      @DefaultValue("3000-01-01") @QueryParam("maxDate") maxDate: SimpleDateParam,
      @QueryParam("limit") limit: Integer)
  : Iterator[ApplicationInfo] = {

    val numApps = Option(limit).map(_.toInt).getOrElse(Integer.MAX_VALUE)
    val includeCompleted = status.isEmpty || status.contains(ApplicationStatus.COMPLETED)
    val includeRunning = status.isEmpty || status.contains(ApplicationStatus.RUNNING)

    uiRoot.getApplicationInfoList.filter { app =>
      val anyRunning = app.attempts.exists(!_.completed)
      // if any attempt is still running, we consider the app to also still be running;
      // keep the app if *any* attempts fall in the right time window
      ((!anyRunning && includeCompleted) || (anyRunning && includeRunning)) &&
      app.attempts.exists { attempt =>
        val start = attempt.startTime.getTime
        start >= minDate.timestamp && start <= maxDate.timestamp
      }
    }.take(numApps)
  }
}

private[spark] object ApplicationsListResource {
  def appHistoryInfoToPublicAppInfo(app: ApplicationHistoryInfo): ApplicationInfo = {
    new ApplicationInfo(
      id = app.id,
      name = app.name,
      coresGranted = None,
      maxCores = None,
      coresPerExecutor = None,
      memoryPerExecutorMB = None,
      attempts = app.attempts.map { internalAttemptInfo =>
        new ApplicationAttemptInfo(
          attemptId = internalAttemptInfo.attemptId,
          startTime = new Date(internalAttemptInfo.startTime),
          endTime = new Date(internalAttemptInfo.endTime),
          duration =
            if (internalAttemptInfo.endTime > 0) {
              internalAttemptInfo.endTime - internalAttemptInfo.startTime
            } else {
              0
            },
          lastUpdated = new Date(internalAttemptInfo.lastUpdated),
          sparkUser = internalAttemptInfo.sparkUser,
          completed = internalAttemptInfo.completed
        )
      }
    )
  }
} 
Example 24
Source File: OneJobResource.scala    From drizzle-spark   with Apache License 2.0
package org.apache.spark.status.api.v1

import javax.ws.rs.{GET, PathParam, Produces}
import javax.ws.rs.core.MediaType

import org.apache.spark.JobExecutionStatus
import org.apache.spark.ui.SparkUI
import org.apache.spark.ui.jobs.UIData.JobUIData

@Produces(Array(MediaType.APPLICATION_JSON))
private[v1] class OneJobResource(ui: SparkUI) {

  @GET
  def oneJob(@PathParam("jobId") jobId: Int): JobData = {
    val statusToJobs: Seq[(JobExecutionStatus, Seq[JobUIData])] =
      AllJobsResource.getStatusToJobs(ui)
    val jobOpt = statusToJobs.flatMap(_._2).find { jobInfo => jobInfo.jobId == jobId}
    jobOpt.map { job =>
      AllJobsResource.convertJobData(job, ui.jobProgressListener, false)
    }.getOrElse {
      throw new NotFoundException("unknown job: " + jobId)
    }
  }

} 
Example 25
Source File: AllExecutorListResource.scala    From drizzle-spark   with Apache License 2.0
package org.apache.spark.status.api.v1

import javax.ws.rs.{GET, Produces}
import javax.ws.rs.core.MediaType

import org.apache.spark.ui.SparkUI
import org.apache.spark.ui.exec.ExecutorsPage

@Produces(Array(MediaType.APPLICATION_JSON))
private[v1] class AllExecutorListResource(ui: SparkUI) {

  @GET
  def executorList(): Seq[ExecutorSummary] = {
    val listener = ui.executorsListener
    listener.synchronized {
      // The following code should be protected by `listener` to make sure no executors will be
      // removed before we query their status. See SPARK-12784.
      (0 until listener.activeStorageStatusList.size).map { statusId =>
        ExecutorsPage.getExecInfo(listener, statusId, isActive = true)
      } ++ (0 until listener.deadStorageStatusList.size).map { statusId =>
        ExecutorsPage.getExecInfo(listener, statusId, isActive = false)
      }
    }
  }
} 
Example 26
Source File: ExecutorListResource.scala    From drizzle-spark   with Apache License 2.0
package org.apache.spark.status.api.v1

import javax.ws.rs.{GET, PathParam, Produces}
import javax.ws.rs.core.MediaType

import org.apache.spark.ui.SparkUI
import org.apache.spark.ui.exec.ExecutorsPage

@Produces(Array(MediaType.APPLICATION_JSON))
private[v1] class ExecutorListResource(ui: SparkUI) {

  @GET
  def executorList(): Seq[ExecutorSummary] = {
    val listener = ui.executorsListener
    listener.synchronized {
      // The following code should be protected by `listener` to make sure no executors will be
      // removed before we query their status. See SPARK-12784.
      val storageStatusList = listener.activeStorageStatusList
      (0 until storageStatusList.size).map { statusId =>
        ExecutorsPage.getExecInfo(listener, statusId, isActive = true)
      }
    }
  }
} 
Example 27
Source File: Status.scala    From daf-semantics   with Apache License 2.0
package it.almawave.kb.http.endpoints

import java.time.LocalTime
import io.swagger.annotations.Api
import javax.ws.rs.Path
import javax.ws.rs.GET
import javax.ws.rs.Produces
import io.swagger.annotations.ApiOperation
import javax.ws.rs.core.MediaType
import org.slf4j.LoggerFactory
import javax.ws.rs.core.Context
import javax.ws.rs.core.UriInfo
import javax.ws.rs.core.Request
import it.almawave.linkeddata.kb.utils.JSONHelper
import java.time.LocalDateTime
import java.time.ZonedDateTime
import java.time.format.DateTimeFormatter
import java.util.Locale
import java.time.ZoneId

@Api(tags = Array("catalog"))
@Path("/status")
class Status {

  private val logger = LoggerFactory.getLogger(this.getClass)

  @Context
  var uriInfo: UriInfo = null

  @GET
  @Produces(Array(MediaType.APPLICATION_JSON))
  @ApiOperation(nickname = "status", value = "endpoint status")
  def status() = {

    val base_uri = uriInfo.getBaseUri
    val msg = s"the service is running at ${base_uri}"
    logger.info(msg)

    val _now = now()
    StatusMsg(_now._1, _now._2, msg)

  }

  def now() = {

    val zdt = ZonedDateTime.now(ZoneId.of("+1"))
    val dtf = DateTimeFormatter.ofPattern("yyyy-MM-dd'T'HH:mm:ss.SSSSSSZ")

    (zdt.format(dtf), zdt)

  }

}

case class StatusMsg(
  now:      String,
  dateTime: ZonedDateTime,
  msg:      String
) 
Example 28
Source File: AppServer.scala    From keycloak-benchmark   with Apache License 2.0
package org.jboss.perf

import java.net.InetSocketAddress
import java.util.concurrent.TimeUnit
import java.util.concurrent.atomic.LongAdder
import javax.ws.rs.core.Response
import javax.ws.rs.{GET, POST, Path}

import com.sun.net.httpserver.HttpServer
import org.jboss.resteasy.plugins.server.sun.http.HttpContextBuilder
import org.keycloak.constants.AdapterConstants


object AppServer {
  private val address: Array[String] = Options.app.split(":")
  private val httpServer = HttpServer.create(new InetSocketAddress(address(0), address(1).toInt), 100)
  private val contextBuilder = new HttpContextBuilder()
  contextBuilder.getDeployment().getActualResourceClasses().add(classOf[AppServer])
  private val context = contextBuilder.bind(httpServer)

  private val logouts = new LongAdder();
  private val versions = new LongAdder();
  private val pushNotBefores = new LongAdder();
  private val queryBearerTokens = new LongAdder();
  private val testAvailables = new LongAdder();

  def main(args: Array[String]): Unit = {
    httpServer.start()
    val timeout = Options.rampUp + Options.duration + Options.rampDown + 10;
    Thread.sleep(TimeUnit.SECONDS.toMillis(timeout))
    httpServer.stop(0);
    printf("AppServer stats:%n%8d logout%n%8d version%n%8d pushNotBefore%n%8d queryBearerToken%n%8d testAvailables%n",
      logouts.longValue(), versions.longValue(), pushNotBefores.longValue(), queryBearerTokens.longValue(), testAvailables.longValue())
  }
}

@Path("/admin")
class AppServer {

  @GET
  @POST
  @Path(AdapterConstants.K_LOGOUT)
  def logout(): Response = {
    AppServer.logouts.increment()
    Response.ok().build()
  }

  @GET
  @POST
  @Path(AdapterConstants.K_VERSION)
  def version(): Response = {
    AppServer.versions.increment()
    Response.ok().build()
  }

  @GET
  @POST
  @Path(AdapterConstants.K_PUSH_NOT_BEFORE)
  def pushNotBefore(): Response = {
    AppServer.pushNotBefores.increment()
    Response.ok().build()
  }

  @GET
  @POST
  @Path(AdapterConstants.K_QUERY_BEARER_TOKEN)
  def queryBearerToken(): Response = {
    AppServer.queryBearerTokens.increment()
    Response.ok().build()
  }

  @GET
  @POST
  @Path(AdapterConstants.K_TEST_AVAILABLE)
  def testAvailable(): Response = {
    AppServer.testAvailables.increment()
    Response.ok().build()
  }

} 
Example 29
Source File: ExecutorNumResource.scala    From XSQL   with Apache License 2.0
package org.apache.spark.status.api.v1

import javax.ws.rs.{GET, Path, Produces}
import javax.ws.rs.core.MediaType

import scala.collection.JavaConverters._

import org.apache.spark.monitor.{ExecutorNum, ExecutorNumWrapper}

@Path("/v1")
@Produces(Array(MediaType.APPLICATION_JSON))
class ExecutorNumResource extends BaseAppResource {
  @GET
  @Path("applications/{appId}/executorNumCurve")
  def executorNumCurve1(): Seq[ExecutorNum] =
    withUI(_.store.store.view(classOf[ExecutorNumWrapper]).asScala.map(_.point).toSeq)
  @GET
  @Path("applications/{appId}/{attemptId}/executorNumCurve")
  def executorNumCurve2(): Seq[ExecutorNum] =
    withUI(_.store.store.view(classOf[ExecutorNumWrapper]).asScala.map(_.point).toSeq)

} 
Example 30
Source File: AllRDDResource.scala    From sparkoscope   with Apache License 2.0
package org.apache.spark.status.api.v1

import javax.ws.rs.{GET, Produces}
import javax.ws.rs.core.MediaType

import org.apache.spark.storage.{RDDInfo, StorageStatus, StorageUtils}
import org.apache.spark.ui.SparkUI
import org.apache.spark.ui.storage.StorageListener

@Produces(Array(MediaType.APPLICATION_JSON))
private[v1] class AllRDDResource(ui: SparkUI) {

  @GET
  def rddList(): Seq[RDDStorageInfo] = {
    val storageStatusList = ui.storageListener.activeStorageStatusList
    val rddInfos = ui.storageListener.rddInfoList
    rddInfos.map{rddInfo =>
      AllRDDResource.getRDDStorageInfo(rddInfo.id, rddInfo, storageStatusList,
        includeDetails = false)
    }
  }

}

private[spark] object AllRDDResource {

  def getRDDStorageInfo(
      rddId: Int,
      listener: StorageListener,
      includeDetails: Boolean): Option[RDDStorageInfo] = {
    val storageStatusList = listener.activeStorageStatusList
    listener.rddInfoList.find { _.id == rddId }.map { rddInfo =>
      getRDDStorageInfo(rddId, rddInfo, storageStatusList, includeDetails)
    }
  }

  def getRDDStorageInfo(
      rddId: Int,
      rddInfo: RDDInfo,
      storageStatusList: Seq[StorageStatus],
      includeDetails: Boolean): RDDStorageInfo = {
    val workers = storageStatusList.map { (rddId, _) }
    val blockLocations = StorageUtils.getRddBlockLocations(rddId, storageStatusList)
    val blocks = storageStatusList
      .flatMap { _.rddBlocksById(rddId) }
      .sortWith { _._1.name < _._1.name }
      .map { case (blockId, status) =>
        (blockId, status, blockLocations.getOrElse(blockId, Seq[String]("Unknown")))
      }

    val dataDistribution = if (includeDetails) {
      Some(storageStatusList.map { status =>
        new RDDDataDistribution(
          address = status.blockManagerId.hostPort,
          memoryUsed = status.memUsedByRdd(rddId),
          memoryRemaining = status.memRemaining,
          diskUsed = status.diskUsedByRdd(rddId)
        ) } )
    } else {
      None
    }
    val partitions = if (includeDetails) {
      Some(blocks.map { case (id, block, locations) =>
        new RDDPartitionInfo(
          blockName = id.name,
          storageLevel = block.storageLevel.description,
          memoryUsed = block.memSize,
          diskUsed = block.diskSize,
          executors = locations
        )
      } )
    } else {
      None
    }

    new RDDStorageInfo(
      id = rddId,
      name = rddInfo.name,
      numPartitions = rddInfo.numPartitions,
      numCachedPartitions = rddInfo.numCachedPartitions,
      storageLevel = rddInfo.storageLevel.description,
      memoryUsed = rddInfo.memSize,
      diskUsed = rddInfo.diskSize,
      dataDistribution = dataDistribution,
      partitions = partitions
    )
  }
} 
Example 31
Source File: AllRDDResource.scala    From drizzle-spark   with Apache License 2.0
package org.apache.spark.status.api.v1

import javax.ws.rs.{GET, Produces}
import javax.ws.rs.core.MediaType

import org.apache.spark.storage.{RDDInfo, StorageStatus, StorageUtils}
import org.apache.spark.ui.SparkUI
import org.apache.spark.ui.storage.StorageListener

@Produces(Array(MediaType.APPLICATION_JSON))
private[v1] class AllRDDResource(ui: SparkUI) {

  @GET
  def rddList(): Seq[RDDStorageInfo] = {
    val storageStatusList = ui.storageListener.activeStorageStatusList
    val rddInfos = ui.storageListener.rddInfoList
    rddInfos.map{rddInfo =>
      AllRDDResource.getRDDStorageInfo(rddInfo.id, rddInfo, storageStatusList,
        includeDetails = false)
    }
  }

}

private[spark] object AllRDDResource {

  def getRDDStorageInfo(
      rddId: Int,
      listener: StorageListener,
      includeDetails: Boolean): Option[RDDStorageInfo] = {
    val storageStatusList = listener.activeStorageStatusList
    listener.rddInfoList.find { _.id == rddId }.map { rddInfo =>
      getRDDStorageInfo(rddId, rddInfo, storageStatusList, includeDetails)
    }
  }

  def getRDDStorageInfo(
      rddId: Int,
      rddInfo: RDDInfo,
      storageStatusList: Seq[StorageStatus],
      includeDetails: Boolean): RDDStorageInfo = {
    val workers = storageStatusList.map { (rddId, _) }
    val blockLocations = StorageUtils.getRddBlockLocations(rddId, storageStatusList)
    val blocks = storageStatusList
      .flatMap { _.rddBlocksById(rddId) }
      .sortWith { _._1.name < _._1.name }
      .map { case (blockId, status) =>
        (blockId, status, blockLocations.getOrElse(blockId, Seq[String]("Unknown")))
      }

    val dataDistribution = if (includeDetails) {
      Some(storageStatusList.map { status =>
        new RDDDataDistribution(
          address = status.blockManagerId.hostPort,
          memoryUsed = status.memUsedByRdd(rddId),
          memoryRemaining = status.memRemaining,
          diskUsed = status.diskUsedByRdd(rddId)
        ) } )
    } else {
      None
    }
    val partitions = if (includeDetails) {
      Some(blocks.map { case (id, block, locations) =>
        new RDDPartitionInfo(
          blockName = id.name,
          storageLevel = block.storageLevel.description,
          memoryUsed = block.memSize,
          diskUsed = block.diskSize,
          executors = locations
        )
      } )
    } else {
      None
    }

    new RDDStorageInfo(
      id = rddId,
      name = rddInfo.name,
      numPartitions = rddInfo.numPartitions,
      numCachedPartitions = rddInfo.numCachedPartitions,
      storageLevel = rddInfo.storageLevel.description,
      memoryUsed = rddInfo.memSize,
      diskUsed = rddInfo.diskSize,
      dataDistribution = dataDistribution,
      partitions = partitions
    )
  }
} 
Example 32
Source File: ApplicationListResource.scala    From sparkoscope   with Apache License 2.0
package org.apache.spark.status.api.v1

import java.util.{Date, List => JList}
import javax.ws.rs.{DefaultValue, GET, Produces, QueryParam}
import javax.ws.rs.core.MediaType

import org.apache.spark.deploy.history.ApplicationHistoryInfo

@Produces(Array(MediaType.APPLICATION_JSON))
private[v1] class ApplicationListResource(uiRoot: UIRoot) {

  @GET
  def appList(
      @QueryParam("status") status: JList[ApplicationStatus],
      @DefaultValue("2010-01-01") @QueryParam("minDate") minDate: SimpleDateParam,
      @DefaultValue("3000-01-01") @QueryParam("maxDate") maxDate: SimpleDateParam,
      @QueryParam("limit") limit: Integer)
  : Iterator[ApplicationInfo] = {

    val numApps = Option(limit).map(_.toInt).getOrElse(Integer.MAX_VALUE)
    val includeCompleted = status.isEmpty || status.contains(ApplicationStatus.COMPLETED)
    val includeRunning = status.isEmpty || status.contains(ApplicationStatus.RUNNING)

    uiRoot.getApplicationInfoList.filter { app =>
      val anyRunning = app.attempts.exists(!_.completed)
      // if any attempt is still running, we consider the app to also still be running;
      // keep the app if *any* attempts fall in the right time window
      ((!anyRunning && includeCompleted) || (anyRunning && includeRunning)) &&
      app.attempts.exists { attempt =>
        val start = attempt.startTime.getTime
        start >= minDate.timestamp && start <= maxDate.timestamp
      }
    }.take(numApps)
  }
}

private[spark] object ApplicationsListResource {
  def appHistoryInfoToPublicAppInfo(app: ApplicationHistoryInfo): ApplicationInfo = {
    new ApplicationInfo(
      id = app.id,
      name = app.name,
      coresGranted = None,
      maxCores = None,
      coresPerExecutor = None,
      memoryPerExecutorMB = None,
      attempts = app.attempts.map { internalAttemptInfo =>
        new ApplicationAttemptInfo(
          attemptId = internalAttemptInfo.attemptId,
          startTime = new Date(internalAttemptInfo.startTime),
          endTime = new Date(internalAttemptInfo.endTime),
          duration =
            if (internalAttemptInfo.endTime > 0) {
              internalAttemptInfo.endTime - internalAttemptInfo.startTime
            } else {
              0
            },
          lastUpdated = new Date(internalAttemptInfo.lastUpdated),
          sparkUser = internalAttemptInfo.sparkUser,
          completed = internalAttemptInfo.completed
        )
      }
    )
  }
} 
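ApplicationListResource backs the /api/v1/applications endpoint of Spark's monitoring REST API, and its @QueryParam/@DefaultValue annotations map directly onto URL query parameters. A hedged client-side sketch follows; it assumes a history server listening on localhost:18080, and the dates and limit are arbitrary placeholders.

import scala.io.Source

// Fetch completed applications that started on or after 2015-01-01, capped at
// 10 results. status, minDate and limit match the @QueryParam bindings in appList().
object ListApplications {
  def main(args: Array[String]): Unit = {
    val url =
      "http://localhost:18080/api/v1/applications" +
        "?status=completed&minDate=2015-01-01&limit=10"
    val src = Source.fromURL(url)
    try println(src.mkString)   // raw JSON array of ApplicationInfo
    finally src.close()
  }
}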
Example 33
Source File: OneJobResource.scala    From sparkoscope   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.status.api.v1

import javax.ws.rs.{GET, PathParam, Produces}
import javax.ws.rs.core.MediaType

import org.apache.spark.JobExecutionStatus
import org.apache.spark.ui.SparkUI
import org.apache.spark.ui.jobs.UIData.JobUIData

@Produces(Array(MediaType.APPLICATION_JSON))
private[v1] class OneJobResource(ui: SparkUI) {

  @GET
  def oneJob(@PathParam("jobId") jobId: Int): JobData = {
    val statusToJobs: Seq[(JobExecutionStatus, Seq[JobUIData])] =
      AllJobsResource.getStatusToJobs(ui)
    val jobOpt = statusToJobs.flatMap(_._2).find { jobInfo => jobInfo.jobId == jobId}
    jobOpt.map { job =>
      AllJobsResource.convertJobData(job, ui.jobProgressListener, false)
    }.getOrElse {
      throw new NotFoundException("unknown job: " + jobId)
    }
  }

} 
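OneJobResource serves /api/v1/applications/[app-id]/jobs/[job-id], and the NotFoundException thrown for an unknown job id is translated by the servlet layer into an HTTP 404. A small client-side sketch of handling that case; the history-server address, application id, and job id are placeholders.

import java.net.{HttpURLConnection, URL}
import scala.io.Source

object FetchOneJob {
  def main(args: Array[String]): Unit = {
    val appId = "app-20170101000000-0000"            // placeholder application id
    val jobId = 3                                    // placeholder job id
    val url = new URL(
      s"http://localhost:18080/api/v1/applications/$appId/jobs/$jobId")

    val conn = url.openConnection().asInstanceOf[HttpURLConnection]
    conn.getResponseCode match {
      case HttpURLConnection.HTTP_OK =>
        println(Source.fromInputStream(conn.getInputStream).mkString) // JSON JobData
      case HttpURLConnection.HTTP_NOT_FOUND =>
        println(s"unknown job: $jobId")              // NotFoundException on the server side
      case other =>
        println(s"unexpected status: $other")
    }
  }
}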
Example 34
Source File: AllExecutorListResource.scala    From sparkoscope   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.status.api.v1

import javax.ws.rs.{GET, Produces}
import javax.ws.rs.core.MediaType

import org.apache.spark.ui.SparkUI
import org.apache.spark.ui.exec.ExecutorsPage

@Produces(Array(MediaType.APPLICATION_JSON))
private[v1] class AllExecutorListResource(ui: SparkUI) {

  @GET
  def executorList(): Seq[ExecutorSummary] = {
    val listener = ui.executorsListener
    listener.synchronized {
      // The following code must be protected by `listener` so that no executors are
      // removed before we query their status. See SPARK-12784.
      (0 until listener.activeStorageStatusList.size).map { statusId =>
        ExecutorsPage.getExecInfo(listener, statusId, isActive = true)
      } ++ (0 until listener.deadStorageStatusList.size).map { statusId =>
        ExecutorsPage.getExecInfo(listener, statusId, isActive = false)
      }
    }
  }
} 
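AllExecutorListResource differs from ExecutorListResource (next example) only in that it also walks deadStorageStatusList, so it reports executors that have been removed as well as live ones. The two resources back /allexecutors and /executors respectively. A hedged comparison from the client side; host, port, and application id are placeholders, and the sketch assumes a running application UI on port 4040.

import scala.io.Source

object CompareExecutorEndpoints {
  def main(args: Array[String]): Unit = {
    val appId = "app-20170101000000-0000"           // placeholder application id
    val base  = s"http://localhost:4040/api/v1/applications/$appId"

    // /executors returns only active executors ...
    val active = Source.fromURL(s"$base/executors").mkString
    // ... while /allexecutors also includes dead ones (isActive = false above).
    val all    = Source.fromURL(s"$base/allexecutors").mkString

    println(s"active executors payload length: ${active.length}")
    println(s"all executors payload length:    ${all.length}")
  }
}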
Example 35
Source File: ExecutorListResource.scala    From sparkoscope   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.status.api.v1

import javax.ws.rs.{GET, PathParam, Produces}
import javax.ws.rs.core.MediaType

import org.apache.spark.ui.SparkUI
import org.apache.spark.ui.exec.ExecutorsPage

@Produces(Array(MediaType.APPLICATION_JSON))
private[v1] class ExecutorListResource(ui: SparkUI) {

  @GET
  def executorList(): Seq[ExecutorSummary] = {
    val listener = ui.executorsListener
    listener.synchronized {
      // The following code must be protected by `listener` so that no executors are
      // removed before we query their status. See SPARK-12784.
      val storageStatusList = listener.activeStorageStatusList
      (0 until storageStatusList.size).map { statusId =>
        ExecutorsPage.getExecInfo(listener, statusId, isActive = true)
      }
    }
  }
} 
Example 36
Source File: AllRDDResource.scala    From multi-tenancy-spark   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.status.api.v1

import javax.ws.rs.{GET, Produces}
import javax.ws.rs.core.MediaType

import org.apache.spark.storage.{RDDInfo, StorageStatus, StorageUtils}
import org.apache.spark.ui.SparkUI
import org.apache.spark.ui.storage.StorageListener

@Produces(Array(MediaType.APPLICATION_JSON))
private[v1] class AllRDDResource(ui: SparkUI) {

  @GET
  def rddList(): Seq[RDDStorageInfo] = {
    val storageStatusList = ui.storageListener.activeStorageStatusList
    val rddInfos = ui.storageListener.rddInfoList
    rddInfos.map{rddInfo =>
      AllRDDResource.getRDDStorageInfo(rddInfo.id, rddInfo, storageStatusList,
        includeDetails = false)
    }
  }

}

private[spark] object AllRDDResource {

  def getRDDStorageInfo(
      rddId: Int,
      listener: StorageListener,
      includeDetails: Boolean): Option[RDDStorageInfo] = {
    val storageStatusList = listener.activeStorageStatusList
    listener.rddInfoList.find { _.id == rddId }.map { rddInfo =>
      getRDDStorageInfo(rddId, rddInfo, storageStatusList, includeDetails)
    }
  }

  def getRDDStorageInfo(
      rddId: Int,
      rddInfo: RDDInfo,
      storageStatusList: Seq[StorageStatus],
      includeDetails: Boolean): RDDStorageInfo = {
    val workers = storageStatusList.map { (rddId, _) }
    val blockLocations = StorageUtils.getRddBlockLocations(rddId, storageStatusList)
    val blocks = storageStatusList
      .flatMap { _.rddBlocksById(rddId) }
      .sortWith { _._1.name < _._1.name }
      .map { case (blockId, status) =>
        (blockId, status, blockLocations.getOrElse(blockId, Seq[String]("Unknown")))
      }

    val dataDistribution = if (includeDetails) {
      Some(storageStatusList.map { status =>
        new RDDDataDistribution(
          address = status.blockManagerId.hostPort,
          memoryUsed = status.memUsedByRdd(rddId),
          memoryRemaining = status.memRemaining,
          diskUsed = status.diskUsedByRdd(rddId)
        ) } )
    } else {
      None
    }
    val partitions = if (includeDetails) {
      Some(blocks.map { case (id, block, locations) =>
        new RDDPartitionInfo(
          blockName = id.name,
          storageLevel = block.storageLevel.description,
          memoryUsed = block.memSize,
          diskUsed = block.diskSize,
          executors = locations
        )
      } )
    } else {
      None
    }

    new RDDStorageInfo(
      id = rddId,
      name = rddInfo.name,
      numPartitions = rddInfo.numPartitions,
      numCachedPartitions = rddInfo.numCachedPartitions,
      storageLevel = rddInfo.storageLevel.description,
      memoryUsed = rddInfo.memSize,
      diskUsed = rddInfo.diskSize,
      dataDistribution = dataDistribution,
      partitions = partitions
    )
  }
} 
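AllRDDResource backs /api/v1/applications/[app-id]/storage/rdd. Because rddList() calls getRDDStorageInfo with includeDetails = false, the listing omits dataDistribution and partitions; those only appear when a single RDD is requested via /storage/rdd/[rdd-id]. A hedged fetch of both forms; the address, application id, and RDD id are placeholders.

import scala.io.Source

object FetchRddStorage {
  def main(args: Array[String]): Unit = {
    val appId = "app-20170101000000-0000"          // placeholder application id
    val base  = s"http://localhost:4040/api/v1/applications/$appId/storage/rdd"

    // Listing: one summary per cached RDD, without dataDistribution/partitions.
    println(Source.fromURL(base).mkString)

    // Detail for a single RDD (includeDetails = true on the server side),
    // which adds per-executor dataDistribution and per-block partitions.
    val rddId = 0                                  // placeholder RDD id
    println(Source.fromURL(s"$base/$rddId").mkString)
  }
}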
Example 37
Source File: EventLogDownloadResource.scala    From multi-tenancy-spark   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.status.api.v1

import java.io.OutputStream
import java.util.zip.ZipOutputStream
import javax.ws.rs.{GET, Produces}
import javax.ws.rs.core.{MediaType, Response, StreamingOutput}

import scala.util.control.NonFatal

import org.apache.spark.SparkConf
import org.apache.spark.deploy.SparkHadoopUtil
import org.apache.spark.internal.Logging

@Produces(Array(MediaType.APPLICATION_OCTET_STREAM))
private[v1] class EventLogDownloadResource(
    val uIRoot: UIRoot,
    val appId: String,
    val attemptId: Option[String]) extends Logging {
  val conf = SparkHadoopUtil.get.newConfiguration(new SparkConf)

  @GET
  def getEventLogs(): Response = {
    try {
      val fileName = {
        attemptId match {
          case Some(id) => s"eventLogs-$appId-$id.zip"
          case None => s"eventLogs-$appId.zip"
        }
      }

      val stream = new StreamingOutput {
        override def write(output: OutputStream): Unit = {
          val zipStream = new ZipOutputStream(output)
          try {
            uIRoot.writeEventLogs(appId, attemptId, zipStream)
          } finally {
            zipStream.close()
          }

        }
      }

      Response.ok(stream)
        .header("Content-Disposition", s"attachment; filename=$fileName")
        .header("Content-Type", MediaType.APPLICATION_OCTET_STREAM)
        .build()
    } catch {
      case NonFatal(e) =>
        Response.serverError()
          .entity(s"Event logs are not available for app: $appId.")
          .status(Response.Status.SERVICE_UNAVAILABLE)
          .build()
    }
  }
} 
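EventLogDownloadResource streams a zip of the application's event logs over /api/v1/applications/[app-id]/logs (or .../[attempt-id]/logs for a single attempt), with the file name supplied in the Content-Disposition header. A hedged sketch that saves the attachment to disk; the history-server address and application id are placeholders.

import java.io.FileOutputStream
import java.net.URL

object DownloadEventLogs {
  def main(args: Array[String]): Unit = {
    val appId = "app-20170101000000-0000"          // placeholder application id
    val url   = new URL(s"http://localhost:18080/api/v1/applications/$appId/logs")

    val in  = url.openStream()
    val out = new FileOutputStream(s"eventLogs-$appId.zip")
    try {
      // Copy the zip stream to the local file in 8 KB chunks.
      val buf = new Array[Byte](8192)
      Iterator
        .continually(in.read(buf))
        .takeWhile(_ != -1)
        .foreach(out.write(buf, 0, _))
    } finally {
      in.close()
      out.close()
    }
  }
}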
Example 38
Source File: ApplicationListResource.scala    From multi-tenancy-spark   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.status.api.v1

import java.util.{Date, List => JList}
import javax.ws.rs.{DefaultValue, GET, Produces, QueryParam}
import javax.ws.rs.core.MediaType

import org.apache.spark.deploy.history.ApplicationHistoryInfo

@Produces(Array(MediaType.APPLICATION_JSON))
private[v1] class ApplicationListResource(uiRoot: UIRoot) {

  @GET
  def appList(
      @QueryParam("status") status: JList[ApplicationStatus],
      @DefaultValue("2010-01-01") @QueryParam("minDate") minDate: SimpleDateParam,
      @DefaultValue("3000-01-01") @QueryParam("maxDate") maxDate: SimpleDateParam,
      @QueryParam("limit") limit: Integer)
  : Iterator[ApplicationInfo] = {

    val numApps = Option(limit).map(_.toInt).getOrElse(Integer.MAX_VALUE)
    val includeCompleted = status.isEmpty || status.contains(ApplicationStatus.COMPLETED)
    val includeRunning = status.isEmpty || status.contains(ApplicationStatus.RUNNING)

    uiRoot.getApplicationInfoList.filter { app =>
      val anyRunning = app.attempts.exists(!_.completed)
      // if any attempt is still running, we consider the app to also still be running;
      // keep the app if *any* attempts fall in the right time window
      ((!anyRunning && includeCompleted) || (anyRunning && includeRunning)) &&
      app.attempts.exists { attempt =>
        val start = attempt.startTime.getTime
        start >= minDate.timestamp && start <= maxDate.timestamp
      }
    }.take(numApps)
  }
}

private[spark] object ApplicationsListResource {
  def appHistoryInfoToPublicAppInfo(app: ApplicationHistoryInfo): ApplicationInfo = {
    new ApplicationInfo(
      id = app.id,
      name = app.name,
      coresGranted = None,
      maxCores = None,
      coresPerExecutor = None,
      memoryPerExecutorMB = None,
      attempts = app.attempts.map { internalAttemptInfo =>
        new ApplicationAttemptInfo(
          attemptId = internalAttemptInfo.attemptId,
          startTime = new Date(internalAttemptInfo.startTime),
          endTime = new Date(internalAttemptInfo.endTime),
          duration =
            if (internalAttemptInfo.endTime > 0) {
              internalAttemptInfo.endTime - internalAttemptInfo.startTime
            } else {
              0
            },
          lastUpdated = new Date(internalAttemptInfo.lastUpdated),
          sparkUser = internalAttemptInfo.sparkUser,
          completed = internalAttemptInfo.completed
        )
      }
    )
  }
} 
Example 39
Source File: OneJobResource.scala    From multi-tenancy-spark   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.status.api.v1

import javax.ws.rs.{GET, PathParam, Produces}
import javax.ws.rs.core.MediaType

import org.apache.spark.JobExecutionStatus
import org.apache.spark.ui.SparkUI
import org.apache.spark.ui.jobs.UIData.JobUIData

@Produces(Array(MediaType.APPLICATION_JSON))
private[v1] class OneJobResource(ui: SparkUI) {

  @GET
  def oneJob(@PathParam("jobId") jobId: Int): JobData = {
    val statusToJobs: Seq[(JobExecutionStatus, Seq[JobUIData])] =
      AllJobsResource.getStatusToJobs(ui)
    val jobOpt = statusToJobs.flatMap(_._2).find { jobInfo => jobInfo.jobId == jobId}
    jobOpt.map { job =>
      AllJobsResource.convertJobData(job, ui.jobProgressListener, false)
    }.getOrElse {
      throw new NotFoundException("unknown job: " + jobId)
    }
  }

}