org.joda.time.Interval Scala Examples
The following examples show how to use org.joda.time.Interval.
Each example is taken from an open-source project; the source file, project, and license are noted above each snippet.
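Before turning to the project code, here is a minimal, self-contained sketch of the basic Interval operations these examples build on: constructing an interval from two instants, membership and overlap checks, and conversion to a Duration. The dates are arbitrary and only for illustration.

import org.joda.time.{DateTime, Duration, Interval}

// An Interval is a half-open range [start, end) between two instants on the time line.
val jan: Interval = new Interval(
  new DateTime(2016, 1, 1, 0, 0), new DateTime(2016, 2, 1, 0, 0))

jan.contains(new DateTime(2016, 1, 15, 12, 0)) // true: the instant falls inside the range
jan.contains(new DateTime(2016, 2, 1, 0, 0))   // false: the end instant is exclusive

val feb = new Interval(new DateTime(2016, 2, 1, 0, 0), new DateTime(2016, 3, 1, 0, 0))
jan.overlaps(feb)                              // false: the intervals only touch
jan.abuts(feb)                                 // true: they share an endpoint

val elapsed: Duration = jan.toDuration         // length of the interval as a Duration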
Example 1
Source File: TimeSeriesQuery.scala From XSQL with Apache License 2.0
package org.apache.spark.sql.execution.datasources.druid

import org.joda.time.{DateTime, Interval}
import org.json4s.JsonAST._
import org.json4s.JsonDSL._

case class TimeSeriesQuery(
    source: String,
    interval: Interval,
    descending: String = "false",
    granularity: Granularity,
    aggregate: Seq[Aggregation],
    postAggregate: Seq[PostAggregation] = Nil,
    filter: QueryFilter = QueryFilter.All) {

  val g: JValue = granularity match {
    case SimpleGranularity(name) => name
    case p: PeriodGranularity => p.toJson
    case d: DurationGranularity => d.toJson
  }

  def toJson: JValue = {
    JObject(
      "queryType" -> "timeseries",
      "dataSource" -> source,
      "descending" -> descending,
      "granularity" -> g,
      "aggregations" -> aggregate.map(_.toJson),
      "postAggregations" -> postAggregate.map(_.toJson),
      "intervals" -> Time.intervalToString(interval),
      "filter" -> filter.toJson)
  }
}

case class TimeSeriesResponse(data: Seq[(DateTime, Map[String, Any])])

object TimeSeriesResponse {
  implicit val formats = org.json4s.DefaultFormats

  def parse(js: JValue): TimeSeriesResponse = {
    js match {
      case JArray(results) =>
        val data = results.map { r =>
          val time = Time.parse((r \ "timestamp").extract[String])
          val values = (r \ "result").asInstanceOf[JObject].values
          time -> values
        }
        TimeSeriesResponse(data)
      case JNothing => TimeSeriesResponse(null)
      case err @ _ =>
        throw new IllegalArgumentException("Invalid time series response: " + err)
    }
  }
}
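For orientation, a hedged usage sketch of the query type above: it only exercises the constructor and toJson, and it assumes that SimpleGranularity wraps a plain granularity name string (as the pattern match on g suggests); the data source name and granularity value are made up. The GroupByQuery, TopNSelectQuery, and SelectQuery examples below accept their interval in the same way.

import org.joda.time.{DateTime, Interval}

// Hypothetical query over the last seven days at an assumed "hour" granularity.
val lastWeek = new Interval(DateTime.now().minusDays(7), DateTime.now())
val query = TimeSeriesQuery(
  source = "wikipedia",                    // illustrative data source name
  interval = lastWeek,
  granularity = SimpleGranularity("hour"), // assumed to take a granularity name string
  aggregate = Nil)                         // no aggregations; defaults for the rest
val body = query.toJson                    // JSON body to send to the Druid broker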
Example 2
Source File: GroupByQuery.scala From XSQL with Apache License 2.0
package org.apache.spark.sql.execution.datasources.druid

import org.joda.time.{DateTime, Interval}
import org.json4s.JsonAST._
import org.json4s.JsonDSL._

case class GroupByQuery(
    source: String,
    interval: Interval,
    granularity: Granularity,
    dimensions: Seq[String],
    aggregate: Seq[Aggregation],
    postAggregate: Seq[PostAggregation] = Nil,
    filter: QueryFilter = QueryFilter.All,
    orderBy: Seq[ColumnOrder] = Nil,
    limit: Option[Int] = None) {

  val g: JValue = granularity match {
    case SimpleGranularity(name) => name
    case p: PeriodGranularity => p.toJson
    case d: DurationGranularity => d.toJson
  }

  def toJson: JValue = {
    JObject(
      "queryType" -> "groupBy",
      "dataSource" -> source,
      "granularity" -> g,
      "dimensions" -> dimensions,
      "aggregations" -> aggregate.map(_.toJson),
      "postAggregations" -> postAggregate.map(_.toJson),
      "intervals" -> Time.intervalToString(interval),
      "filter" -> filter.toJson,
      "limitSpec" -> OrderBy(orderBy, limit).toJson)
  }
}

case class GroupByResponse(data: Seq[(DateTime, Map[String, Any])])

object GroupByResponse {
  implicit val formats = org.json4s.DefaultFormats

  def parse(js: JValue): GroupByResponse = {
    js match {
      case JArray(results) =>
        val data = results.map { r =>
          val timestamp = Time.parse((r \ "timestamp").extract[String])
          val values = (r \ "event").asInstanceOf[JObject].values
          timestamp -> values
        }
        GroupByResponse(data)
      case JNothing => GroupByResponse(null)
      case err @ _ =>
        throw new IllegalArgumentException("Invalid group by response: " + err)
    }
  }
}
Example 3
Source File: TopNSelectQuery.scala From XSQL with Apache License 2.0
package org.apache.spark.sql.execution.datasources.druid

import org.joda.time.{DateTime, Interval}
import org.json4s.JsonAST._
import org.json4s.JsonDSL._

case class TopNSelectQuery(
    source: String,
    dimension: String,
    metric: Metric,
    interval: Interval,
    granularity: Granularity,
    aggregate: Seq[Aggregation],
    postAggregate: Seq[PostAggregation] = Nil,
    filter: QueryFilter = QueryFilter.All,
    limit: Int = 20) {

  val g: JValue = granularity match {
    case SimpleGranularity(name) => name
    case p: PeriodGranularity => p.toJson
    case d: DurationGranularity => d.toJson
  }

  def toJson: JValue = {
    JObject(
      "queryType" -> "topN",
      "dataSource" -> source,
      "dimension" -> dimension,
      "metric" -> metric.toJson,
      "granularity" -> g,
      "aggregations" -> aggregate.map(_.toJson),
      "postAggregations" -> postAggregate.map(_.toJson),
      "intervals" -> Time.intervalToString(interval),
      "filter" -> filter.toJson,
      "threshold" -> limit)
  }
}

case class TopNSelectResponse(data: Seq[Map[String, Any]])

object TopNSelectResponse {
  implicit val formats = org.json4s.DefaultFormats

  def parse(js: JValue): TopNSelectResponse = {
    val jss = js \ "result"
    jss match {
      case JArray(results) =>
        val data = results.map { r =>
          r.asInstanceOf[JObject].values
        }
        TopNSelectResponse(data)
      case JNothing => TopNSelectResponse(null)
      case err @ _ =>
        throw new IllegalArgumentException("Invalid topN response: " + err)
    }
  }
}

case class TopN2SelectResponse(data: Seq[(DateTime, Seq[Map[String, Any]])])

object TopN2SelectResponse {
  implicit val formats = org.json4s.DefaultFormats

  def parse(js: JValue): TopN2SelectResponse = {
    js match {
      case JArray(results) =>
        val data = results.map { r =>
          val time = Time.parse((r \ "timestamp").extract[String])
          var map = Map.empty[String, Any]
          (r \ "result") match {
            case JArray(arr) =>
              val d = arr.map { a =>
                a.asInstanceOf[JObject].values
              }
              time -> d
          }
        }
        TopN2SelectResponse(data)
      case JNothing => TopN2SelectResponse(null)
      case err @ _ =>
        throw new IllegalArgumentException("Invalid time series response: " + err)
    }
  }
}
Example 4
Source File: SelectQuery.scala From XSQL with Apache License 2.0
package org.apache.spark.sql.execution.datasources.druid

import org.joda.time.Interval
import org.json4s.JsonAST._
import org.json4s.JsonDSL._
import org.json4s.jackson.JsonMethods._

case class SelectQuery(
    source: String,
    interval: Interval,
    descending: String = "false",
    granularity: Granularity,
    dimensions: Array[String],
    filter: QueryFilter = QueryFilter.All,
    limit: PagingSpec = PagingSpec(null, 20)) {

  val g: JValue = granularity match {
    case SimpleGranularity(name) => name
    case p: PeriodGranularity => p.toJson
    case d: DurationGranularity => d.toJson
  }

  def toJson: JValue = {
    JObject(
      "queryType" -> "select",
      "dataSource" -> source,
      "granularity" -> g,
      "descending" -> descending,
      "dimensions" -> render(dimensions.toList),
      "intervals" -> Time.intervalToString(interval),
      "filter" -> filter.toJson,
      "pagingSpec" -> limit.toJson)
  }
}

case class SelectResponse(data: Seq[Map[String, Any]])

object SelectResponse {
  implicit val formats = org.json4s.DefaultFormats

  def parse(js: JValue): SelectResponse = {
    val jss = js \ "result" \ "events"
    jss match {
      case JArray(results) =>
        val data = results.map { r =>
          (r \ "event").asInstanceOf[JObject].values
        }
        SelectResponse(data)
      case JNothing => SelectResponse(null)
      case err @ _ =>
        throw new IllegalArgumentException("Invalid select response: " + err)
    }
  }
}
Example 5
Source File: package.scala From pureconfig with Mozilla Public License 2.0
package pureconfig.module

import org.joda.time.{ DateTimeZone, Duration, Instant, Interval }
import org.joda.time.format.{ DateTimeFormat, DateTimeFormatter }
import pureconfig.{ ConfigConvert, ConfigReader }
import pureconfig.ConfigConvert.{ catchReadError, viaNonEmptyString }

package object joda {

  implicit def instantConfigConvert: ConfigConvert[Instant] =
    ConfigConvert[Long].xmap(new Instant(_), _.getMillis)

  implicit def intervalConfigConvert: ConfigConvert[Interval] =
    viaNonEmptyString[Interval](
      catchReadError(Interval.parseWithOffset), _.toString)

  implicit def durationConfigConvert: ConfigConvert[Duration] =
    viaNonEmptyString[Duration](
      catchReadError(Duration.parse), _.toString)

  implicit def dateTimeFormatterConfigConvert: ConfigReader[DateTimeFormatter] =
    ConfigReader.fromNonEmptyString[DateTimeFormatter](
      catchReadError(DateTimeFormat.forPattern))

  implicit def dateTimeZoneConfigConvert: ConfigConvert[DateTimeZone] =
    viaNonEmptyString[DateTimeZone](
      catchReadError(DateTimeZone.forID), _.getID)
}
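The intervalConfigConvert above round-trips an Interval through Interval.parseWithOffset, which accepts an ISO-8601 interval string with the start and end separated by a slash. A small sketch of the string form it expects; the dates are made up:

import org.joda.time.Interval

// Parse an ISO-8601 "start/end" interval string, keeping the offsets given in the text.
val i: Interval = Interval.parseWithOffset("2018-01-01T00:00:00Z/2018-02-01T00:00:00Z")
i.toString // serialises back to a slash-separated ISO-8601 interval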
Example 6
Source File: Utils.scala From spark-datetime with Apache License 2.0
package org.sparklinedata.spark.dateTime

import org.joda.time.{DateTime, Period, Interval}

import scala.collection.mutable.ArrayBuffer

object Utils {

  def intervalToSeq(i: Interval, p: Period): Seq[DateTime] = {
    import com.github.nscala_time.time.Imports._
    val b = new ArrayBuffer[DateTime]
    var d = i.getStart
    while (d < i.getEnd) {
      b += d
      d = d + p
    }
    b.toSeq
  }
}
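A short usage sketch of intervalToSeq: it walks the interval from its start in steps of the given Period and stops before the (exclusive) end. The dates below are arbitrary.

import org.joda.time.{DateTime, Interval, Period}

// Enumerate the start of each day in a one-week interval (the end instant is excluded).
val week = new Interval(new DateTime(2016, 1, 1, 0, 0), new DateTime(2016, 1, 8, 0, 0))
val days = Utils.intervalToSeq(week, Period.days(1))
// days: 2016-01-01, 2016-01-02, ..., 2016-01-07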
Example 7
Source File: DruidMetadataCommands.scala From spark-druid-olap with Apache License 2.0
package org.apache.spark.sql.sparklinedata.commands

import org.apache.spark.sql.catalyst.expressions.Attribute
import org.apache.spark.sql.execution.command.RunnableCommand
import org.apache.spark.sql.hive.sparklinedata.SPLSessionState
import org.apache.spark.sql.sources.druid.{DruidPlanner, DruidQueryCostModel}
import org.apache.spark.sql.types._
import org.apache.spark.sql.util.PlanUtil
import org.apache.spark.sql.{Row, SQLContext, SparkSession}
import org.joda.time.Interval
import org.sparklinedata.druid.metadata.{DruidMetadataCache, DruidRelationName, DruidRelationOptions}

case class ClearMetadata(druidHost: Option[String]) extends RunnableCommand {

  override val output: Seq[Attribute] = {
    val schema = StructType(
      StructField("", StringType, nullable = true) :: Nil)
    schema.toAttributes
  }

  override def run(sparkSession: SparkSession): Seq[Row] = {
    if (druidHost.isDefined) {
      DruidMetadataCache.clearCache(druidHost.get)
    } else {
      DruidMetadataCache.clearCache
    }
    Seq(Row(""))
  }
}

case class ExplainDruidRewrite(sql: String) extends RunnableCommand {

  override val output: Seq[Attribute] = {
    val schema = StructType(
      StructField("", StringType, nullable = true) :: Nil)
    schema.toAttributes
  }

  override def run(sparkSession: SparkSession): Seq[Row] = {
    val qe = sparkSession.sessionState.executeSql(sql)

    qe.sparkPlan.toString().split("\n").map(Row(_)).toSeq ++
      Seq(Row("")) ++
      DruidPlanner.getDruidRDDs(qe.sparkPlan).flatMap { dR =>
        val druidDSIntervals = dR.drDSIntervals
        val druidDSFullName = dR.drFullName
        val druidDSOptions = dR.drOptions
        val inputEstimate = dR.inputEstimate
        val outputEstimate = dR.outputEstimate
        s"""DruidQuery(${System.identityHashCode(dR.dQuery)}) details ::
           |${DruidQueryCostModel.computeMethod(
               sparkSession.sqlContext,
               druidDSIntervals,
               druidDSFullName,
               druidDSOptions,
               inputEstimate,
               outputEstimate,
               dR.dQuery.q)
            }
         """.stripMargin.split("\n").map(Row(_))
      }
  }
}
Example 8
Source File: DruidRelation.scala From spark-druid-olap with Apache License 2.0
package org.sparklinedata.druid

import org.apache.spark.rdd.RDD
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.expressions.{Attribute, AttributeReference, ExprId}
import org.apache.spark.sql.sources.{BaseRelation, TableScan}
import org.apache.spark.sql.types.{DataType, StructField, StructType}
import org.apache.spark.sql.{Row, SQLContext}
import org.joda.time.Interval
import org.sparklinedata.druid.metadata.DruidRelationInfo

case class DruidOperatorAttribute(exprId: ExprId, name: String, dataType: DataType,
                                  tf: String = null)

// The declaration of the enclosing relation class is elided in this excerpt; the members
// below override BaseRelation/TableScan and refer to its info, dQuery and sqlContext fields.

  override val needConversion: Boolean = false

  override def schema: StructType =
    dQuery.map(_.schema(info)).getOrElse(info.sourceDF(sqlContext).schema)

  def buildInternalScan: RDD[InternalRow] =
    dQuery.map(new DruidRDD(sqlContext, info, _)).getOrElse(
      info.sourceDF(sqlContext).queryExecution.toRdd
    )

  override def buildScan(): RDD[Row] =
    buildInternalScan.asInstanceOf[RDD[Row]]

  override def toString: String = {
    if (dQuery.isDefined) {
      s"DruidQuery(${System.identityHashCode(dQuery)}): ${Utils.queryToString(dQuery.get)}"
    } else {
      info.toString
    }
  }
}
Example 9
Source File: BacktestBenchmarks.scala From Scala-High-Performance-Programming with MIT License
package highperfscala.concurrency.backtesting

import java.util.concurrent.TimeUnit

import highperfscala.concurrency.backtesting.Backtest.{BacktestPerformanceSummary, DecisionDelayMillis}
import org.joda.time.{DateTime, Interval, MonthDay}
import org.openjdk.jmh.annotations.Mode._
import org.openjdk.jmh.annotations._

import scala.annotation.tailrec

@BenchmarkMode(Array(Throughput))
@OutputTimeUnit(TimeUnit.SECONDS)
@Warmup(iterations = 3, time = 5, timeUnit = TimeUnit.SECONDS)
@Measurement(iterations = 30, time = 10, timeUnit = TimeUnit.SECONDS)
@Fork(value = 1, warmups = 1, jvmArgs = Array("-Xms1G", "-Xmx1G"))
class BacktestBenchmarks {

  import BacktestBenchmarks._

  @Benchmark
  def withoutConcurrency(state: BenchmarkState): BacktestPerformanceSummary =
    Backtest.backtestWithoutConcurrency(state.backtestDays, state.decisionDelay)
      .unsafePerformSync

  @Benchmark
  def withBatchedForking(state: BenchmarkState): BacktestPerformanceSummary =
    Backtest.backtestWithBatchedForking(state.backtestDays, state.decisionDelay)
      .unsafePerformSync

  @Benchmark
  def withAllForked(state: BenchmarkState): BacktestPerformanceSummary =
    Backtest.backtestWithAllForked(state.backtestDays, state.decisionDelay)
      .unsafePerformSync
}

object BacktestBenchmarks {

  private def daysWithin(i: Interval): List[MonthDay] = {
    @tailrec
    def recurse(xs: List[MonthDay], current: DateTime): List[MonthDay] =
      current.isAfter(i.getEnd) match {
        case true => xs
        case false => recurse(
          new MonthDay(current.getMonthOfYear, current.getDayOfMonth) :: xs,
          current.plusDays(1))
      }
    recurse(Nil, i.getStart)
  }

  // Constant starting point to avoid differences due to number of days
  // per month
  private val end: DateTime = new DateTime(2016, 1, 1, 0, 0, 0, 0)

  private def trailingMonths(backtestIntervalMonths: Int): Interval =
    new Interval(
      end.minusMonths(backtestIntervalMonths), end)

  @State(Scope.Benchmark)
  class BenchmarkState {
    @Param(Array("1", "10"))
    var decisionDelayMillis: Long = 0
    @Param(Array("1", "12", "24"))
    var backtestIntervalMonths: Int = 0
    var decisionDelay: DecisionDelayMillis = DecisionDelayMillis(-1)
    var backtestDays: List[MonthDay] = Nil

    @Setup
    def setup(): Unit = {
      decisionDelay = DecisionDelayMillis(decisionDelayMillis)
      backtestDays = daysWithin(trailingMonths(backtestIntervalMonths))
    }
  }
}
Example 10
Source File: Backtesting.scala From Scala-High-Performance-Programming with MIT License
package highperfscala.concurrency.backtesting

import java.util.concurrent.TimeUnit

import org.joda.time.{DateTime, Interval}

import scala.concurrent.{Await, Future}

object Backtesting {

  sealed trait Strategy
  case class PnL(value: BigDecimal) extends AnyVal
  case class BacktestPerformanceSummary(pnl: PnL)
  case class Ticker(value: String) extends AnyVal

  def backtest(
    strategy: Strategy,
    ticker: Ticker,
    testInterval: Interval): BacktestPerformanceSummary = ???

  sealed trait VectorBasedReturnSeriesFrame

  def loadReturns(testInterval: Interval): VectorBasedReturnSeriesFrame = ???

  case object Dave1 extends Strategy
  case object Dave2 extends Strategy

  object Serial {
    def lastMonths(months: Int): Interval =
      new Interval(new DateTime().minusMonths(months), new DateTime())

    backtest(Dave1, Ticker("AAPL"), lastMonths(3))
    backtest(Dave1, Ticker("GOOG"), lastMonths(3))
    backtest(Dave2, Ticker("AAPL"), lastMonths(3))
    backtest(Dave2, Ticker("GOOG"), lastMonths(2))
  }

  object ForComprehension {
    def lastMonths(months: Int): Interval =
      new Interval(new DateTime().minusMonths(months), new DateTime())

    implicit val ec = scala.concurrent.ExecutionContext.Implicits.global

    val summariesF = for {
      firstDaveAapl <- Future(backtest(Dave1, Ticker("AAPL"), lastMonths(3)))
      firstDaveGoog <- Future(backtest(Dave1, Ticker("GOOG"), lastMonths(3)))
      secondDaveAapl <- Future(backtest(Dave2, Ticker("AAPL"), lastMonths(3)))
      secondDaveGoog <- Future(backtest(Dave2, Ticker("GOOG"), lastMonths(2)))
    } yield (firstDaveAapl, firstDaveGoog, secondDaveAapl, secondDaveGoog)

    Await.result(summariesF, scala.concurrent.duration.Duration(1, TimeUnit.SECONDS))

    Future(1).flatMap(f1 =>
      Future(2).flatMap(f2 =>
        Future(3).map(f3 =>
          (f1, f2, f3))))
  }

  object Concurrency {
    def lastMonths(months: Int): Interval =
      new Interval(new DateTime().minusMonths(months), new DateTime())

    implicit val ec = scala.concurrent.ExecutionContext.Implicits.global

    val firstDaveAaplF = Future(backtest(Dave1, Ticker("AAPL"), lastMonths(3)))
    val firstDaveGoogF = Future(backtest(Dave1, Ticker("GOOG"), lastMonths(3)))
    val secondDaveAaplF = Future(backtest(Dave2, Ticker("AAPL"), lastMonths(3)))
    val secondDaveGoogF = Future(backtest(Dave2, Ticker("GOOG"), lastMonths(2)))

    val z = for {
      firstDaveAapl <- firstDaveAaplF
      firstDaveGoog <- firstDaveGoogF
      secondDaveAapl <- secondDaveAaplF
      secondDaveGoog <- secondDaveGoogF
    } yield (firstDaveAapl, firstDaveGoog, secondDaveAapl, secondDaveGoog)
  }
}
Example 11
Source File: PerformanceReporting.scala From Scala-High-Performance-Programming with MIT License
package highperfscala.clientreports.views

import org.joda.time.{Duration, Instant, Interval}

object PerformanceReporting {

  def trend(
    now: () => Instant,
    findOrders: (Interval, Ticker) => List[Order],
    findExecutions: (Interval, Ticker) => List[Execution],
    request: GenerateTradingPerformanceTrend): List[TradingPerformanceTrend] = {

    def periodPnL(
      duration: Duration): Map[Ticker, PeriodPnL] = {
      val currentTime = now()
      val interval = new Interval(currentTime.minus(duration), currentTime)
      (for {
        ticker <- request.tickers
        orders = findOrders(interval, ticker)
        executions = findExecutions(interval, ticker)
        idToExecPrice = executions.groupBy(_.id).mapValues(es =>
          Price.average(es.map(_.price)))
        signedExecutionPrices = for {
          o <- orders
          if o.clientId == request.clientId
          price <- idToExecPrice.get(o.id).map(p => o match {
            case _: BuyOrder => Price(p.value * -1)
            case _: SellOrder => p
          }).toList
        } yield price
        trend = signedExecutionPrices.foldLeft(PnL.zero) {
          case (pnl, p) => PnL(pnl.value + p.value)
        } match {
          case p if p.value >= PnL.zero.value => PeriodPositive
          case _ => PeriodNegative
        }
      } yield ticker -> trend).toMap
    }

    val tickerToLastHour = periodPnL(Duration.standardHours(1)).mapValues {
      case PeriodPositive => LastHourPositive
      case PeriodNegative => LastHourNegative
    }
    val tickerToLastDay = periodPnL(Duration.standardDays(1)).mapValues {
      case PeriodPositive => LastDayPositive
      case PeriodNegative => LastDayNegative
    }
    val tickerToLastSevenDays = periodPnL(Duration.standardDays(7)).mapValues {
      case PeriodPositive => LastSevenDayPositive
      case PeriodNegative => LastSevenDayNegative
    }

    tickerToLastHour.zip(tickerToLastDay).zip(tickerToLastSevenDays).map({
      case (((t, lastHour), (_, lastDay)), (_, lastSevenDays)) =>
        TradingPerformanceTrend(t, lastHour, lastDay, lastSevenDays)
    }).toList
  }
}
Example 12
Source File: ViewPerformanceReporting.scala From Scala-High-Performance-Programming with MIT License
package highperfscala.clientreports.views

import org.joda.time.{Duration, Instant, Interval}

object ViewPerformanceReporting {

  def trend(
    now: () => Instant,
    findOrders: (Interval, Ticker) => List[Order],
    findExecutions: (Interval, Ticker) => List[Execution],
    request: GenerateTradingPerformanceTrend): List[TradingPerformanceTrend] = {

    def periodPnL(
      duration: Duration): Map[Ticker, PeriodPnL] = {
      val currentTime = now()
      val interval = new Interval(currentTime.minus(duration), currentTime)
      (for {
        ticker <- request.tickers
        orders = findOrders(interval, ticker)
        executions = findExecutions(interval, ticker)
        idToExecPrice = executions.groupBy(_.id).mapValues(es =>
          Price.average(es.map(_.price)))
        signedExecutionPrices = for {
          o <- orders.view
          if o.clientId == request.clientId
          price <- idToExecPrice.get(o.id).map(p => o match {
            case _: BuyOrder => Price(p.value * -1)
            case _: SellOrder => p
          }).toList
        } yield price
        trend = signedExecutionPrices.foldLeft(PnL.zero) {
          case (pnl, p) => PnL(pnl.value + p.value)
        } match {
          case p if p.value >= PnL.zero.value => PeriodPositive
          case _ => PeriodNegative
        }
      } yield ticker -> trend).toMap
    }

    val tickerToLastHour = periodPnL(Duration.standardHours(1)).mapValues {
      case PeriodPositive => LastHourPositive
      case PeriodNegative => LastHourNegative
    }
    val tickerToLastDay = periodPnL(Duration.standardDays(1)).mapValues {
      case PeriodPositive => LastDayPositive
      case PeriodNegative => LastDayNegative
    }
    val tickerToLastSevenDays = periodPnL(Duration.standardDays(7)).mapValues {
      case PeriodPositive => LastSevenDayPositive
      case PeriodNegative => LastSevenDayNegative
    }

    tickerToLastHour.zip(tickerToLastDay).zip(tickerToLastSevenDays).map({
      case (((t, lastHour), (_, lastDay)), (_, lastSevenDays)) =>
        TradingPerformanceTrend(t, lastHour, lastDay, lastSevenDays)
    }).toList
  }
}