java.io.Closeable Scala Examples

The following examples show how to use java.io.Closeable. They are drawn from open-source Scala projects; the project name, source file, and license are listed above each example.
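Before diving into the project sources, here is a minimal, self-contained sketch (not taken from any of the projects below) of the two ways java.io.Closeable typically appears in Scala code: implementing it on a resource class, and consuming it through a loan-pattern helper that guarantees close() is called. The Resource class and withCloseable helper are illustrative names only, not part of any library shown here.

import java.io.Closeable

// A hypothetical resource that owns something needing cleanup.
class Resource extends Closeable {
  def read(): String = "data"
  override def close(): Unit = println("resource closed")
}

object CloseableSketch {
  // Loan pattern: run `f` with the resource and always close it afterwards,
  // even if `f` throws.
  def withCloseable[C <: Closeable, A](resource: C)(f: C => A): A =
    try f(resource)
    finally resource.close()

  def main(args: Array[String]): Unit = {
    val result = withCloseable(new Resource)(_.read())
    println(result) // close() prints "resource closed" first, then "data" is printed here
  }
}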
Example 1
Source File: ConfigPrefix.scala    From quill   with Apache License 2.0
package io.getquill.codegen.util

import java.io.Closeable
import com.typesafe.config.ConfigFactory
import io.getquill.JdbcContextConfig
import javax.sql.DataSource
import OptionOps._

import scala.util.Try

sealed trait ConfigPrefix {
  def value: String
  def packagePath: String

  private[getquill] def loadConfig =
    ConfigFactory
      .load(getClass.getClassLoader, "application-codegen.conf")
      .getConfig(value)

  private[getquill] def makeDatasource: DataSource with Closeable =
    JdbcContextConfig(loadConfig).dataSource

}

object ConfigPrefix {
  sealed trait TestH2DB extends ConfigPrefix { val value = "testH2DB"; val packagePath = "h2" }
  sealed trait TestMysqlDB extends ConfigPrefix { val value = "testMysqlDB"; val packagePath = "mysql" }
  sealed trait TestOracleDB extends ConfigPrefix { val value = "testOracleDB"; val packagePath = "oracle" }
  sealed trait TestPostgresDB extends ConfigPrefix { val value = "testPostgresDB"; val packagePath = "postgres" }
  sealed trait TestSqliteDB extends ConfigPrefix { val value = "testSqliteDB"; val packagePath = "sqlite" }
  sealed trait TestSqlServerDB extends ConfigPrefix { val value = "testSqlServerDB"; val packagePath = "sqlserver" }

  case object TestH2DB extends TestH2DB
  case object TestMysqlDB extends TestMysqlDB
  case object TestOracleDB extends TestOracleDB
  case object TestPostgresDB extends TestPostgresDB
  case object TestSqliteDB extends TestSqliteDB
  case object TestSqlServerDB extends TestSqlServerDB

  def all = List(
    TestH2DB,
    TestMysqlDB,
    TestOracleDB,
    TestPostgresDB,
    TestSqliteDB,
    TestSqlServerDB
  )

  def fromValue(str: String): Try[ConfigPrefix] =
    ConfigPrefix.all.find(_.value == str).toTry(new IllegalArgumentException(s"Could not find the value: '${str}'"))
} 
Example 2
Source File: MetricsSystem.scala    From kyuubi   with Apache License 2.0
package yaooqinn.kyuubi.metrics

import java.io.Closeable
import java.lang.management.ManagementFactory
import java.util.concurrent.TimeUnit

import com.codahale.metrics._
import com.codahale.metrics.jvm._
import org.apache.kyuubi.Logging
import org.apache.spark.{KyuubiSparkUtil, SparkConf}
import org.apache.spark.KyuubiConf._

private[kyuubi] class MetricsSystem(conf: SparkConf) extends Logging {

  private val registry = new MetricRegistry

  registry.registerAll(new GarbageCollectorMetricSet)
  registry.registerAll(new MemoryUsageGaugeSet)
  registry.registerAll(new BufferPoolMetricSet(ManagementFactory.getPlatformMBeanServer))
  registry.registerAll(new ThreadStatesGaugeSet)
  registry.registerAll(new ClassLoadingGaugeSet)

  private val reportInterval = KyuubiSparkUtil.timeStringAsMs(conf.get(METRICS_REPORT_INTERVAL))

  private val reporter: Array[Closeable] =
    conf.get(METRICS_REPORTER).split(",").map(_.trim.toUpperCase).flatMap {
    case "CONSOLE" =>
      val reporter = ConsoleReporter.forRegistry(registry)
        .convertRatesTo(TimeUnit.SECONDS)
        .convertDurationsTo(TimeUnit.MILLISECONDS)
        .build()
      reporter.start(reportInterval, TimeUnit.MILLISECONDS)
      Some(reporter)
    case "JMX" =>
      val reporter = JmxReporter.forRegistry(registry)
        .convertRatesTo(TimeUnit.SECONDS)
        .convertDurationsTo(TimeUnit.MILLISECONDS)
        .build()
      reporter.start()
      Some(reporter)
    case "JSON" =>
      val reporter = new JsonFileReporter(conf, registry)
      reporter.start()
      Some(reporter)
    case other =>
      warn(s"$other as a metrics reporter is not supported yet")
      None
  }

  def registerGauge[T](name: String, value: => T, default: T): Unit = {
    registry.register(MetricRegistry.name(name), new Gauge[T] {
      override def getValue: T = Option(value).getOrElse(default)
    })
  }

  def close(): Unit = {
    reporter.foreach(_.close())
  }

  val OPEN_CONNECTIONS: Counter = registry.counter(MetricRegistry.name("open_connections"))
  val OPEN_OPERATIONS: Counter = registry.counter(MetricRegistry.name("open_operations"))
  val TOTAL_CONNECTIONS: Counter = registry.counter(MetricRegistry.name("total_connections"))
  val RUNNING_QUERIES: Counter = registry.counter(MetricRegistry.name("running_queries"))
  val ERROR_QUERIES: Counter = registry.counter(MetricRegistry.name("error_queries"))
  val TOTAL_QUERIES: Counter = registry.counter(MetricRegistry.name("total_queries"))
}

object MetricsSystem {

  private var maybeSystem: Option[MetricsSystem] = None

  def init(conf: SparkConf): Option[MetricsSystem] = {
    if (conf.get(METRICS_ENABLE).toBoolean) {
      val system = new MetricsSystem(conf)
      maybeSystem = Some(system)
      maybeSystem
    } else {
      None
    }
  }

  def get: Option[MetricsSystem] = maybeSystem

  def close(): Unit = {
    maybeSystem.foreach(_.close())
    maybeSystem = None
  }
} 
Example 3
Source File: EmbeddedService.scala    From affinity   with Apache License 2.0
package io.amient.affinity.kafka

import java.io.{Closeable, File}
import java.nio.file.Files

private[kafka] trait EmbeddedService extends Closeable {

  val testDir: File = Files.createTempDirectory(this.getClass.getSimpleName).toFile
  testDir.mkdirs()

  override def close(): Unit = {
    def deleteDirectory(f: File): Unit = if (f.exists) {
      if (f.isDirectory) f.listFiles.foreach(deleteDirectory)
      if (!f.delete) throw new RuntimeException(s"Failed to delete ${f.getAbsolutePath}")
    }
    deleteDirectory(testDir)
  }

} 
Example 4
Source File: Cache.scala    From quill   with Apache License 2.0
package io.getquill.util

import java.io.Closeable
import java.lang.System.{ currentTimeMillis => now }

import scala.concurrent.duration.Duration

class Cache[K, V <: Closeable] {

  private case class Entry(value: Option[V], expiration: Long)

  private var cache = Map[K, Entry]()

  def getOrElseUpdate(key: K, value: => Option[V], ttl: Duration): Option[V] =
    synchronized {
      evict()
      val expiration = now + ttl.toMillis
      cache.get(key) match {
        case Some(entry) =>
          cache += key -> entry.copy(expiration = expiration)
          entry.value
        case None =>
          val v = value
          cache += key -> Entry(v, expiration)
          v
      }
    }

  private def evict() =
    for ((key, Entry(value, expiration)) <- cache)
      if (now > expiration) {
        value.map(_.close)
        cache -= key
      }
} 
Example 5
Source File: OracleMonixJdbcContext.scala    From quill   with Apache License 2.0
package io.getquill

import java.io.Closeable

import com.typesafe.config.Config
import io.getquill.context.jdbc.OracleJdbcContextBase
import io.getquill.context.monix.{ MonixJdbcContext, Runner }
import io.getquill.util.LoadConfig
import javax.sql.DataSource

class OracleMonixJdbcContext[N <: NamingStrategy](
  val naming:     N,
  val dataSource: DataSource with Closeable,
  runner:         Runner
) extends MonixJdbcContext[OracleDialect, N](dataSource, runner)
  with OracleJdbcContextBase[N] {

  def this(naming: N, config: JdbcContextConfig, runner: Runner) = this(naming, config.dataSource, runner)
  def this(naming: N, config: Config, runner: Runner) = this(naming, JdbcContextConfig(config), runner)
  def this(naming: N, configPrefix: String, runner: Runner) = this(naming, LoadConfig(configPrefix), runner)
  def this(naming: N, configPrefix: String) = this(naming, LoadConfig(configPrefix), Runner.default)
} 
Example 6
Source File: PostgresMonixJdbcContext.scala    From quill   with Apache License 2.0
package io.getquill

import java.io.Closeable

import com.typesafe.config.Config
import io.getquill.context.jdbc.PostgresJdbcContextBase
import io.getquill.context.monix.{ MonixJdbcContext, Runner }
import io.getquill.util.LoadConfig
import javax.sql.DataSource

class PostgresMonixJdbcContext[N <: NamingStrategy](
  val naming:     N,
  val dataSource: DataSource with Closeable,
  runner:         Runner
) extends MonixJdbcContext[PostgresDialect, N](dataSource, runner)
  with PostgresJdbcContextBase[N] {

  def this(naming: N, config: JdbcContextConfig, runner: Runner) = this(naming, config.dataSource, runner)
  def this(naming: N, config: Config, runner: Runner) = this(naming, JdbcContextConfig(config), runner)
  def this(naming: N, configPrefix: String, runner: Runner) = this(naming, LoadConfig(configPrefix), runner)
  def this(naming: N, configPrefix: String) = this(naming, LoadConfig(configPrefix), Runner.default)
} 
Example 7
Source File: SqlServerMonixJdbcContext.scala    From quill   with Apache License 2.0
package io.getquill

import java.io.Closeable

import com.typesafe.config.Config
import io.getquill.context.jdbc.SqlServerJdbcContextBase
import io.getquill.context.monix.{ MonixJdbcContext, Runner }
import io.getquill.util.LoadConfig
import javax.sql.DataSource

class SqlServerMonixJdbcContext[N <: NamingStrategy](
  val naming:     N,
  val dataSource: DataSource with Closeable,
  runner:         Runner
) extends MonixJdbcContext[SQLServerDialect, N](dataSource, runner)
  with SqlServerJdbcContextBase[N] {

  def this(naming: N, config: JdbcContextConfig, runner: Runner) = this(naming, config.dataSource, runner)
  def this(naming: N, config: Config, runner: Runner) = this(naming, JdbcContextConfig(config), runner)
  def this(naming: N, configPrefix: String, runner: Runner) = this(naming, LoadConfig(configPrefix), runner)
  def this(naming: N, configPrefix: String) = this(naming, LoadConfig(configPrefix), Runner.default)
} 
Example 8
Source File: SqliteMonixJdbcContext.scala    From quill   with Apache License 2.0
package io.getquill

import java.io.Closeable

import com.typesafe.config.Config
import io.getquill.context.jdbc.SqliteJdbcContextBase
import io.getquill.context.monix.{ MonixJdbcContext, Runner }
import io.getquill.util.LoadConfig
import javax.sql.DataSource

class SqliteMonixJdbcContext[N <: NamingStrategy](
  val naming:     N,
  val dataSource: DataSource with Closeable,
  runner:         Runner
) extends MonixJdbcContext[SqliteDialect, N](dataSource, runner)
  with SqliteJdbcContextBase[N] {

  def this(naming: N, config: JdbcContextConfig, runner: Runner) = this(naming, config.dataSource, runner)
  def this(naming: N, config: Config, runner: Runner) = this(naming, JdbcContextConfig(config), runner)
  def this(naming: N, configPrefix: String, runner: Runner) = this(naming, LoadConfig(configPrefix), runner)
  def this(naming: N, configPrefix: String) = this(naming, LoadConfig(configPrefix), Runner.default)
} 
Example 9
Source File: H2MonixJdbcContext.scala    From quill   with Apache License 2.0
package io.getquill

import java.io.Closeable

import com.typesafe.config.Config
import io.getquill.context.jdbc.H2JdbcContextBase
import io.getquill.context.monix.{ MonixJdbcContext, Runner }
import io.getquill.util.LoadConfig
import javax.sql.DataSource

class H2MonixJdbcContext[N <: NamingStrategy](
  val naming:     N,
  val dataSource: DataSource with Closeable,
  runner:         Runner
) extends MonixJdbcContext[H2Dialect, N](dataSource, runner)
  with H2JdbcContextBase[N] {

  def this(naming: N, config: JdbcContextConfig, runner: Runner) = this(naming, config.dataSource, runner)
  def this(naming: N, config: Config, runner: Runner) = this(naming, JdbcContextConfig(config), runner)
  def this(naming: N, configPrefix: String, runner: Runner) = this(naming, LoadConfig(configPrefix), runner)
  def this(naming: N, configPrefix: String) = this(naming, LoadConfig(configPrefix), Runner.default)
} 
Example 10
Source File: MysqlMonixJdbcContext.scala    From quill   with Apache License 2.0
package io.getquill

import java.io.Closeable

import com.typesafe.config.Config
import io.getquill.context.jdbc.MysqlJdbcContextBase
import io.getquill.context.monix.{ MonixJdbcContext, Runner }
import io.getquill.util.LoadConfig
import javax.sql.DataSource

class MysqlMonixJdbcContext[N <: NamingStrategy](
  val naming:     N,
  val dataSource: DataSource with Closeable,
  runner:         Runner
) extends MonixJdbcContext[MySQLDialect, N](dataSource, runner)
  with MysqlJdbcContextBase[N] {

  def this(naming: N, config: JdbcContextConfig, runner: Runner) = this(naming, config.dataSource, runner)
  def this(naming: N, config: Config, runner: Runner) = this(naming, JdbcContextConfig(config), runner)
  def this(naming: N, configPrefix: String, runner: Runner) = this(naming, LoadConfig(configPrefix), runner)
  def this(naming: N, configPrefix: String) = this(naming, LoadConfig(configPrefix), Runner.default)
} 
Example 11
Source File: CacheSpec.scala    From quill   with Apache License 2.0
package io.getquill.util

import java.io.Closeable

import scala.concurrent.duration.DurationInt

import io.getquill.Spec

class CacheSpec extends Spec {

  class Value extends Closeable {
    var closes = 0
    override def close = closes += 1
  }

  "caches hits" in {
    val cache = new Cache[Int, Value]

    val value = new Value
    var calls = 0
    def _value = {
      calls += 1
      Some(value)
    }

    cache.getOrElseUpdate(1, _value, 1.second) mustEqual Some(value)
    cache.getOrElseUpdate(1, _value, 1.second) mustEqual Some(value)

    calls mustEqual 1
  }

  "caches misses" in {
    val cache = new Cache[Int, Value]

    var calls = 0
    def _value = {
      calls += 1
      None
    }

    cache.getOrElseUpdate(1, _value, 1.second) mustEqual None
    cache.getOrElseUpdate(1, _value, 1.second) mustEqual None

    calls mustEqual 1
  }

  "expires keys" in {
    val cache = new Cache[Int, Value]

    val value = new Value
    var calls = 0
    def _value = {
      calls += 1
      Some(value)
    }

    cache.getOrElseUpdate(1, _value, -1.milliseconds)
    cache.getOrElseUpdate(2, None, -1.milliseconds)

    calls mustEqual 1
    value.closes mustEqual 1

    cache.getOrElseUpdate(1, _value, 1.second)

    calls mustEqual 2
    value.closes mustEqual 1
  }
} 
Example 12
Source File: Context.scala    From quill   with Apache License 2.0
package io.getquill.context

import scala.language.higherKinds
import scala.language.experimental.macros
import io.getquill.dsl.CoreDsl
import io.getquill.util.Messages.fail
import java.io.Closeable

import scala.util.Try
import io.getquill.{ Query, Action, NamingStrategy, BatchAction, ReturnAction, ActionReturning }

trait Context[Idiom <: io.getquill.idiom.Idiom, Naming <: NamingStrategy]
  extends Closeable
  with CoreDsl {

  type Result[T]
  type RunQuerySingleResult[T]
  type RunQueryResult[T]
  type RunActionResult
  type RunActionReturningResult[T]
  type RunBatchActionResult
  type RunBatchActionReturningResult[T]
  type Session

  type Prepare = PrepareRow => (List[Any], PrepareRow)
  type Extractor[T] = ResultRow => T

  case class BatchGroup(string: String, prepare: List[Prepare])
  case class BatchGroupReturning(string: String, returningBehavior: ReturnAction, prepare: List[Prepare])

  def probe(statement: String): Try[_]

  def idiom: Idiom
  def naming: Naming

  def run[T](quoted: Quoted[T]): Result[RunQuerySingleResult[T]] = macro QueryMacro.runQuerySingle[T]
  def run[T](quoted: Quoted[Query[T]]): Result[RunQueryResult[T]] = macro QueryMacro.runQuery[T]
  def prepare[T](quoted: Quoted[Query[T]]): Session => Result[PrepareRow] = macro QueryMacro.prepareQuery[T]

  def run(quoted: Quoted[Action[_]]): Result[RunActionResult] = macro ActionMacro.runAction
  def run[T](quoted: Quoted[ActionReturning[_, T]]): Result[RunActionReturningResult[T]] = macro ActionMacro.runActionReturning[T]
  def run(quoted: Quoted[BatchAction[Action[_]]]): Result[RunBatchActionResult] = macro ActionMacro.runBatchAction
  def run[T](quoted: Quoted[BatchAction[ActionReturning[_, T]]]): Result[RunBatchActionReturningResult[T]] = macro ActionMacro.runBatchActionReturning[T]
  def prepare(quoted: Quoted[Action[_]]): Session => Result[PrepareRow] = macro ActionMacro.prepareAction
  def prepare(quoted: Quoted[BatchAction[Action[_]]]): Session => Result[List[PrepareRow]] = macro ActionMacro.prepareBatchAction

  protected val identityPrepare: Prepare = (Nil, _)
  protected val identityExtractor = identity[ResultRow] _

  protected def handleSingleResult[T](list: List[T]) =
    list match {
      case value :: Nil => value
      case other        => fail(s"Expected a single result but got $other")
    }
} 
Example 13
Source File: SchemaMaker.scala    From quill   with Apache License 2.0
package io.getquill.codegen.util

import java.io.Closeable

import io.getquill._
import io.getquill.codegen.integration.DbHelper
import javax.sql.DataSource
import org.scalatest.freespec.AnyFreeSpec

import scala.language.implicitConversions

abstract class CodegenSpec extends AnyFreeSpec with SchemaMaker {
  type Prefix <: ConfigPrefix
  val prefix: Prefix

  implicit def regToOption[T](t: T) = Some(t)
}

object SchemaMaker extends SchemaMaker

case class SchemaMakerCoordinates(dbPrefix: ConfigPrefix, naming: NamingStrategy, schemaConfig: SchemaConfig)

trait SchemaMaker {

  private[getquill] def withDatasource[T](schemaConfig: SchemaConfig, dbPrefix: ConfigPrefix)(testCode: DataSource with Closeable => T): T = {
    val ds = dbPrefix.makeDatasource
    val helper = new DbHelper(schemaConfig, dbPrefix, ds)
    DbHelper.dropTables(ds)
    helper.setup()
    testCode(ds)
  }

  def withContext[T](coords: SchemaMakerCoordinates)(testCode: => T): T = {
    import coords._
    withDatasource(schemaConfig, dbPrefix)(ds => {
      testCode
    })
  }
} 
Example 14
Source File: JsonFileReporter.scala    From kyuubi   with Apache License 2.0
package yaooqinn.kyuubi.metrics

import java.io.{BufferedWriter, Closeable, IOException, OutputStreamWriter}
import java.util.{Timer, TimerTask}
import java.util.concurrent.TimeUnit

import scala.util.Try
import scala.util.control.NonFatal

import com.codahale.metrics.MetricRegistry
import com.codahale.metrics.json.MetricsModule
import com.fasterxml.jackson.databind.ObjectMapper
import org.apache.hadoop.fs.{FileSystem, Path}
import org.apache.hadoop.fs.permission.FsPermission
import org.apache.kyuubi.Logging
import org.apache.spark.{KyuubiSparkUtil, SparkConf}
import org.apache.spark.KyuubiConf._

private[metrics] class JsonFileReporter(conf: SparkConf, registry: MetricRegistry)
  extends Closeable with Logging {

  private val jsonMapper = new ObjectMapper().registerModule(
    new MetricsModule(TimeUnit.MILLISECONDS, TimeUnit.MILLISECONDS, false))
  private val timer = new Timer(true)
  private val interval = KyuubiSparkUtil.timeStringAsMs(conf.get(METRICS_REPORT_INTERVAL))
  private val path = conf.get(METRICS_REPORT_LOCATION)
  private val hadoopConf = KyuubiSparkUtil.newConfiguration(conf)

  def start(): Unit = {
    timer.schedule(new TimerTask {
      var bw: BufferedWriter = _
      override def run(): Unit = try {
        val json = jsonMapper.writerWithDefaultPrettyPrinter().writeValueAsString(registry)
        val tmpPath = new Path(path + ".tmp")
        val tmpPathUri = tmpPath.toUri
        val fs = if (tmpPathUri.getScheme == null && tmpPathUri.getAuthority == null) {
          FileSystem.getLocal(hadoopConf)
        } else {
          FileSystem.get(tmpPathUri, hadoopConf)
        }
        fs.delete(tmpPath, true)
        bw = new BufferedWriter(new OutputStreamWriter(fs.create(tmpPath, true)))
        bw.write(json)
        bw.close()
        fs.setPermission(tmpPath, FsPermission.createImmutable(Integer.parseInt("644", 8).toShort))
        val finalPath = new Path(path)
        fs.rename(tmpPath, finalPath)
        fs.setPermission(finalPath,
          FsPermission.createImmutable(Integer.parseInt("644", 8).toShort))
      } catch {
        case NonFatal(e) => error("Error writing metrics to json file " + path, e)
      } finally {
        if (bw != null) {
          Try(bw.close())
        }
      }
    }, 0, interval)
  }

  override def close(): Unit = {
    timer.cancel()
  }
} 
Example 15
Source File: WithOracleContext.scala    From quill   with Apache License 2.0
package io.getquill.codegen.util

import java.io.Closeable

import io.getquill.codegen.integration.CodegenTestCases._
import io.getquill.codegen.util.ConfigPrefix.{ TestOracleDB => TheDB }
import io.getquill.{ Literal, SnakeCase, OracleDialect => TheDialect, OracleJdbcContext => TheContext }
import javax.sql.DataSource

// Need separate object for Oracle contexts since WithOracleContext itself is excluded in builds
// that are not compiling with Oracle
object WithOracleContextStandalone
  extends WithContextAux with WithOracleContext

trait WithOracleContext extends WithContextAux {
  import io.getquill.codegen.generated.oracle._

  implicit def oracleSimpleContextForTest1: Aux[TheDB, `1-simple-snake`, TheContext[SnakeCase]] =
    new WithContextBase[TheDB, `1-simple-snake`](TheDB, `1-simple-snake`) {
      override type QuillContext = TheContext[SnakeCase]
      override protected def makeContext(ds: DataSource with Closeable) = new QuillContext(SnakeCase, ds)
    }

  implicit def oracleSimpleContextForTest2: Aux[TheDB, `2-simple-literal`, TheContext[SnakeCase]] =
    new WithContextBase[TheDB, `2-simple-literal`](TheDB, `2-simple-literal`) {
      override type QuillContext = TheContext[SnakeCase]
      override protected def makeContext(ds: DataSource with Closeable) = new QuillContext(SnakeCase, ds)
    }

  implicit def oracleContextForTest1: Aux[TheDB, `1-comp-sanity`, TheContext[SnakeCase]] =
    new WithContextBase[TheDB, `1-comp-sanity`](TheDB, `1-comp-sanity`) {
      override type QuillContext = TheContext[SnakeCase]
      override protected def makeContext(ds: DataSource with Closeable) = new QuillContext(SnakeCase, ds)
    }

  implicit def oracleContextForTest2: Aux[TheDB, `2-comp-stereo-single`, TheContext[SnakeCase] with `2-comp-stereo-single-lib`.public.PublicExtensions[TheDialect, SnakeCase]] =
    new WithContextBase[TheDB, `2-comp-stereo-single`](TheDB, `2-comp-stereo-single`) {
      override type QuillContext = TheContext[SnakeCase] with `2-comp-stereo-single-lib`.public.PublicExtensions[TheDialect, SnakeCase]
      override protected def makeContext(ds: DataSource with Closeable) = new TheContext[SnakeCase](SnakeCase, ds) with `2-comp-stereo-single-lib`.public.PublicExtensions[TheDialect, SnakeCase]
    }

  implicit def oracleContextForTest3: Aux[TheDB, `3-comp-stereo-oneschema`, TheContext[Literal] with `3-comp-stereo-oneschema-lib`.public.PublicExtensions[TheDialect, Literal]] =
    new WithContextBase[TheDB, `3-comp-stereo-oneschema`](TheDB, `3-comp-stereo-oneschema`) {
      override type QuillContext = TheContext[Literal] with `3-comp-stereo-oneschema-lib`.public.PublicExtensions[TheDialect, Literal]
      override protected def makeContext(ds: DataSource with Closeable) = new TheContext[Literal](Literal, ds) with `3-comp-stereo-oneschema-lib`.public.PublicExtensions[TheDialect, Literal]
    }

  implicit def oracleContextForTest4: Aux[TheDB, `4-comp-stereo-twoschema`, TheContext[Literal] with `4-comp-stereo-twoschema-lib`.public.PublicExtensions[TheDialect, Literal] with `4-comp-stereo-twoschema-lib`.common.CommonExtensions[TheDialect, Literal]] = new WithContextBase[TheDB, `4-comp-stereo-twoschema`](TheDB, `4-comp-stereo-twoschema`) {
    override type QuillContext = TheContext[Literal] with `4-comp-stereo-twoschema-lib`.public.PublicExtensions[TheDialect, Literal] with `4-comp-stereo-twoschema-lib`.common.CommonExtensions[TheDialect, Literal]
    override protected def makeContext(ds: DataSource with Closeable) =
      new TheContext[Literal](Literal, ds) with `4-comp-stereo-twoschema-lib`.public.PublicExtensions[TheDialect, Literal] with `4-comp-stereo-twoschema-lib`.common.CommonExtensions[TheDialect, Literal]
  }

  implicit def oracleContextForTest5: Aux[TheDB, `5-comp-non-stereo-allschema`, TheContext[Literal] with `5-comp-non-stereo-allschema-lib`.public.PublicExtensions[TheDialect, Literal] with `5-comp-non-stereo-allschema-lib`.alpha.AlphaExtensions[TheDialect, Literal] with `5-comp-non-stereo-allschema-lib`.bravo.BravoExtensions[TheDialect, Literal]] = new WithContextBase[TheDB, `5-comp-non-stereo-allschema`](TheDB, `5-comp-non-stereo-allschema`) {
    override type QuillContext = TheContext[Literal] with `5-comp-non-stereo-allschema-lib`.public.PublicExtensions[TheDialect, Literal] with `5-comp-non-stereo-allschema-lib`.alpha.AlphaExtensions[TheDialect, Literal] with `5-comp-non-stereo-allschema-lib`.bravo.BravoExtensions[TheDialect, Literal]
    override protected def makeContext(ds: DataSource with Closeable) =
      new TheContext[Literal](Literal, ds) with `5-comp-non-stereo-allschema-lib`.public.PublicExtensions[TheDialect, Literal] with `5-comp-non-stereo-allschema-lib`.alpha.AlphaExtensions[TheDialect, Literal] with `5-comp-non-stereo-allschema-lib`.bravo.BravoExtensions[TheDialect, Literal]
  }

} 
Example 16
Source File: WithMysqlContext.scala    From quill   with Apache License 2.0
package io.getquill.codegen.util

import java.io.Closeable

import io.getquill.{ MySQLDialect => TheDialect, MysqlJdbcContext => TheContext, Literal, SnakeCase }
import io.getquill.codegen.integration.CodegenTestCases._
import io.getquill.codegen.util.ConfigPrefix.{ TestMysqlDB => TheDB }
import javax.sql.DataSource

trait WithMysqlContext extends WithContextAux {
  import io.getquill.codegen.generated.mysql._

  implicit def mysqlSimpleContextForTest1: Aux[TheDB, `1-simple-snake`, TheContext[SnakeCase]] =
    new WithContextBase[TheDB, `1-simple-snake`](TheDB, `1-simple-snake`) {
      override type QuillContext = TheContext[SnakeCase]
      override protected def makeContext(ds: DataSource with Closeable) = new QuillContext(SnakeCase, ds)
    }

  implicit def mysqlSimpleContextForTest2: Aux[TheDB, `2-simple-literal`, TheContext[SnakeCase]] =
    new WithContextBase[TheDB, `2-simple-literal`](TheDB, `2-simple-literal`) {
      override type QuillContext = TheContext[SnakeCase]
      override protected def makeContext(ds: DataSource with Closeable) = new QuillContext(SnakeCase, ds)
    }

  implicit def mysqlContextForTest1: Aux[TheDB, `1-comp-sanity`, TheContext[SnakeCase]] =
    new WithContextBase[TheDB, `1-comp-sanity`](TheDB, `1-comp-sanity`) {
      override type QuillContext = TheContext[SnakeCase]
      override protected def makeContext(ds: DataSource with Closeable) = new QuillContext(SnakeCase, ds)
    }

  implicit def mysqlContextForTest2: Aux[TheDB, `2-comp-stereo-single`, TheContext[SnakeCase] with `2-comp-stereo-single-lib`.public.PublicExtensions[TheDialect, SnakeCase]] =
    new WithContextBase[TheDB, `2-comp-stereo-single`](TheDB, `2-comp-stereo-single`) {
      override type QuillContext = TheContext[SnakeCase] with `2-comp-stereo-single-lib`.public.PublicExtensions[TheDialect, SnakeCase]
      override protected def makeContext(ds: DataSource with Closeable) = new TheContext[SnakeCase](SnakeCase, ds) with `2-comp-stereo-single-lib`.public.PublicExtensions[TheDialect, SnakeCase]
    }

  implicit def mysqlContextForTest3: Aux[TheDB, `3-comp-stereo-oneschema`, TheContext[Literal] with `3-comp-stereo-oneschema-lib`.public.PublicExtensions[TheDialect, Literal]] =
    new WithContextBase[TheDB, `3-comp-stereo-oneschema`](TheDB, `3-comp-stereo-oneschema`) {
      override type QuillContext = TheContext[Literal] with `3-comp-stereo-oneschema-lib`.public.PublicExtensions[TheDialect, Literal]
      override protected def makeContext(ds: DataSource with Closeable) = new TheContext[Literal](Literal, ds) with `3-comp-stereo-oneschema-lib`.public.PublicExtensions[TheDialect, Literal]
    }

  implicit def mysqlContextForTest4: Aux[TheDB, `4-comp-stereo-twoschema`, TheContext[Literal] with `4-comp-stereo-twoschema-lib`.public.PublicExtensions[TheDialect, Literal] with `4-comp-stereo-twoschema-lib`.common.CommonExtensions[TheDialect, Literal]] = new WithContextBase[TheDB, `4-comp-stereo-twoschema`](TheDB, `4-comp-stereo-twoschema`) {
    override type QuillContext = TheContext[Literal] with `4-comp-stereo-twoschema-lib`.public.PublicExtensions[TheDialect, Literal] with `4-comp-stereo-twoschema-lib`.common.CommonExtensions[TheDialect, Literal]
    override protected def makeContext(ds: DataSource with Closeable) =
      new TheContext[Literal](Literal, ds) with `4-comp-stereo-twoschema-lib`.public.PublicExtensions[TheDialect, Literal] with `4-comp-stereo-twoschema-lib`.common.CommonExtensions[TheDialect, Literal]
  }

  implicit def mysqlContextForTest5: Aux[TheDB, `5-comp-non-stereo-allschema`, TheContext[Literal] with `5-comp-non-stereo-allschema-lib`.public.PublicExtensions[TheDialect, Literal] with `5-comp-non-stereo-allschema-lib`.alpha.AlphaExtensions[TheDialect, Literal] with `5-comp-non-stereo-allschema-lib`.bravo.BravoExtensions[TheDialect, Literal]] = new WithContextBase[TheDB, `5-comp-non-stereo-allschema`](TheDB, `5-comp-non-stereo-allschema`) {
    override type QuillContext = TheContext[Literal] with `5-comp-non-stereo-allschema-lib`.public.PublicExtensions[TheDialect, Literal] with `5-comp-non-stereo-allschema-lib`.alpha.AlphaExtensions[TheDialect, Literal] with `5-comp-non-stereo-allschema-lib`.bravo.BravoExtensions[TheDialect, Literal]
    override protected def makeContext(ds: DataSource with Closeable) =
      new TheContext[Literal](Literal, ds) with `5-comp-non-stereo-allschema-lib`.public.PublicExtensions[TheDialect, Literal] with `5-comp-non-stereo-allschema-lib`.alpha.AlphaExtensions[TheDialect, Literal] with `5-comp-non-stereo-allschema-lib`.bravo.BravoExtensions[TheDialect, Literal]
  }

} 
Example 17
Source File: WithSqliteContext.scala    From quill   with Apache License 2.0
package io.getquill.codegen.util

import java.io.Closeable

import io.getquill.codegen.integration.CodegenTestCases._
import io.getquill.codegen.util.ConfigPrefix.{ TestSqliteDB => TheDB }
import io.getquill.{ SnakeCase, SqliteDialect => TheDialect, SqliteJdbcContext => TheContext }
import javax.sql.DataSource

trait WithSqliteContext extends WithContextAux {
  import io.getquill.codegen.generated.sqlite._

  implicit def sqliteSimpleContextForTest1: Aux[TheDB, `1-simple-snake`, TheContext[SnakeCase]] =
    new WithContextBase[TheDB, `1-simple-snake`](TheDB, `1-simple-snake`) {
      override type QuillContext = TheContext[SnakeCase]
      override protected def makeContext(ds: DataSource with Closeable) = new QuillContext(SnakeCase, ds)
    }

  implicit def sqliteSimpleContextForTest2: Aux[TheDB, `2-simple-literal`, TheContext[SnakeCase]] =
    new WithContextBase[TheDB, `2-simple-literal`](TheDB, `2-simple-literal`) {
      override type QuillContext = TheContext[SnakeCase]
      override protected def makeContext(ds: DataSource with Closeable) = new QuillContext(SnakeCase, ds)
    }

  implicit def sqliteContextForTest1: Aux[TheDB, `1-comp-sanity`, TheContext[SnakeCase]] =
    new WithContextBase[TheDB, `1-comp-sanity`](TheDB, `1-comp-sanity`) {
      override type QuillContext = TheContext[SnakeCase]
      override protected def makeContext(ds: DataSource with Closeable) = new QuillContext(SnakeCase, ds)
    }

  implicit def sqliteContextForTest2: Aux[TheDB, `2-comp-stereo-single`, TheContext[SnakeCase] with `2-comp-stereo-single-lib`.public.PublicExtensions[TheDialect, SnakeCase]] =
    new WithContextBase[TheDB, `2-comp-stereo-single`](TheDB, `2-comp-stereo-single`) {
      override type QuillContext = TheContext[SnakeCase] with `2-comp-stereo-single-lib`.public.PublicExtensions[TheDialect, SnakeCase]
      override protected def makeContext(ds: DataSource with Closeable) = new TheContext[SnakeCase](SnakeCase, ds) with `2-comp-stereo-single-lib`.public.PublicExtensions[TheDialect, SnakeCase]
    }
} 
Example 18
Source File: WithContext.scala    From quill   with Apache License 2.0
package io.getquill.codegen.util
import java.io.Closeable

import io.getquill.codegen.integration.CodegenTestCases
import io.getquill.context.jdbc.JdbcContext
import javax.sql.DataSource

trait WithContext[Prefix <: ConfigPrefix, CTest <: CodegenTestCases] extends SchemaMaker {
  type QuillContext <: JdbcContext[_, _]

  def ctest: CTest
  def dbPrefix: Prefix

  protected def makeContext(ds: DataSource with Closeable): QuillContext
  def run(testCode: QuillContext => Any): Unit = {
    withContext(ctest.schemaMakerCoordinates(dbPrefix))(testCode)
    ()
  }
}

abstract class WithContextBase[Prefix <: ConfigPrefix, CTest <: CodegenTestCases](val dbPrefix: Prefix, val ctest: CTest) extends WithContext[Prefix, CTest]

// Cannot include WithOracleContext here since it is not being compiled in all builds
object WithContext extends WithContextAux
  with WithH2Context
  with WithMysqlContext
  with WithPostgresContext
  with WithSqliteContext
  with WithSqlServerContext

trait WithContextAux {
  type Aux[Prefix <: ConfigPrefix, CTest <: CodegenTestCases, Ret] = WithContext[Prefix, CTest] { type QuillContext = Ret }
  def apply[Prefix <: ConfigPrefix, CTest <: CodegenTestCases](implicit cft: WithContext[Prefix, CTest]): Aux[Prefix, CTest, cft.QuillContext] = cft
} 
Example 19
Source File: WithH2Context.scala    From quill   with Apache License 2.0
package io.getquill.codegen.util

import java.io.Closeable

import io.getquill.{ H2Dialect => TheDialect, H2JdbcContext => TheContext, Literal, SnakeCase }
import io.getquill.codegen.integration.CodegenTestCases._
import io.getquill.codegen.util.ConfigPrefix.{ TestH2DB => TheDB }
import javax.sql.DataSource

trait WithH2Context extends WithContextAux {
  import io.getquill.codegen.generated.h2._

  implicit def h2SimpleContextForTest1: Aux[TheDB, `1-simple-snake`, TheContext[SnakeCase]] =
    new WithContextBase[TheDB, `1-simple-snake`](TheDB, `1-simple-snake`) {
      override type QuillContext = TheContext[SnakeCase]
      override protected def makeContext(ds: DataSource with Closeable) = new QuillContext(SnakeCase, ds)
    }

  implicit def h2SimpleContextForTest2: Aux[TheDB, `2-simple-literal`, TheContext[SnakeCase]] =
    new WithContextBase[TheDB, `2-simple-literal`](TheDB, `2-simple-literal`) {
      override type QuillContext = TheContext[SnakeCase]
      override protected def makeContext(ds: DataSource with Closeable) = new QuillContext(SnakeCase, ds)
    }

  implicit def h2ContextForTest1: Aux[TheDB, `1-comp-sanity`, TheContext[SnakeCase]] =
    new WithContextBase[TheDB, `1-comp-sanity`](TheDB, `1-comp-sanity`) {
      override type QuillContext = TheContext[SnakeCase]
      override protected def makeContext(ds: DataSource with Closeable) = new QuillContext(SnakeCase, ds)
    }

  implicit def h2ContextForTest2: Aux[TheDB, `2-comp-stereo-single`, TheContext[SnakeCase] with `2-comp-stereo-single-lib`.public.PublicExtensions[TheDialect, SnakeCase]] =
    new WithContextBase[TheDB, `2-comp-stereo-single`](TheDB, `2-comp-stereo-single`) {
      override type QuillContext = TheContext[SnakeCase] with `2-comp-stereo-single-lib`.public.PublicExtensions[TheDialect, SnakeCase]
      override protected def makeContext(ds: DataSource with Closeable) = new TheContext[SnakeCase](SnakeCase, ds) with `2-comp-stereo-single-lib`.public.PublicExtensions[TheDialect, SnakeCase]
    }

  implicit def h2ContextForTest3: Aux[TheDB, `3-comp-stereo-oneschema`, TheContext[Literal] with `3-comp-stereo-oneschema-lib`.public.PublicExtensions[TheDialect, Literal]] =
    new WithContextBase[TheDB, `3-comp-stereo-oneschema`](TheDB, `3-comp-stereo-oneschema`) {
      override type QuillContext = TheContext[Literal] with `3-comp-stereo-oneschema-lib`.public.PublicExtensions[TheDialect, Literal]
      override protected def makeContext(ds: DataSource with Closeable) = new TheContext[Literal](Literal, ds) with `3-comp-stereo-oneschema-lib`.public.PublicExtensions[TheDialect, Literal]
    }

  implicit def h2ContextForTest4: Aux[TheDB, `4-comp-stereo-twoschema`, TheContext[Literal] with `4-comp-stereo-twoschema-lib`.public.PublicExtensions[TheDialect, Literal] with `4-comp-stereo-twoschema-lib`.common.CommonExtensions[TheDialect, Literal]] = new WithContextBase[TheDB, `4-comp-stereo-twoschema`](TheDB, `4-comp-stereo-twoschema`) {
    override type QuillContext = TheContext[Literal] with `4-comp-stereo-twoschema-lib`.public.PublicExtensions[TheDialect, Literal] with `4-comp-stereo-twoschema-lib`.common.CommonExtensions[TheDialect, Literal]
    override protected def makeContext(ds: DataSource with Closeable) =
      new TheContext[Literal](Literal, ds) with `4-comp-stereo-twoschema-lib`.public.PublicExtensions[TheDialect, Literal] with `4-comp-stereo-twoschema-lib`.common.CommonExtensions[TheDialect, Literal]
  }

  implicit def h2ContextForTest5: Aux[TheDB, `5-comp-non-stereo-allschema`, TheContext[Literal] with `5-comp-non-stereo-allschema-lib`.public.PublicExtensions[TheDialect, Literal] with `5-comp-non-stereo-allschema-lib`.alpha.AlphaExtensions[TheDialect, Literal] with `5-comp-non-stereo-allschema-lib`.bravo.BravoExtensions[TheDialect, Literal]] = new WithContextBase[TheDB, `5-comp-non-stereo-allschema`](TheDB, `5-comp-non-stereo-allschema`) {
    override type QuillContext = TheContext[Literal] with `5-comp-non-stereo-allschema-lib`.public.PublicExtensions[TheDialect, Literal] with `5-comp-non-stereo-allschema-lib`.alpha.AlphaExtensions[TheDialect, Literal] with `5-comp-non-stereo-allschema-lib`.bravo.BravoExtensions[TheDialect, Literal]
    override protected def makeContext(ds: DataSource with Closeable) =
      new TheContext[Literal](Literal, ds) with `5-comp-non-stereo-allschema-lib`.public.PublicExtensions[TheDialect, Literal] with `5-comp-non-stereo-allschema-lib`.alpha.AlphaExtensions[TheDialect, Literal] with `5-comp-non-stereo-allschema-lib`.bravo.BravoExtensions[TheDialect, Literal]
  }

} 
Example 20
Source File: WithPostgresContext.scala    From quill   with Apache License 2.0
package io.getquill.codegen.util

import java.io.Closeable

import io.getquill.codegen.integration.CodegenTestCases._
import io.getquill.codegen.util.ConfigPrefix.{ TestPostgresDB => TheDB }
import io.getquill.{ Literal, SnakeCase, PostgresDialect => TheDialect, PostgresJdbcContext => TheContext }
import javax.sql.DataSource

trait WithPostgresContext extends WithContextAux {
  import io.getquill.codegen.generated.postgres._

  implicit def postgresSimpleContextForTest1: Aux[TheDB, `1-simple-snake`, TheContext[SnakeCase]] =
    new WithContextBase[TheDB, `1-simple-snake`](TheDB, `1-simple-snake`) {
      override type QuillContext = TheContext[SnakeCase]
      override protected def makeContext(ds: DataSource with Closeable) = new QuillContext(SnakeCase, ds)
    }

  implicit def postgresSimpleContextForTest2: Aux[TheDB, `2-simple-literal`, TheContext[SnakeCase]] =
    new WithContextBase[TheDB, `2-simple-literal`](TheDB, `2-simple-literal`) {
      override type QuillContext = TheContext[SnakeCase]
      override protected def makeContext(ds: DataSource with Closeable) = new QuillContext(SnakeCase, ds)
    }

  implicit def postgresContextForTest1: Aux[TheDB, `1-comp-sanity`, TheContext[SnakeCase]] =
    new WithContextBase[TheDB, `1-comp-sanity`](TheDB, `1-comp-sanity`) {
      override type QuillContext = TheContext[SnakeCase]
      override protected def makeContext(ds: DataSource with Closeable) = new QuillContext(SnakeCase, ds)
    }

  implicit def postgresContextForTest2: Aux[TheDB, `2-comp-stereo-single`, TheContext[SnakeCase] with `2-comp-stereo-single-lib`.public.PublicExtensions[TheDialect, SnakeCase]] =
    new WithContextBase[TheDB, `2-comp-stereo-single`](TheDB, `2-comp-stereo-single`) {
      override type QuillContext = TheContext[SnakeCase] with `2-comp-stereo-single-lib`.public.PublicExtensions[TheDialect, SnakeCase]
      override protected def makeContext(ds: DataSource with Closeable) = new TheContext[SnakeCase](SnakeCase, ds) with `2-comp-stereo-single-lib`.public.PublicExtensions[TheDialect, SnakeCase]
    }

  implicit def postgresContextForTest3: Aux[TheDB, `3-comp-stereo-oneschema`, TheContext[Literal] with `3-comp-stereo-oneschema-lib`.public.PublicExtensions[TheDialect, Literal]] =
    new WithContextBase[TheDB, `3-comp-stereo-oneschema`](TheDB, `3-comp-stereo-oneschema`) {
      override type QuillContext = TheContext[Literal] with `3-comp-stereo-oneschema-lib`.public.PublicExtensions[TheDialect, Literal]
      override protected def makeContext(ds: DataSource with Closeable) = new TheContext[Literal](Literal, ds) with `3-comp-stereo-oneschema-lib`.public.PublicExtensions[TheDialect, Literal]
    }

  implicit def postgresContextForTest4: Aux[TheDB, `4-comp-stereo-twoschema`, TheContext[Literal] with `4-comp-stereo-twoschema-lib`.public.PublicExtensions[TheDialect, Literal] with `4-comp-stereo-twoschema-lib`.common.CommonExtensions[TheDialect, Literal]] = new WithContextBase[TheDB, `4-comp-stereo-twoschema`](TheDB, `4-comp-stereo-twoschema`) {
    override type QuillContext = TheContext[Literal] with `4-comp-stereo-twoschema-lib`.public.PublicExtensions[TheDialect, Literal] with `4-comp-stereo-twoschema-lib`.common.CommonExtensions[TheDialect, Literal]
    override protected def makeContext(ds: DataSource with Closeable) =
      new TheContext[Literal](Literal, ds) with `4-comp-stereo-twoschema-lib`.public.PublicExtensions[TheDialect, Literal] with `4-comp-stereo-twoschema-lib`.common.CommonExtensions[TheDialect, Literal]
  }

  implicit def postgresContextForTest5: Aux[TheDB, `5-comp-non-stereo-allschema`, TheContext[Literal] with `5-comp-non-stereo-allschema-lib`.public.PublicExtensions[TheDialect, Literal] with `5-comp-non-stereo-allschema-lib`.alpha.AlphaExtensions[TheDialect, Literal] with `5-comp-non-stereo-allschema-lib`.bravo.BravoExtensions[TheDialect, Literal]] = new WithContextBase[TheDB, `5-comp-non-stereo-allschema`](TheDB, `5-comp-non-stereo-allschema`) {
    override type QuillContext = TheContext[Literal] with `5-comp-non-stereo-allschema-lib`.public.PublicExtensions[TheDialect, Literal] with `5-comp-non-stereo-allschema-lib`.alpha.AlphaExtensions[TheDialect, Literal] with `5-comp-non-stereo-allschema-lib`.bravo.BravoExtensions[TheDialect, Literal]
    override protected def makeContext(ds: DataSource with Closeable) =
      new TheContext[Literal](Literal, ds) with `5-comp-non-stereo-allschema-lib`.public.PublicExtensions[TheDialect, Literal] with `5-comp-non-stereo-allschema-lib`.alpha.AlphaExtensions[TheDialect, Literal] with `5-comp-non-stereo-allschema-lib`.bravo.BravoExtensions[TheDialect, Literal]
  }

} 
Example 21
Source File: WithSqlServerContext.scala    From quill   with Apache License 2.0
package io.getquill.codegen.util

import java.io.Closeable

import io.getquill.codegen.integration.CodegenTestCases._
import io.getquill.codegen.util.ConfigPrefix.{ TestSqlServerDB => TheDB }
import io.getquill.{ Literal, SnakeCase, SQLServerDialect => TheDialect, SqlServerJdbcContext => TheContext }
import javax.sql.DataSource

trait WithSqlServerContext extends WithContextAux {
  import io.getquill.codegen.generated.sqlserver._

  implicit def sqlServerSimpleContextForTest1: Aux[TheDB, `1-simple-snake`, TheContext[SnakeCase]] =
    new WithContextBase[TheDB, `1-simple-snake`](TheDB, `1-simple-snake`) {
      override type QuillContext = TheContext[SnakeCase]
      override protected def makeContext(ds: DataSource with Closeable) = new QuillContext(SnakeCase, ds)
    }

  implicit def sqlServerSimpleContextForTest2: Aux[TheDB, `2-simple-literal`, TheContext[SnakeCase]] =
    new WithContextBase[TheDB, `2-simple-literal`](TheDB, `2-simple-literal`) {
      override type QuillContext = TheContext[SnakeCase]
      override protected def makeContext(ds: DataSource with Closeable) = new QuillContext(SnakeCase, ds)
    }

  implicit def sqlServerContextForTest1: Aux[TheDB, `1-comp-sanity`, TheContext[SnakeCase]] =
    new WithContextBase[TheDB, `1-comp-sanity`](TheDB, `1-comp-sanity`) {
      override type QuillContext = TheContext[SnakeCase]
      override protected def makeContext(ds: DataSource with Closeable) = new QuillContext(SnakeCase, ds)
    }

  implicit def sqlServerContextForTest2: Aux[TheDB, `2-comp-stereo-single`, TheContext[SnakeCase] with `2-comp-stereo-single-lib`.public.PublicExtensions[TheDialect, SnakeCase]] =
    new WithContextBase[TheDB, `2-comp-stereo-single`](TheDB, `2-comp-stereo-single`) {
      override type QuillContext = TheContext[SnakeCase] with `2-comp-stereo-single-lib`.public.PublicExtensions[TheDialect, SnakeCase]
      override protected def makeContext(ds: DataSource with Closeable) = new TheContext[SnakeCase](SnakeCase, ds) with `2-comp-stereo-single-lib`.public.PublicExtensions[TheDialect, SnakeCase]
    }

  implicit def sqlServerContextForTest3: Aux[TheDB, `3-comp-stereo-oneschema`, TheContext[Literal] with `3-comp-stereo-oneschema-lib`.public.PublicExtensions[TheDialect, Literal]] =
    new WithContextBase[TheDB, `3-comp-stereo-oneschema`](TheDB, `3-comp-stereo-oneschema`) {
      override type QuillContext = TheContext[Literal] with `3-comp-stereo-oneschema-lib`.public.PublicExtensions[TheDialect, Literal]
      override protected def makeContext(ds: DataSource with Closeable) = new TheContext[Literal](Literal, ds) with `3-comp-stereo-oneschema-lib`.public.PublicExtensions[TheDialect, Literal]
    }

  implicit def sqlServerContextForTest4: Aux[TheDB, `4-comp-stereo-twoschema`, TheContext[Literal] with `4-comp-stereo-twoschema-lib`.public.PublicExtensions[TheDialect, Literal] with `4-comp-stereo-twoschema-lib`.common.CommonExtensions[TheDialect, Literal]] = new WithContextBase[TheDB, `4-comp-stereo-twoschema`](TheDB, `4-comp-stereo-twoschema`) {
    override type QuillContext = TheContext[Literal] with `4-comp-stereo-twoschema-lib`.public.PublicExtensions[TheDialect, Literal] with `4-comp-stereo-twoschema-lib`.common.CommonExtensions[TheDialect, Literal]
    override protected def makeContext(ds: DataSource with Closeable) =
      new TheContext[Literal](Literal, ds) with `4-comp-stereo-twoschema-lib`.public.PublicExtensions[TheDialect, Literal] with `4-comp-stereo-twoschema-lib`.common.CommonExtensions[TheDialect, Literal]
  }

  implicit def sqlServerContextForTest5: Aux[TheDB, `5-comp-non-stereo-allschema`, TheContext[Literal] with `5-comp-non-stereo-allschema-lib`.public.PublicExtensions[TheDialect, Literal] with `5-comp-non-stereo-allschema-lib`.alpha.AlphaExtensions[TheDialect, Literal] with `5-comp-non-stereo-allschema-lib`.bravo.BravoExtensions[TheDialect, Literal]] = new WithContextBase[TheDB, `5-comp-non-stereo-allschema`](TheDB, `5-comp-non-stereo-allschema`) {
    override type QuillContext = TheContext[Literal] with `5-comp-non-stereo-allschema-lib`.public.PublicExtensions[TheDialect, Literal] with `5-comp-non-stereo-allschema-lib`.alpha.AlphaExtensions[TheDialect, Literal] with `5-comp-non-stereo-allschema-lib`.bravo.BravoExtensions[TheDialect, Literal]
    override protected def makeContext(ds: DataSource with Closeable) =
      new TheContext[Literal](Literal, ds) with `5-comp-non-stereo-allschema-lib`.public.PublicExtensions[TheDialect, Literal] with `5-comp-non-stereo-allschema-lib`.alpha.AlphaExtensions[TheDialect, Literal] with `5-comp-non-stereo-allschema-lib`.bravo.BravoExtensions[TheDialect, Literal]
  }

} 
Example 22
Source File: BlockTransferService.scala    From iolap   with Apache License 2.0
package org.apache.spark.network

import java.io.Closeable
import java.nio.ByteBuffer

import scala.concurrent.{Promise, Await, Future}
import scala.concurrent.duration.Duration

import org.apache.spark.Logging
import org.apache.spark.network.buffer.{NioManagedBuffer, ManagedBuffer}
import org.apache.spark.network.shuffle.{ShuffleClient, BlockFetchingListener}
import org.apache.spark.storage.{BlockManagerId, BlockId, StorageLevel}

private[spark]
abstract class BlockTransferService extends ShuffleClient with Closeable with Logging {

  // Uploads a single block to a remote node, blocking until the upload completes.
  def uploadBlockSync(
      hostname: String,
      port: Int,
      execId: String,
      blockId: BlockId,
      blockData: ManagedBuffer,
      level: StorageLevel): Unit = {
    Await.result(uploadBlock(hostname, port, execId, blockId, blockData, level), Duration.Inf)
  }
} 
Example 23
Source File: FileBasedWriteAheadLogReader.scala    From iolap   with Apache License 2.0
package org.apache.spark.streaming.util

import java.io.{Closeable, EOFException}
import java.nio.ByteBuffer

import org.apache.hadoop.conf.Configuration
import org.apache.spark.Logging


private[streaming] class FileBasedWriteAheadLogReader(path: String, conf: Configuration)
  extends Iterator[ByteBuffer] with Closeable with Logging {

  private val instream = HdfsUtils.getInputStream(path, conf)
  private var closed = false
  private var nextItem: Option[ByteBuffer] = None

  override def hasNext: Boolean = synchronized {
    if (closed) {
      return false
    }

    if (nextItem.isDefined) { // handle the case where hasNext is called without calling next
      true
    } else {
      try {
        val length = instream.readInt()
        val buffer = new Array[Byte](length)
        instream.readFully(buffer)
        nextItem = Some(ByteBuffer.wrap(buffer))
        logTrace("Read next item " + nextItem.get)
        true
      } catch {
        case e: EOFException =>
          logDebug("Error reading next item, EOF reached", e)
          close()
          false
        case e: Exception =>
          logWarning("Error while trying to read data from HDFS.", e)
          close()
          throw e
      }
    }
  }

  override def next(): ByteBuffer = synchronized {
    val data = nextItem.getOrElse {
      close()
      throw new IllegalStateException(
        "next called without calling hasNext or after hasNext returned false")
    }
    nextItem = None // Ensure the next hasNext call loads new data.
    data
  }

  override def close(): Unit = synchronized {
    if (!closed) {
      instream.close()
    }
    closed = true
  }
} 
Example 24
Source File: FileBasedWriteAheadLogRandomReader.scala    From iolap   with Apache License 2.0
package org.apache.spark.streaming.util

import java.io.Closeable
import java.nio.ByteBuffer

import org.apache.hadoop.conf.Configuration


private[streaming] class FileBasedWriteAheadLogRandomReader(path: String, conf: Configuration)
  extends Closeable {

  private val instream = HdfsUtils.getInputStream(path, conf)
  private var closed = false

  def read(segment: FileBasedWriteAheadLogSegment): ByteBuffer = synchronized {
    assertOpen()
    instream.seek(segment.offset)
    val nextLength = instream.readInt()
    HdfsUtils.checkState(nextLength == segment.length,
      s"Expected message length to be ${segment.length}, but was $nextLength")
    val buffer = new Array[Byte](nextLength)
    instream.readFully(buffer)
    ByteBuffer.wrap(buffer)
  }

  override def close(): Unit = synchronized {
    closed = true
    instream.close()
  }

  private def assertOpen() {
    HdfsUtils.checkState(!closed, "Stream is closed. Create a new Reader to read from the file.")
  }
} 
Example 25
Source File: BlockTransferService.scala    From multi-tenancy-spark   with Apache License 2.0
package org.apache.spark.network

import java.io.Closeable
import java.nio.ByteBuffer

import scala.concurrent.{Future, Promise}
import scala.concurrent.duration.Duration
import scala.reflect.ClassTag

import org.apache.spark.internal.Logging
import org.apache.spark.network.buffer.{ManagedBuffer, NioManagedBuffer}
import org.apache.spark.network.shuffle.{BlockFetchingListener, ShuffleClient}
import org.apache.spark.storage.{BlockId, StorageLevel}
import org.apache.spark.util.ThreadUtils

private[spark]
abstract class BlockTransferService extends ShuffleClient with Closeable with Logging {

  // Uploads a single block to a remote node, blocking until the upload completes.
  def uploadBlockSync(
      hostname: String,
      port: Int,
      execId: String,
      blockId: BlockId,
      blockData: ManagedBuffer,
      level: StorageLevel,
      classTag: ClassTag[_]): Unit = {
    val future = uploadBlock(hostname, port, execId, blockId, blockData, level, classTag)
    ThreadUtils.awaitResult(future, Duration.Inf)
  }
} 
Example 26
Source File: FileBasedWriteAheadLogReader.scala    From BigDatalog   with Apache License 2.0
package org.apache.spark.streaming.util

import java.io.{IOException, Closeable, EOFException}
import java.nio.ByteBuffer

import org.apache.hadoop.conf.Configuration
import org.apache.spark.Logging


private[streaming] class FileBasedWriteAheadLogReader(path: String, conf: Configuration)
  extends Iterator[ByteBuffer] with Closeable with Logging {

  private val instream = HdfsUtils.getInputStream(path, conf)
  private var closed = (instream == null) // the file may be deleted as we're opening the stream
  private var nextItem: Option[ByteBuffer] = None

  override def hasNext: Boolean = synchronized {
    if (closed) {
      return false
    }

    if (nextItem.isDefined) { // handle the case where hasNext is called without calling next
      true
    } else {
      try {
        val length = instream.readInt()
        val buffer = new Array[Byte](length)
        instream.readFully(buffer)
        nextItem = Some(ByteBuffer.wrap(buffer))
        logTrace("Read next item " + nextItem.get)
        true
      } catch {
        case e: EOFException =>
          logDebug("Error reading next item, EOF reached", e)
          close()
          false
        case e: IOException =>
          logWarning("Error while trying to read data. If the file was deleted, " +
            "this should be okay.", e)
          close()
          if (HdfsUtils.checkFileExists(path, conf)) {
            // If file exists, this could be a legitimate error
            throw e
          } else {
            // File was deleted. This can occur when the daemon cleanup thread takes time to
            // delete the file during recovery.
            false
          }

        case e: Exception =>
          logWarning("Error while trying to read data from HDFS.", e)
          close()
          throw e
      }
    }
  }

  override def next(): ByteBuffer = synchronized {
    val data = nextItem.getOrElse {
      close()
      throw new IllegalStateException(
        "next called without calling hasNext or after hasNext returned false")
    }
    nextItem = None // Ensure the next hasNext call loads new data.
    data
  }

  override def close(): Unit = synchronized {
    if (!closed) {
      instream.close()
    }
    closed = true
  }
} 
Example 27
Source File: MetadataStream.scala    From spark-bam   with Apache License 2.0
package org.hammerlab.bgzf.block

import java.io.{ Closeable, EOFException }

import hammerlab.iterator.SimpleIterator
import org.hammerlab.bgzf.block.Block.FOOTER_SIZE
import org.hammerlab.bgzf.block.Header.EXPECTED_HEADER_SIZE
import org.hammerlab.channel.ByteChannel
import org.hammerlab.io.Buffer


case class MetadataStream(ch: ByteChannel)
  extends SimpleIterator[Metadata]
    with Closeable {

  // Buffer for the standard bits of the header that we care about
  implicit val buf = Buffer(EXPECTED_HEADER_SIZE)

  override protected def _advance: Option[Metadata] = {

    val start = ch.position()

    buf.clear()
    val Header(actualHeaderSize, compressedSize) =
      try {
        Header(ch)
      } catch {
        case e: EOFException ⇒
          return None
      }

    val remainingBytes = compressedSize - actualHeaderSize

    ch.skip(remainingBytes - 4)
    val uncompressedSize = ch.getInt

    val dataLength = remainingBytes - FOOTER_SIZE

    if (dataLength == 2) {
      // Skip empty block at end of file
      None
    } else
      Some(
        Metadata(
          start,
          compressedSize,
          uncompressedSize
        )
      )
  }

  override def close(): Unit =
    ch.close()
} 
Example 28
Source File: UncompressedBytes.scala    From spark-bam   with Apache License 2.0
package org.hammerlab.bgzf.block

import java.io.Closeable

import hammerlab.iterator._
import org.hammerlab.bgzf.Pos
import org.hammerlab.channel.{ ByteChannel, SeekableByteChannel }


case class SeekableUncompressedBytes(blockStream: SeekableStream)
  extends UncompressedBytesI[SeekableStream] {
  def seek(pos: Pos): Unit = {
    blockStream.seek(pos.blockPos)
    uncompressedBytes.reset()
    clear()
    curBlock
      .foreach {
        block ⇒
          block.idx = pos.offset
      }

  }
}

object SeekableUncompressedBytes {
  def apply(ch: SeekableByteChannel): SeekableUncompressedBytes =
    SeekableUncompressedBytes(
      SeekableStream(
        ch
      )
    )
} 
Example 29
Source File: Utils.scala    From scala-clippy   with Apache License 2.0 5 votes vote down vote up
package com.softwaremill.clippy

import java.io.{ByteArrayOutputStream, InputStream}
import java.io.Closeable
import scala.util.control.NonFatal
import scala.util.{Failure, Try}

object Utils {

  
  def runNonDaemon(t: => Unit) = {
    val shutdownHook = new Thread() {
      private val lock             = new Object
      @volatile private var didRun = false

      override def run() =
        lock.synchronized {
          if (!didRun) {
            t
            didRun = true
          }
        }
    }

    Runtime.getRuntime.addShutdownHook(shutdownHook)
    try shutdownHook.run()
    finally Runtime.getRuntime.removeShutdownHook(shutdownHook)
  }

  def inputStreamToBytes(is: InputStream): Array[Byte] =
    try {
      val baos = new ByteArrayOutputStream()
      val buf  = new Array[Byte](512)
      var read = 0
      while ({ read = is.read(buf, 0, buf.length); read } != -1) {
        baos.write(buf, 0, read)
      }
      baos.toByteArray
    } finally is.close()

  object TryWith {
    def apply[C <: Closeable, R](resource: => C)(f: C => R): Try[R] =
      Try(resource).flatMap(resourceInstance => {
        try {
          val returnValue = f(resourceInstance)
          Try(resourceInstance.close()).map(_ => returnValue)
        } catch {
          case NonFatal(exceptionInFunction) =>
            try {
              resourceInstance.close()
              Failure(exceptionInFunction)
            } catch {
              case NonFatal(exceptionInClose) =>
                exceptionInFunction.addSuppressed(exceptionInClose)
                Failure(exceptionInFunction)
            }
        }
      })
  }
} 
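A brief usage sketch for TryWith above: the resource is closed whether or not the body throws, and the outcome comes back as a Try. The file name is hypothetical:

import java.io.FileInputStream
import scala.util.{Failure, Success}

Utils.TryWith(new FileInputStream("data.bin")) { in =>
  in.read() // first byte, or -1 if the file is empty
} match {
  case Success(b)  => println(s"First byte: $b")
  case Failure(ex) => println(s"Could not read the file: ${ex.getMessage}")
}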
Example 30
Source File: MinstDatasetReader.scala    From zen   with Apache License 2.0 5 votes vote down vote up
package com.github.cloudml.zen.ml.util

import java.io.{Closeable, DataInputStream, FileInputStream, IOException}
import java.util.zip.GZIPInputStream

import org.apache.spark.mllib.linalg.{DenseVector => SDV, Vector => SV}

case class MinstItem(label: Int, data: Array[Int]) {
  def binaryVector: SV = {
    new SDV(data.map { i =>
      if (i > 30) {
        1D
      } else {
        0D
      }
    })
  }
}

class MinstDatasetReader(labelsFile: String, imagesFile: String)
  extends java.util.Iterator[MinstItem] with Closeable with Logging {

  val labelsBuf: DataInputStream = new DataInputStream(new GZIPInputStream(
    new FileInputStream(labelsFile)))
  var magic = labelsBuf.readInt()
  val labelCount = labelsBuf.readInt()
  logInfo(s"Labels magic=$magic count= $labelCount")

  val imagesBuf: DataInputStream = new DataInputStream(new GZIPInputStream(
    new FileInputStream(imagesFile)))
  magic = imagesBuf.readInt()
  val imageCount = imagesBuf.readInt()
  val rows = imagesBuf.readInt()
  val cols = imagesBuf.readInt()
  logInfo(s"Images magic=$magic count=$imageCount rows=$rows cols=$cols")
  assert(imageCount == labelCount)

  var current = 0

  override def next(): MinstItem = {
    try {
      val data = new Array[Int](rows * cols)
      for (i <- 0 until data.length) {
        data(i) = imagesBuf.readUnsignedByte()
      }
      val label = labelsBuf.readUnsignedByte()
      MinstItem(label, data)
    } catch {
      case e: IOException =>
        current = imageCount
        throw e
    }
    finally {
      current += 1
    }
  }

  override def hasNext = current < imageCount

  override def close(): Unit = {
    imagesBuf.close()
    labelsBuf.close()
  }

  override def remove(): Unit = {
    throw new UnsupportedOperationException("remove")
  }
} 
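A usage sketch for the reader above, assuming the standard gzipped MNIST files are available locally (the file names are illustrative):

val reader = new MinstDatasetReader("train-labels-idx1-ubyte.gz", "train-images-idx3-ubyte.gz")
try {
  while (reader.hasNext) {
    val item = reader.next()
    val features = item.binaryVector // pixel intensities thresholded at 30 into a 0/1 dense vector
    // ... feed `features` and `item.label` into a training loop ...
  }
} finally {
  reader.close()
}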
Example 31
Source File: WithUtils.scala    From glow   with Apache License 2.0 5 votes vote down vote up
package io.projectglow.common

import java.io.Closeable
import java.util.concurrent.locks.Lock

import org.apache.spark.rdd.RDD
import org.apache.spark.sql.Dataset
import org.apache.spark.storage.StorageLevel

import scala.util.control.NonFatal

object WithUtils {

  
  def withCloseable[T <: Closeable, R](closeable: T)(func: T => R): R = {
    var triedToClose = false
    try {
      func(closeable)
    } catch {
      case NonFatal(e) =>
        try {
          closeable.close()
        } catch {
          case NonFatal(t) =>
            e.addSuppressed(t)
        }
        triedToClose = true
        throw e
    } finally {
      // if we haven't tried to close it in the exception handler, try here.
      if (!triedToClose) {
        closeable.close()
      }
    }
  }

  def withLock[T](lock: Lock)(f: => T): T = {
    lock.lock()
    try {
      f
    } finally {
      lock.unlock()
    }
  }

  def withCachedRDD[T, U](rdd: RDD[T])(f: RDD[T] => U): U = {
    // Caching in MEMORY_ONLY (or even MEMORY_AND_DISK) can result in OOMs
    rdd.persist(StorageLevel.DISK_ONLY)
    try {
      f(rdd)
    } finally {
      rdd.unpersist()
    }
  }

  def withCachedDataset[T, U](ds: Dataset[T])(f: Dataset[T] => U): U = {
    // Caching in MEMORY_ONLY (or even MEMORY_AND_DISK) can result in OOMs
    ds.persist(StorageLevel.DISK_ONLY)
    try {
      f(ds)
    } finally {
      ds.unpersist()
    }
  }
} 
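A small sketch of the two simplest helpers above, using only JDK classes: withCloseable closes in every code path, and withLock always releases the lock:

import java.io.ByteArrayInputStream
import java.util.concurrent.locks.ReentrantLock

// The stream is closed whether the body returns normally or throws.
val firstByte = WithUtils.withCloseable(new ByteArrayInputStream(Array[Byte](7, 8, 9))) { in =>
  in.read()
}

// The lock is released even if the block throws.
val lock = new ReentrantLock()
val sum = WithUtils.withLock(lock) { 1 + 2 }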
Example 32
Source File: HadoopLineIterator.scala    From glow   with Apache License 2.0 5 votes vote down vote up
package io.projectglow.sql.util

import java.io.Closeable
import java.net.URI

import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.Path
import org.apache.hadoop.io.Text
import org.apache.hadoop.mapreduce._
import org.apache.hadoop.mapreduce.lib.input.{FileSplit, LineRecordReader}
import org.apache.hadoop.mapreduce.task.TaskAttemptContextImpl
import org.apache.spark.sql.execution.datasources.RecordReaderIterator

import io.projectglow.common.GlowLogging


class HadoopLineIterator(
    path: String,
    start: Long,
    length: Long,
    lineSeparator: Option[Array[Byte]],
    conf: Configuration)
    extends Iterator[Text]
    with Closeable
    with GlowLogging {

  private val iterator = {
    val fileSplit = new FileSplit(
      new Path(new URI(path)),
      start,
      length,
      // TODO: Implement Locality
      Array.empty
    )
    val attemptId = new TaskAttemptID(new TaskID(new JobID(), TaskType.MAP, 0), 0)
    val hadoopAttemptContext = new TaskAttemptContextImpl(conf, attemptId)

    val reader = lineSeparator match {
      case Some(sep) => new LineRecordReader(sep)
      // If the line separator is `None`, it covers `\r`, `\r\n` and `\n`.
      case _ => new LineRecordReader()
    }

    reader.initialize(fileSplit, hadoopAttemptContext)
    new RecordReaderIterator(reader)
  }

  override def hasNext: Boolean = {
    iterator.hasNext
  }

  override def next(): Text = {
    iterator.next()
  }

  override def close(): Unit = {
    iterator.close()
  }
} 
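A usage sketch for the iterator above; the path and byte range are illustrative, and passing None for the separator lets the record reader treat \r, \r\n and \n as line breaks:

import org.apache.hadoop.conf.Configuration

val lines = new HadoopLineIterator("hdfs:///tmp/sample.txt", 0L, 1024L, None, new Configuration())
try {
  lines.foreach(line => println(line.toString)) // elements are org.apache.hadoop.io.Text
} finally {
  lines.close()
}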
Example 33
Source File: VideoDisplay.scala    From jvm-toxcore-c   with GNU General Public License v3.0 5 votes vote down vote up
package im.tox.tox4j.av.callbacks.video

import java.io.Closeable

import im.tox.tox4j.av.data.{ Height, Width }
import im.tox.tox4j.testing.autotest.AutoTestSuite.timed
import org.scalatest.Assertions

import scala.util.Try

abstract class VideoDisplay[Parsed, Canvas] extends Assertions with Closeable {

  def width: Width
  def height: Height

  protected def canvas: Try[Canvas]
  protected def parse(
    y: Array[Byte], u: Array[Byte], v: Array[Byte],
    yStride: Int, uStride: Int, vStride: Int
  ): Parsed
  protected def displaySent(canvas: Canvas, frameNumber: Int, parsed: Parsed): Unit
  protected def displayReceived(canvas: Canvas, frameNumber: Int, parsed: Parsed): Unit

  final def displaySent(frameNumber: Int, y: Array[Byte], u: Array[Byte], v: Array[Byte]): Unit = {
    val width = this.width.value
    canvas.foreach(displaySent(_, frameNumber, parse(y, u, v, width, width / 2, width / 2)))
  }

  
  final def displayReceived(
    frameNumber: Int,
    y: Array[Byte], u: Array[Byte], v: Array[Byte],
    yStride: Int, uStride: Int, vStride: Int
  ): Option[(Int, Int)] = {
    canvas.toOption.map { canvas =>
      val (parseTime, parsed) = timed(parse(y, u, v, yStride, uStride, vStride))
      val displayTime = timed(displayReceived(canvas, frameNumber, parsed))

      (parseTime, displayTime)
    }
  }

} 
Example 34
Source File: ArmUtils.scala    From advanced-scala-code   with Apache License 2.0 5 votes vote down vote up
package arm




object ArmUtils {

  import java.io.Closeable
  def usingCloseable[A](closeable: Closeable)(block: => A): A = {
    try {
      block
    } finally {
      closeable.close()
    }
  }
  import scala.language.reflectiveCalls // the structural type bound below relies on reflective calls
  def using[A, C <: { def close() }](closeable: C)(block: => A): A = {
    try {
      block
    } finally {
      closeable.close()
    }
  }
} 
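A short sketch of both helpers above. The block is by-name and never receives the resource, so the caller keeps its own reference; the structural-type variant accepts anything that merely defines close():

import java.io.StringReader

val reader = new StringReader("abc")
val firstChar = ArmUtils.usingCloseable(reader) { reader.read() } // reader is closed afterwards

// Duck-typed: Session is not a Closeable, but it has a close() method.
class Session { def close(): Unit = println("session closed") }
val session = new Session
val outcome = ArmUtils.using(session) { "done" }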
Example 35
Source File: TransportServer.scala    From aloha   with Apache License 2.0 5 votes vote down vote up
package me.jrwang.aloha.transport.server

import java.io.Closeable
import java.net.InetSocketAddress
import java.util.concurrent.TimeUnit

import io.netty.bootstrap.ServerBootstrap
import io.netty.channel.{ChannelFuture, ChannelInitializer, ChannelOption}
import io.netty.channel.socket.SocketChannel
import me.jrwang.aloha.common.Logging
import me.jrwang.aloha.common.util.Utils
import me.jrwang.aloha.transport.TransportContext
import me.jrwang.aloha.transport.util.{IOMode, NettyUtils}


class TransportServer(
    transportContext: TransportContext,
    hostToBind: String,
    portToBind: Int,
    appRpcHandler: RpcHandler,
    bootstraps: List[TransportServerBootstrap]
  ) extends Closeable with Logging {
  private val conf  = transportContext.conf

  private var port: Int = -1
  private var bootstrap: ServerBootstrap = _
  private var channelFuture: ChannelFuture = _

  try
    init()
  catch {
    case e: RuntimeException =>
      Utils.closeQuietly(this)
      throw e
  }

  def init(): Unit = {
    val ioMode = IOMode.valueOf(conf.ioMode)
    val bossGroup = NettyUtils.createEventLoop(ioMode, conf.serverThreads, conf.module + "-server")
    val workerGroup = bossGroup
    val allocator = NettyUtils.createPooledByteBufAllocator(conf.preferDirectBufs, true, conf.serverThreads)

    bootstrap = new ServerBootstrap()
      .group(bossGroup, workerGroup)
      .channel(NettyUtils.getServerChannelClass(ioMode))
      .option(ChannelOption.ALLOCATOR, allocator)
      .childOption(ChannelOption.ALLOCATOR, allocator)

    if (conf.backLog > 0)
      bootstrap.option[java.lang.Integer](ChannelOption.SO_BACKLOG, conf.backLog)
    if (conf.receiveBuf > 0)
      bootstrap.childOption[java.lang.Integer](ChannelOption.SO_RCVBUF, conf.receiveBuf)
    if (conf.sendBuf > 0)
      bootstrap.childOption[java.lang.Integer](ChannelOption.SO_SNDBUF, conf.sendBuf)

    bootstrap.childHandler(new ChannelInitializer[SocketChannel]() {
      override protected def initChannel(ch: SocketChannel): Unit = {
        val rpcHandler = bootstraps.foldLeft[RpcHandler](appRpcHandler)((r, b) => {
          b.doBootstrap(ch, r)
        })
        transportContext.initializePipeline(ch, rpcHandler)
      }
    })

    val address = if (hostToBind == null)
      new InetSocketAddress(portToBind)
    else
      new InetSocketAddress(hostToBind, portToBind)
    channelFuture = bootstrap.bind(address)
    channelFuture.syncUninterruptibly
    port = channelFuture.channel.localAddress.asInstanceOf[InetSocketAddress].getPort
    logDebug(s"Transport server started on port: $port")
  }

  def getPort: Int = {
    if (port == -1)
      throw new IllegalStateException("Server not initialized")
    port
  }

  def awaitTermination(): Unit = {
    channelFuture.channel().closeFuture().sync()
  }

  override def close(): Unit = {
    if (channelFuture != null) {
      // close is a local operation and should finish within milliseconds; timeout just to be safe
      channelFuture.channel.close.awaitUninterruptibly(10, TimeUnit.SECONDS)
      channelFuture = null
    }
    if (bootstrap != null && bootstrap.config().group() != null)
      bootstrap.config().group().shutdownGracefully
    if (bootstrap != null && bootstrap.config().childGroup() != null)
      bootstrap.config().childGroup().shutdownGracefully
    bootstrap = null
  }
} 
Example 36
Source File: package.scala    From ClassDependenceAnalyser   with GNU General Public License v2.0 5 votes vote down vote up
package com.github.jllk

import java.io.{Closeable, IOException}


package object analyser {

  def inSafe[A](input: Closeable)(fun: => A): A = {
    require(input != null)
    try {
      fun
    } finally {
      try {
        input.close()
      } catch {
        case e: IOException => println(s"IOException happened! ${e.getMessage}")
      }
    }
  }

  def isEmpty(str: String): Boolean = str match {
    case null | "" => true
    case _ => false
  }

  def remove[A](list: List[A], index: Int) = {
    val (start, _ :: end) = list.splitAt(index)
    start ::: end
  }


} 
Example 37
Source File: Using.scala    From unicorn   with Apache License 2.0 5 votes vote down vote up
package unicorn.util

import java.io.Closeable


object Using {
  def apply[S <: Closeable, T](resource: S)(use: S => T): T = {
    try {
      use(resource)
    }
    finally {
      if (resource != null) resource.close
    }
  }
} 
Example 38
Source File: BlockTransferService.scala    From BigDatalog   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.network

import java.io.Closeable
import java.nio.ByteBuffer

import scala.concurrent.{Promise, Await, Future}
import scala.concurrent.duration.Duration

import org.apache.spark.Logging
import org.apache.spark.network.buffer.{NioManagedBuffer, ManagedBuffer}
import org.apache.spark.network.shuffle.{ShuffleClient, BlockFetchingListener}
import org.apache.spark.storage.{BlockManagerId, BlockId, StorageLevel}

private[spark]
abstract class BlockTransferService extends ShuffleClient with Closeable with Logging {

  
  def uploadBlockSync(
      hostname: String,
      port: Int,
      execId: String,
      blockId: BlockId,
      blockData: ManagedBuffer,
      level: StorageLevel): Unit = {
    Await.result(uploadBlock(hostname, port, execId, blockId, blockData, level), Duration.Inf)
  }
} 
Example 39
Source File: FileBasedWriteAheadLogReader.scala    From multi-tenancy-spark   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.streaming.util

import java.io.{Closeable, EOFException, IOException}
import java.nio.ByteBuffer

import org.apache.hadoop.conf.Configuration

import org.apache.spark.internal.Logging


private[streaming] class FileBasedWriteAheadLogReader(path: String, conf: Configuration)
  extends Iterator[ByteBuffer] with Closeable with Logging {

  private val instream = HdfsUtils.getInputStream(path, conf)
  private var closed = (instream == null) // the file may be deleted as we're opening the stream
  private var nextItem: Option[ByteBuffer] = None

  override def hasNext: Boolean = synchronized {
    if (closed) {
      return false
    }

    if (nextItem.isDefined) { // handle the case where hasNext is called without calling next
      true
    } else {
      try {
        val length = instream.readInt()
        val buffer = new Array[Byte](length)
        instream.readFully(buffer)
        nextItem = Some(ByteBuffer.wrap(buffer))
        logTrace("Read next item " + nextItem.get)
        true
      } catch {
        case e: EOFException =>
          logDebug("Error reading next item, EOF reached", e)
          close()
          false
        case e: IOException =>
          logWarning("Error while trying to read data. If the file was deleted, " +
            "this should be okay.", e)
          close()
          if (HdfsUtils.checkFileExists(path, conf)) {
            // If file exists, this could be a legitimate error
            throw e
          } else {
            // File was deleted. This can occur when the daemon cleanup thread takes time to
            // delete the file during recovery.
            false
          }

        case e: Exception =>
          logWarning("Error while trying to read data from HDFS.", e)
          close()
          throw e
      }
    }
  }

  override def next(): ByteBuffer = synchronized {
    val data = nextItem.getOrElse {
      close()
      throw new IllegalStateException(
        "next called without calling hasNext or after hasNext returned false")
    }
    nextItem = None // Ensure the next hasNext call loads new data.
    data
  }

  override def close(): Unit = synchronized {
    if (!closed) {
      instream.close()
    }
    closed = true
  }
} 
Example 40
Source File: FileBasedWriteAheadLogRandomReader.scala    From BigDatalog   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.streaming.util

import java.io.Closeable
import java.nio.ByteBuffer

import org.apache.hadoop.conf.Configuration


private[streaming] class FileBasedWriteAheadLogRandomReader(path: String, conf: Configuration)
  extends Closeable {

  private val instream = HdfsUtils.getInputStream(path, conf)
  private var closed = (instream == null) // the file may be deleted as we're opening the stream

  def read(segment: FileBasedWriteAheadLogSegment): ByteBuffer = synchronized {
    assertOpen()
    instream.seek(segment.offset)
    val nextLength = instream.readInt()
    HdfsUtils.checkState(nextLength == segment.length,
      s"Expected message length to be ${segment.length}, but was $nextLength")
    val buffer = new Array[Byte](nextLength)
    instream.readFully(buffer)
    ByteBuffer.wrap(buffer)
  }

  override def close(): Unit = synchronized {
    closed = true
    instream.close()
  }

  private def assertOpen() {
    HdfsUtils.checkState(!closed, "Stream is closed. Create a new Reader to read from the file.")
  }
} 
Example 41
Source File: Reader.scala    From pulsar4s   with Apache License 2.0 5 votes vote down vote up
package com.sksamuel.pulsar4s

import java.io.Closeable
import java.util.concurrent.TimeUnit

import scala.concurrent.duration.Duration

trait Reader[T] extends Closeable {
  def hasMessageAvailable: Boolean
  def topic: Topic
  def next: ConsumerMessage[T]
  def next(duration: Duration): Option[ConsumerMessage[T]]
  def nextAsync[F[_] : AsyncHandler]: F[ConsumerMessage[T]]
  def isConnected: Boolean
  def closeAsync[F[_] : AsyncHandler]: F[Unit]
  def seek(timestamp: Long): Unit
  def seek(messageId: MessageId): Unit
  def seekAsync[F[_] : AsyncHandler](timestamp: Long): F[Unit]
  def seekAsync[F[_] : AsyncHandler](messageId: MessageId): F[Unit] 
  def hasReachedEndOfTopic: Boolean
}

class DefaultReader[T](reader: org.apache.pulsar.client.api.Reader[T],
                       override val topic: Topic) extends Reader[T] {

  override def hasMessageAvailable: Boolean = reader.hasMessageAvailable

  override def next: ConsumerMessage[T] = ConsumerMessage.fromJava(reader.readNext)

  override def next(duration: Duration): Option[ConsumerMessage[T]] =
    Option(reader.readNext(duration.toSeconds.toInt, TimeUnit.SECONDS)).map(ConsumerMessage.fromJava)

  override def nextAsync[F[_] : AsyncHandler]: F[ConsumerMessage[T]] = implicitly[AsyncHandler[F]].nextAsync(reader)

  override def isConnected: Boolean = reader.isConnected

  override def close(): Unit = reader.close()
  override def closeAsync[F[_] : AsyncHandler]: F[Unit] = implicitly[AsyncHandler[F]].close(reader)

  override def seek(timestamp: Long): Unit = reader.seek(timestamp)
  override def seek(messageId: MessageId): Unit = reader.seek(messageId)
  override def seekAsync[F[_] : AsyncHandler](timestamp: Long): F[Unit] = implicitly[AsyncHandler[F]].seekAsync(reader, timestamp)
  override def seekAsync[F[_] : AsyncHandler](messageId: MessageId): F[Unit] = implicitly[AsyncHandler[F]].seekAsync(reader, messageId)

  
  override def hasReachedEndOfTopic: Boolean = reader.hasReachedEndOfTopic

} 
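A minimal consumption loop against the Reader trait above; obtaining a Reader[String] from a pulsar4s client is assumed and not shown:

import scala.concurrent.duration._

def drain(reader: Reader[String]): Unit =
  try {
    while (reader.hasMessageAvailable) {
      // Poll with a timeout; None means no message arrived within one second.
      reader.next(1.second).foreach(msg => println(msg))
    }
  } finally {
    reader.close()
  }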
Example 42
Source File: SocketAuthHelperSuite.scala    From Spark-2.3.1   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.security

import java.io.Closeable
import java.net._

import org.apache.spark.{SparkConf, SparkFunSuite}
import org.apache.spark.internal.config._
import org.apache.spark.util.Utils

class SocketAuthHelperSuite extends SparkFunSuite {

  private val conf = new SparkConf()
  private val authHelper = new SocketAuthHelper(conf)

  test("successful auth") {
    Utils.tryWithResource(new ServerThread()) { server =>
      Utils.tryWithResource(server.createClient()) { client =>
        authHelper.authToServer(client)
        server.close()
        server.join()
        assert(server.error == null)
        assert(server.authenticated)
      }
    }
  }

  test("failed auth") {
    Utils.tryWithResource(new ServerThread()) { server =>
      Utils.tryWithResource(server.createClient()) { client =>
        val badHelper = new SocketAuthHelper(new SparkConf().set(AUTH_SECRET_BIT_LENGTH, 128))
        intercept[IllegalArgumentException] {
          badHelper.authToServer(client)
        }
        server.close()
        server.join()
        assert(server.error != null)
        assert(!server.authenticated)
      }
    }
  }

  private class ServerThread extends Thread with Closeable {

    private val ss = new ServerSocket()
    ss.bind(new InetSocketAddress(InetAddress.getLoopbackAddress(), 0))

    @volatile var error: Exception = _
    @volatile var authenticated = false

    setDaemon(true)
    start()

    def createClient(): Socket = {
      new Socket(InetAddress.getLoopbackAddress(), ss.getLocalPort())
    }

    override def run(): Unit = {
      var clientConn: Socket = null
      try {
        clientConn = ss.accept()
        authHelper.authClient(clientConn)
        authenticated = true
      } catch {
        case e: Exception =>
          error = e
      } finally {
        Option(clientConn).foreach(_.close())
      }
    }

    override def close(): Unit = {
      try {
        ss.close()
      } finally {
        interrupt()
      }
    }

  }

} 
Example 43
Source File: BlockTransferService.scala    From Spark-2.3.1   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.network

import java.io.Closeable
import java.nio.ByteBuffer

import scala.concurrent.{Future, Promise}
import scala.concurrent.duration.Duration
import scala.reflect.ClassTag

import org.apache.spark.internal.Logging
import org.apache.spark.network.buffer.{FileSegmentManagedBuffer, ManagedBuffer, NioManagedBuffer}
import org.apache.spark.network.shuffle.{BlockFetchingListener, ShuffleClient, TempFileManager}
import org.apache.spark.storage.{BlockId, StorageLevel}
import org.apache.spark.util.ThreadUtils

private[spark]
abstract class BlockTransferService extends ShuffleClient with Closeable with Logging {

  
  def uploadBlockSync(
      hostname: String,
      port: Int,
      execId: String,
      blockId: BlockId,
      blockData: ManagedBuffer,
      level: StorageLevel,
      classTag: ClassTag[_]): Unit = {
    val future = uploadBlock(hostname, port, execId, blockId, blockData, level, classTag)
    ThreadUtils.awaitResult(future, Duration.Inf)
  }
} 
Example 44
Source File: FileBasedWriteAheadLogReader.scala    From Spark-2.3.1   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.streaming.util

import java.io.{Closeable, EOFException, IOException}
import java.nio.ByteBuffer

import org.apache.hadoop.conf.Configuration

import org.apache.spark.internal.Logging


private[streaming] class FileBasedWriteAheadLogReader(path: String, conf: Configuration)
  extends Iterator[ByteBuffer] with Closeable with Logging {

  private val instream = HdfsUtils.getInputStream(path, conf)
  private var closed = (instream == null) // the file may be deleted as we're opening the stream
  private var nextItem: Option[ByteBuffer] = None

  override def hasNext: Boolean = synchronized {
    if (closed) {
      return false
    }

    if (nextItem.isDefined) { // handle the case where hasNext is called without calling next
      true
    } else {
      try {
        val length = instream.readInt()
        val buffer = new Array[Byte](length)
        instream.readFully(buffer)
        nextItem = Some(ByteBuffer.wrap(buffer))
        logTrace("Read next item " + nextItem.get)
        true
      } catch {
        case e: EOFException =>
          logDebug("Error reading next item, EOF reached", e)
          close()
          false
        case e: IOException =>
          logWarning("Error while trying to read data. If the file was deleted, " +
            "this should be okay.", e)
          close()
          if (HdfsUtils.checkFileExists(path, conf)) {
            // If file exists, this could be a legitimate error
            throw e
          } else {
            // File was deleted. This can occur when the daemon cleanup thread takes time to
            // delete the file during recovery.
            false
          }

        case e: Exception =>
          logWarning("Error while trying to read data from HDFS.", e)
          close()
          throw e
      }
    }
  }

  override def next(): ByteBuffer = synchronized {
    val data = nextItem.getOrElse {
      close()
      throw new IllegalStateException(
        "next called without calling hasNext or after hasNext returned false")
    }
    nextItem = None // Ensure the next hasNext call loads new data.
    data
  }

  override def close(): Unit = synchronized {
    if (!closed) {
      instream.close()
    }
    closed = true
  }
} 
Example 45
Source File: FileBasedWriteAheadLogRandomReader.scala    From Spark-2.3.1   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.streaming.util

import java.io.Closeable
import java.nio.ByteBuffer

import org.apache.hadoop.conf.Configuration


private[streaming] class FileBasedWriteAheadLogRandomReader(path: String, conf: Configuration)
  extends Closeable {

  private val instream = HdfsUtils.getInputStream(path, conf)
  private var closed = (instream == null) // the file may be deleted as we're opening the stream

  def read(segment: FileBasedWriteAheadLogSegment): ByteBuffer = synchronized {
    assertOpen()
    instream.seek(segment.offset)
    val nextLength = instream.readInt()
    HdfsUtils.checkState(nextLength == segment.length,
      s"Expected message length to be ${segment.length}, but was $nextLength")
    val buffer = new Array[Byte](nextLength)
    instream.readFully(buffer)
    ByteBuffer.wrap(buffer)
  }

  override def close(): Unit = synchronized {
    closed = true
    instream.close()
  }

  private def assertOpen() {
    HdfsUtils.checkState(!closed, "Stream is closed. Create a new Reader to read from the file.")
  }
} 
Example 46
Source File: HadoopFileWholeTextReader.scala    From Spark-2.3.1   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.sql.execution.datasources

import java.io.Closeable
import java.net.URI

import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.Path
import org.apache.hadoop.io.Text
import org.apache.hadoop.mapreduce._
import org.apache.hadoop.mapreduce.lib.input.CombineFileSplit
import org.apache.hadoop.mapreduce.task.TaskAttemptContextImpl

import org.apache.spark.input.WholeTextFileRecordReader


class HadoopFileWholeTextReader(file: PartitionedFile, conf: Configuration)
  extends Iterator[Text] with Closeable {
  private val iterator = {
    val fileSplit = new CombineFileSplit(
      Array(new Path(new URI(file.filePath))),
      Array(file.start),
      Array(file.length),
      // TODO: Implement Locality
      Array.empty[String])
    val attemptId = new TaskAttemptID(new TaskID(new JobID(), TaskType.MAP, 0), 0)
    val hadoopAttemptContext = new TaskAttemptContextImpl(conf, attemptId)
    val reader = new WholeTextFileRecordReader(fileSplit, hadoopAttemptContext, 0)
    reader.initialize(fileSplit, hadoopAttemptContext)
    new RecordReaderIterator(reader)
  }

  override def hasNext: Boolean = iterator.hasNext

  override def next(): Text = iterator.next()

  override def close(): Unit = iterator.close()
} 
Example 47
Source File: HadoopFileLinesReader.scala    From Spark-2.3.1   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.sql.execution.datasources

import java.io.Closeable
import java.net.URI

import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.Path
import org.apache.hadoop.io.Text
import org.apache.hadoop.mapreduce._
import org.apache.hadoop.mapreduce.lib.input.{FileSplit, LineRecordReader}
import org.apache.hadoop.mapreduce.task.TaskAttemptContextImpl


class HadoopFileLinesReader(
    file: PartitionedFile, conf: Configuration) extends Iterator[Text] with Closeable {
  private val iterator = {
    val fileSplit = new FileSplit(
      new Path(new URI(file.filePath)),
      file.start,
      file.length,
      // TODO: Implement Locality
      Array.empty)
    val attemptId = new TaskAttemptID(new TaskID(new JobID(), TaskType.MAP, 0), 0)
    val hadoopAttemptContext = new TaskAttemptContextImpl(conf, attemptId)
    val reader = new LineRecordReader()
    reader.initialize(fileSplit, hadoopAttemptContext)
    new RecordReaderIterator(reader)
  }

  override def hasNext: Boolean = iterator.hasNext

  override def next(): Text = iterator.next()

  override def close(): Unit = iterator.close()
} 
Example 48
Source File: RecordReaderIterator.scala    From Spark-2.3.1   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.sql.execution.datasources

import java.io.Closeable

import org.apache.hadoop.mapreduce.RecordReader

import org.apache.spark.sql.catalyst.InternalRow


class RecordReaderIterator[T](
    private[this] var rowReader: RecordReader[_, T]) extends Iterator[T] with Closeable {
  private[this] var havePair = false
  private[this] var finished = false

  override def hasNext: Boolean = {
    if (!finished && !havePair) {
      finished = !rowReader.nextKeyValue
      if (finished) {
        // Close and release the reader here; close() will also be called when the task
        // completes, but for tasks that read from many files, it helps to release the
        // resources early.
        close()
      }
      havePair = !finished
    }
    !finished
  }

  override def next(): T = {
    if (!hasNext) {
      throw new java.util.NoSuchElementException("End of stream")
    }
    havePair = false
    rowReader.getCurrentValue
  }

  override def close(): Unit = {
    if (rowReader != null) {
      try {
        rowReader.close()
      } finally {
        rowReader = null
      }
    }
  }
} 
Example 49
Source File: FileBasedWriteAheadLogReader.scala    From spark1.52   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.streaming.util

import java.io.{Closeable, EOFException}
import java.nio.ByteBuffer

import org.apache.hadoop.conf.Configuration
import org.apache.spark.Logging


private[streaming] class FileBasedWriteAheadLogReader(path: String, conf: Configuration)
  extends Iterator[ByteBuffer] with Closeable with Logging {

  private val instream = HdfsUtils.getInputStream(path, conf)
  private var closed = false
  // None is declared as an object, not a class: use None when there is no value and Some to wrap a value when there is one; both are subclasses of Option
  private var nextItem: Option[ByteBuffer] = None

  override def hasNext: Boolean = synchronized {
    if (closed) {
      // If the reader is already closed, there is certainly no next element
      return false
    }
  
    if (nextItem.isDefined) { // handle the case where hasNext is called without calling next
      true
    } else {
      try {
        // Read the next record; if one is available, there really is a next element
        val length = instream.readInt()
        val buffer = new Array[Byte](length)
        instream.readFully(buffer)
        nextItem = Some(ByteBuffer.wrap(buffer))
        logTrace("Read next item " + nextItem.get)
        true
      } catch {
        case e: EOFException =>
          logDebug("Error reading next item, EOF reached", e)
          close()
          false
        case e: Exception =>
          logWarning("Error while trying to read data from HDFS.", e)
          close()
          throw e
      }
    }
  }

  override def next(): ByteBuffer = synchronized {
    val data = nextItem.getOrElse {
      close()
      throw new IllegalStateException(
        "next called without calling hasNext or after hasNext returned false")
    }
    // Resetting to None (no value present) ensures the next hasNext call loads new data.
    nextItem = None
    data
  }

  override def close(): Unit = synchronized {
    if (!closed) {
      instream.close()
    }
    closed = true
  }
} 
Example 50
Source File: FileBasedWriteAheadLogRandomReader.scala    From spark1.52   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.streaming.util

import java.io.Closeable
import java.nio.ByteBuffer

import org.apache.hadoop.conf.Configuration


private[streaming] class FileBasedWriteAheadLogRandomReader(path: String, conf: Configuration)
  extends Closeable {

  private val instream = HdfsUtils.getInputStream(path, conf)
  private var closed = (instream == null) // the file may be deleted as we're opening the stream

  def read(segment: FileBasedWriteAheadLogSegment): ByteBuffer = synchronized {
    assertOpen()
    // Seek to the offset at which this log record is stored
    instream.seek(segment.offset)
    // Read the record length
    val nextLength = instream.readInt()
    HdfsUtils.checkState(nextLength == segment.length,
      s"Expected message length to be ${segment.length}, but was $nextLength")
    val buffer = new Array[Byte](nextLength)
    // Read the record contents
    instream.readFully(buffer)
    // Return the contents as a ByteBuffer
    ByteBuffer.wrap(buffer)
  }

  override def close(): Unit = synchronized {
    closed = true
    instream.close()
  }

  private def assertOpen() {
    HdfsUtils.checkState(!closed, "Stream is closed. Create a new Reader to read from the file.")
  }
} 
Example 51
Source File: Executor.scala    From Linkis   with Apache License 2.0 5 votes vote down vote up
package com.webank.wedatasphere.linkis.scheduler.executer

import java.io.Closeable

import com.webank.wedatasphere.linkis.protocol.engine.EngineState
import com.webank.wedatasphere.linkis.protocol.engine.EngineState.EngineState


trait Executor extends Closeable {
  def getId: Long
  def execute(executeRequest: ExecuteRequest): ExecuteResponse
  def state: ExecutorState.ExecutorState

  def getExecutorInfo: ExecutorInfo
}
object ExecutorState {
  type ExecutorState = EngineState
  val Starting = EngineState.Starting
  val Idle = EngineState.Idle
  val Busy = EngineState.Busy
  val ShuttingDown = EngineState.ShuttingDown
  val Error = EngineState.Error
  val Dead = EngineState.Dead
  val Success = EngineState.Success

  def apply(x: Int): ExecutorState = EngineState(x)
  def isCompleted(state: ExecutorState) = EngineState.isCompleted(state.asInstanceOf[EngineState])
  def isAvailable(state: ExecutorState) = EngineState.isAvailable(state.asInstanceOf[EngineState])
} 
Example 52
Source File: package.scala    From maha   with Apache License 2.0 5 votes vote down vote up
// Copyright 2017, Yahoo Holdings Inc.
// Licensed under the terms of the Apache License 2.0. Please see LICENSE file in project root for terms.
package com.yahoo.maha

import java.io.Closeable

import org.slf4j.LoggerFactory

import scala.util.{Failure, Success, Try}


package object report {
  private lazy val logger = LoggerFactory.getLogger("safeOperation")
  def safeOperation[A, B](operation: A)(cleanup: A => Unit)(doWork: A => B): Try[B] = {
    try {
      Success(doWork(operation))
    } catch {
      case e: Exception =>
        logger.error("Failed on safeOperation doWork", e)
        try {
          if (operation != null) {
            cleanup(operation)
          }
        } catch {
          case e: Exception =>
            logger.error("Failed on safeOperation cleanup", e)
        }
        Failure(e)
    }
  }

  def safeCloseable[A <: Closeable, B](closeable: A)(doWork: A => B): Try[B] = {
    safeOperation(closeable)(_.close())(doWork)
  }

} 
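A usage sketch for safeCloseable above. Note that the helper closes the resource only when doWork throws, so on success the caller still owns it and should close it; the file name is hypothetical:

import java.io.FileInputStream
import scala.util.Try
import com.yahoo.maha.report._

val in = new FileInputStream("report.csv")
val firstByte: Try[Int] = safeCloseable(in)(_.read())
if (firstByte.isSuccess) in.close() // on failure, safeCloseable has already closed the stream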
Example 53
Source File: package.scala    From maha   with Apache License 2.0 5 votes vote down vote up
// Copyright 2017, Yahoo Holdings Inc.
// Licensed under the terms of the Apache License 2.0. Please see LICENSE file in project root for terms.
package com.yahoo.maha.service

import java.io.Closeable

import com.google.common.io.Closer
import com.yahoo.maha.service.error.{FailedToConstructFactory, JsonParseError, MahaServiceError}

import scala.reflect.runtime.universe._
import scala.reflect.runtime._
import scala.reflect.runtime.{universe => ru}
import _root_.scalaz._
import syntax.validation._


package object factory {

  def getFactory[T](clazz: String)(implicit tag: TypeTag[T]) : MahaServiceConfig.MahaConfigResult[T] = {
    getFactory(clazz, None)
  }
  def getFactory[T](clazz: String, closer: Closer)(implicit tag: TypeTag[T]) : MahaServiceConfig.MahaConfigResult[T] = {
    getFactory(clazz, Option(closer))
  }
  def getFactory[T](clazz: String, closerOption: Option[Closer])(implicit tag: TypeTag[T]) : MahaServiceConfig.MahaConfigResult[T] = {
    try {
      val m = ru.runtimeMirror(Thread.currentThread().getContextClassLoader)
      val typ: universe.ClassSymbol = m.staticClass(clazz)
      val cm = m.reflectClass(typ)
      val ctor = cm.symbol.primaryConstructor.asMethod
      val t: T = cm.reflectConstructor(ctor).apply().asInstanceOf[T]
      if(t.isInstanceOf[Closeable]) {
        closerOption.foreach(_.register(t.asInstanceOf[Closeable]))
      }
      t.successNel[MahaServiceError]
    } catch {
      case t: Exception =>
        FailedToConstructFactory(s"Failed to construct factory : $clazz", Option(t)).asInstanceOf[MahaServiceError].failureNel[T]
    }

  }

  import org.json4s.scalaz.JsonScalaz._
  implicit def resultToMahaSerivceError[T](r: Result[T]) : MahaServiceConfig.MahaConfigResult[T] = {
    r.leftMap( nel => nel.map(err => JsonParseError(err.toString)))
  }
} 
Example 54
Source File: HadoopFileWholeTextReader.scala    From XSQL   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.sql.execution.datasources

import java.io.Closeable
import java.net.URI

import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.Path
import org.apache.hadoop.io.Text
import org.apache.hadoop.mapreduce._
import org.apache.hadoop.mapreduce.lib.input.CombineFileSplit
import org.apache.hadoop.mapreduce.task.TaskAttemptContextImpl

import org.apache.spark.input.WholeTextFileRecordReader


class HadoopFileWholeTextReader(file: PartitionedFile, conf: Configuration)
  extends Iterator[Text] with Closeable {
  private val iterator = {
    val fileSplit = new CombineFileSplit(
      Array(new Path(new URI(file.filePath))),
      Array(file.start),
      Array(file.length),
      // TODO: Implement Locality
      Array.empty[String])
    val attemptId = new TaskAttemptID(new TaskID(new JobID(), TaskType.MAP, 0), 0)
    val hadoopAttemptContext = new TaskAttemptContextImpl(conf, attemptId)
    val reader = new WholeTextFileRecordReader(fileSplit, hadoopAttemptContext, 0)
    reader.initialize(fileSplit, hadoopAttemptContext)
    new RecordReaderIterator(reader)
  }

  override def hasNext: Boolean = iterator.hasNext

  override def next(): Text = iterator.next()

  override def close(): Unit = iterator.close()
} 
Example 55
Source File: HadoopFileLinesReader.scala    From XSQL   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.sql.execution.datasources

import java.io.Closeable
import java.net.URI

import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.Path
import org.apache.hadoop.io.Text
import org.apache.hadoop.mapreduce._
import org.apache.hadoop.mapreduce.lib.input.{FileSplit, LineRecordReader}
import org.apache.hadoop.mapreduce.task.TaskAttemptContextImpl


class HadoopFileLinesReader(
    file: PartitionedFile,
    lineSeparator: Option[Array[Byte]],
    conf: Configuration) extends Iterator[Text] with Closeable {

  def this(file: PartitionedFile, conf: Configuration) = this(file, None, conf)

  private val iterator = {
    val fileSplit = new FileSplit(
      new Path(new URI(file.filePath)),
      file.start,
      file.length,
      // TODO: Implement Locality
      Array.empty)
    val attemptId = new TaskAttemptID(new TaskID(new JobID(), TaskType.MAP, 0), 0)
    val hadoopAttemptContext = new TaskAttemptContextImpl(conf, attemptId)

    val reader = lineSeparator match {
      case Some(sep) => new LineRecordReader(sep)
      // If the line separator is `None`, it covers `\r`, `\r\n` and `\n`.
      case _ => new LineRecordReader()
    }

    reader.initialize(fileSplit, hadoopAttemptContext)
    new RecordReaderIterator(reader)
  }

  override def hasNext: Boolean = iterator.hasNext

  override def next(): Text = iterator.next()

  override def close(): Unit = iterator.close()
} 
Example 56
Source File: RecordReaderIterator.scala    From XSQL   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.sql.execution.datasources

import java.io.Closeable

import org.apache.hadoop.mapreduce.RecordReader

import org.apache.spark.sql.catalyst.InternalRow


class RecordReaderIterator[T](
    private[this] var rowReader: RecordReader[_, T]) extends Iterator[T] with Closeable {
  private[this] var havePair = false
  private[this] var finished = false

  override def hasNext: Boolean = {
    if (!finished && !havePair) {
      finished = !rowReader.nextKeyValue
      if (finished) {
        // Close and release the reader here; close() will also be called when the task
        // completes, but for tasks that read from many files, it helps to release the
        // resources early.
        close()
      }
      havePair = !finished
    }
    !finished
  }

  override def next(): T = {
    if (!hasNext) {
      throw new java.util.NoSuchElementException("End of stream")
    }
    havePair = false
    rowReader.getCurrentValue
  }

  override def close(): Unit = {
    if (rowReader != null) {
      try {
        rowReader.close()
      } finally {
        rowReader = null
      }
    }
  }
} 
Example 57
Source File: LeveldbDeletionActor.scala    From eventuate   with Apache License 2.0 5 votes vote down vote up
package com.rbmhtechnology.eventuate.log.leveldb

import java.io.Closeable

import akka.actor.Actor
import akka.actor.PoisonPill
import akka.actor.Props
import com.rbmhtechnology.eventuate.log.leveldb.LeveldbEventLog._

import org.iq80.leveldb.DB
import org.iq80.leveldb.ReadOptions
import org.iq80.leveldb.WriteOptions

import scala.annotation.tailrec
import scala.concurrent.Promise

private object LeveldbDeletionActor {
  case object DeleteBatch

  def props(leveldb: DB, leveldbReadOptions: ReadOptions, leveldbWriteOptions: WriteOptions, batchSize: Int, toSequenceNr: Long, promise: Promise[Unit]): Props =
    Props(new LeveldbDeletionActor(leveldb, leveldbReadOptions, leveldbWriteOptions, batchSize, toSequenceNr, promise))
}

private class LeveldbDeletionActor(
  val leveldb: DB,
  val leveldbReadOptions: ReadOptions,
  val leveldbWriteOptions: WriteOptions,
  batchSize: Int,
  toSequenceNr: Long,
  promise: Promise[Unit])
  extends Actor with WithBatch {

  import LeveldbDeletionActor._

  val eventKeyIterator: CloseableIterator[EventKey] = newEventKeyIterator

  override def preStart() = self ! DeleteBatch

  override def postStop() = eventKeyIterator.close()

  override def receive = {
    case DeleteBatch =>
      withBatch { batch =>
        eventKeyIterator.take(batchSize).foreach { eventKey =>
          batch.delete(eventKeyBytes(eventKey.classifier, eventKey.sequenceNr))
        }
      }
      if (eventKeyIterator.hasNext) {
        self ! DeleteBatch
      } else {
        promise.success(())
        self ! PoisonPill
      }
  }

  private def newEventKeyIterator: CloseableIterator[EventKey] = {
    new Iterator[EventKey] with Closeable {
      val iterator = leveldb.iterator(leveldbReadOptions.snapshot(leveldb.getSnapshot))
      iterator.seek(eventKeyBytes(EventKey.DefaultClassifier, 1L))

      @tailrec
      override def hasNext: Boolean = {
        val key = eventKey(iterator.peekNext().getKey)
        key != eventKeyEnd &&
          (key.sequenceNr <= toSequenceNr || {
            iterator.seek(eventKeyBytes(key.classifier + 1, 1L))
            hasNext
          })
      }

      override def next() = eventKey(iterator.next().getKey)
      override def close() = {
        iterator.close()
        leveldbReadOptions.snapshot().close()
      }
    }
  }
} 
Example 58
Source File: CassandraEventLogStore.scala    From eventuate   with Apache License 2.0 5 votes vote down vote up
package com.rbmhtechnology.eventuate.log.cassandra

import java.io.Closeable
import java.lang.{ Long => JLong }

import com.datastax.driver.core._
import com.rbmhtechnology.eventuate._
import com.rbmhtechnology.eventuate.log._

import scala.collection.JavaConverters._
import scala.collection.immutable.{ VectorBuilder, Seq }
import scala.concurrent.{ ExecutionContext, Future }

private[eventuate] class CassandraEventLogStore(cassandra: Cassandra, logId: String) {
  val preparedWriteEventStatement: PreparedStatement =
    cassandra.prepareWriteEvent(logId)

  val preparedReadEventsStatement: PreparedStatement =
    cassandra.prepareReadEvents(logId)

  def write(events: Seq[DurableEvent], partition: Long) =
    cassandra.executeBatch { batch =>
      events.foreach { event =>
        batch.add(preparedWriteEventStatement.bind(partition: JLong, event.localSequenceNr: JLong, cassandra.eventToByteBuffer(event)))
      }
    }

  def readAsync(fromSequenceNr: Long, toSequenceNr: Long, max: Int, fetchSize: Int)(implicit executor: ExecutionContext): Future[BatchReadResult] =
    readAsync(fromSequenceNr, toSequenceNr, max, Int.MaxValue, fetchSize, _ => true) // scanLimit unbounded; fetchSize keeps its role

  def readAsync(fromSequenceNr: Long, toSequenceNr: Long, max: Int, scanLimit: Int, fetchSize: Int, filter: DurableEvent => Boolean)(implicit executor: ExecutionContext): Future[BatchReadResult] =
    Future(read(fromSequenceNr, toSequenceNr, max, scanLimit, fetchSize, filter))

  def read(fromSequenceNr: Long, toSequenceNr: Long, max: Int, scanLimit: Int, fetchSize: Int, filter: DurableEvent => Boolean): BatchReadResult = {
    val iter = eventIterator(fromSequenceNr, toSequenceNr, fetchSize)
    val builder = new VectorBuilder[DurableEvent]

    var lastSequenceNr = fromSequenceNr - 1L
    var scanned = 0
    var filtered = 0

    while (iter.hasNext && filtered < max && scanned < scanLimit) {
      val event = iter.next()
      if (filter(event)) {
        builder += event
        filtered += 1
      }
      scanned += 1
      lastSequenceNr = event.localSequenceNr
    }
    BatchReadResult(builder.result(), lastSequenceNr)
  }

  def eventIterator(fromSequenceNr: Long, toSequenceNr: Long, fetchSize: Int): Iterator[DurableEvent] with Closeable =
    new EventIterator(fromSequenceNr, toSequenceNr, fetchSize)

  private class EventIterator(fromSequenceNr: Long, toSequenceNr: Long, fetchSize: Int) extends Iterator[DurableEvent] with Closeable {
    import cassandra.settings._
    import EventLog._

    var currentSequenceNr = math.max(fromSequenceNr, 1L)
    var currentPartition = partitionOf(currentSequenceNr, partitionSize)

    var currentIter = newIter()
    var read = currentSequenceNr != firstSequenceNr(currentPartition, partitionSize)

    def newIter(): Iterator[Row] =
      if (currentSequenceNr > toSequenceNr) Iterator.empty else read(lastSequenceNr(currentPartition, partitionSize) min toSequenceNr).iterator.asScala

    def read(upperSequenceNr: Long): ResultSet =
      cassandra.session.execute(preparedReadEventsStatement.bind(currentPartition: JLong, currentSequenceNr: JLong, upperSequenceNr: JLong).setFetchSize(fetchSize))

    @annotation.tailrec
    final def hasNext: Boolean = {
      if (currentIter.hasNext) {
        true
      } else if (read) {
        // some events read from current partition, try next partition
        currentPartition += 1
        currentSequenceNr = firstSequenceNr(currentPartition, partitionSize)
        currentIter = newIter()
        read = false
        hasNext
      } else  {
        // no events read from current partition, we're done
        false
      }
    }

    def next(): DurableEvent = {
      val row = currentIter.next()
      currentSequenceNr = row.getLong("sequence_nr")
      read = true
      cassandra.eventFromByteBuffer(row.getBytes("event"))
    }

    override def close(): Unit =
      ()
  }
} 
Example 59
Source File: TryWithSpec.scala    From hydra   with Apache License 2.0 5 votes vote down vote up
package hydra.common.util

import java.io.Closeable

import org.scalatest.matchers.should.Matchers
import org.scalatest.wordspec.AnyWordSpecLike

import scala.util.{Failure, Success}

class TryWithSpec extends AnyWordSpecLike with Matchers {
  // Exceptions and errors here so we don't pay the stack trace creation cost multiple times
  val getResourceException = new RuntimeException
  val inFunctionException = new RuntimeException
  val inCloseException = new RuntimeException
  val getResourceError = new OutOfMemoryError
  val inFunctionError = new OutOfMemoryError
  val inCloseError = new OutOfMemoryError

  val goodResource = new Closeable {
    override def toString: String = "good resource"
    def close(): Unit = {}
  }

  "TryWith" should {
    "catch exceptions getting the resource" in {
      TryWith(throw getResourceException)(println) shouldBe Failure(
        getResourceException
      )
    }

    "catch exceptions in the function" in {
      TryWith(goodResource) { _ =>
        throw inFunctionException
      } shouldBe Failure(inFunctionException)
    }

    "catch exceptions while closing" in {
      TryWith(new Closeable {
        def close(): Unit = throw inCloseException
      })(_.toString) shouldBe Failure(inCloseException)
    }

    "note suppressed exceptions" in {
      val ex = new RuntimeException
      val result = TryWith(new Closeable {
        def close(): Unit = throw inCloseException
      })(_ => throw ex)

      result shouldBe Failure(ex)
      val Failure(returnedException) = result
      returnedException.getSuppressed shouldBe Array(inCloseException)
    }

    "propagate errors getting the resource" in {
      intercept[OutOfMemoryError] {
        TryWith(throw getResourceError)(println)
      } shouldBe getResourceError
    }

    "propagate errors in the function" in {
      intercept[OutOfMemoryError] {
        TryWith(goodResource) { _ => throw inFunctionError }
      } shouldBe inFunctionError
    }

    "propagate errors while closing" in {
      intercept[OutOfMemoryError] {
        TryWith(new Closeable {
          def close(): Unit = throw inCloseError
        })(_.toString)
      } shouldBe inCloseError
    }

    "return the value from a successful run" in {
      TryWith(goodResource)(_.toString) shouldBe Success("good resource")
    }
  }
} 
Example 60
Source File: Resource.scala    From hydra   with Apache License 2.0 5 votes vote down vote up
package hydra.common.util

import java.io.Closeable


object Resource {

  def using[A <: Closeable, B](resource: A)(f: A => B): B = {
    require(resource != null, "The supplied resource was null.")
    try {
      f(resource)
    } finally {
      resource.close()
    }
  }
} 
Example 61
Source File: Utils.scala    From spark-integration   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.deploy.k8s.integrationtest

import java.io.Closeable
import java.net.URI

object Utils extends Logging {

  def tryWithResource[R <: Closeable, T](createResource: => R)(f: R => T): T = {
    val resource = createResource
    try f.apply(resource) finally resource.close()
  }

  def tryWithSafeFinally[T](block: => T)(finallyBlock: => Unit): T = {
    var originalThrowable: Throwable = null
    try {
      block
    } catch {
      case t: Throwable =>
        // Purposefully not using NonFatal, because even fatal exceptions
        // we don't want to have our finallyBlock suppress
        originalThrowable = t
        throw originalThrowable
    } finally {
      try {
        finallyBlock
      } catch {
        case t: Throwable =>
          if (originalThrowable != null) {
            originalThrowable.addSuppressed(t)
            logWarning(s"Suppressing exception in finally: " + t.getMessage, t)
            throw originalThrowable
          } else {
            throw t
          }
      }
    }
  }

  def checkAndGetK8sMasterUrl(rawMasterURL: String): String = {
    require(rawMasterURL.startsWith("k8s://"),
      "Kubernetes master URL must start with k8s://.")
    val masterWithoutK8sPrefix = rawMasterURL.substring("k8s://".length)

    // To handle master URLs, e.g., k8s://host:port.
    if (!masterWithoutK8sPrefix.contains("://")) {
      val resolvedURL = s"https://$masterWithoutK8sPrefix"
      logInfo("No scheme specified for kubernetes master URL, so defaulting to https. Resolved " +
        s"URL is $resolvedURL.")
      return s"k8s://$resolvedURL"
    }

    val masterScheme = new URI(masterWithoutK8sPrefix).getScheme
    // Guard against a null scheme so the `case null` branch below is reachable instead of throwing an NPE.
    val resolvedURL = Option(masterScheme).map(_.toLowerCase).orNull match {
      case "https" =>
        masterWithoutK8sPrefix
      case "http" =>
        logWarning("Kubernetes master URL uses HTTP instead of HTTPS.")
        masterWithoutK8sPrefix
      case null =>
        val resolvedURL = s"https://$masterWithoutK8sPrefix"
        logInfo("No scheme specified for kubernetes master URL, so defaulting to https. Resolved " +
          s"URL is $resolvedURL.")
        resolvedURL
      case _ =>
        throw new IllegalArgumentException("Invalid Kubernetes master scheme: " + masterScheme)
    }

    s"k8s://$resolvedURL"
  }
} 
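Two quick illustrations of the helpers above: tryWithResource closes the resource in all cases, and checkAndGetK8sMasterUrl normalizes a master URL (the host below is made up):

// The stream is closed whether or not the body throws.
val available = Utils.tryWithResource(new java.io.ByteArrayInputStream(Array[Byte](1, 2, 3)))(_.available())

// A missing scheme defaults to https.
Utils.checkAndGetK8sMasterUrl("k8s://example.com:6443")         // returns "k8s://https://example.com:6443"
Utils.checkAndGetK8sMasterUrl("k8s://https://example.com:6443") // returned unchanged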
Example 62
Source File: HiveMRSuite.scala    From connectors   with Apache License 2.0 5 votes vote down vote up
package io.delta.hive

import java.io.{Closeable, File}

import scala.collection.JavaConverters._

import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.mapred.{JobConf, MiniMRCluster}
import org.apache.hadoop.mapreduce.MRJobConfig
import org.apache.hadoop.yarn.conf.YarnConfiguration

class HiveMRSuite extends HiveConnectorTest {

  override val engine: String = "mr"

  override def createCluster(namenode: String, conf: Configuration, tempPath: File): Closeable = {
    val jConf = new JobConf(conf);
    jConf.set("yarn.scheduler.capacity.root.queues", "default");
    jConf.set("yarn.scheduler.capacity.root.default.capacity", "100");
    jConf.setInt(MRJobConfig.MAP_MEMORY_MB, 512);
    jConf.setInt(MRJobConfig.REDUCE_MEMORY_MB, 512);
    jConf.setInt(MRJobConfig.MR_AM_VMEM_MB, 128);
    jConf.setInt(YarnConfiguration.YARN_MINICLUSTER_NM_PMEM_MB, 512);
    jConf.setInt(YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_MB, 128);
    jConf.setInt(YarnConfiguration.RM_SCHEDULER_MAXIMUM_ALLOCATION_MB, 512);
    val mr = new MiniMRCluster(2, namenode, 1, null, null, jConf)

    new Closeable {
      override def close(): Unit = {
        mr.shutdown()
      }
    }
  }
} 
Example 63
Source File: HiveTezSuite.scala    From connectors   with Apache License 2.0 5 votes vote down vote up
package io.delta.hive

import java.io.{Closeable, File}

import scala.collection.JavaConverters._

import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.hive.conf.HiveConf
import org.apache.hadoop.mapred.JobConf
import org.apache.hadoop.mapreduce.MRJobConfig
import org.apache.hadoop.yarn.conf.YarnConfiguration
import org.apache.tez.dag.api.TezConfiguration
import org.apache.tez.runtime.library.api.TezRuntimeConfiguration
import org.apache.tez.test.MiniTezCluster

class HiveTezSuite extends HiveConnectorTest {

  override val engine: String = "tez"

  private var tezConf: Configuration = _

  // scalastyle:off
  
  // scalastyle:on
  override def setupConfiguration(conf: Configuration): Unit = {
    tezConf.asScala.foreach { e =>
      conf.set(e.getKey, e.getValue)
    }
    // Overrides values from the hive/tez-site.
    conf.setInt("hive.tez.container.size", 256)
    conf.setInt(TezConfiguration.TEZ_AM_RESOURCE_MEMORY_MB, 256)
    conf.setInt(TezConfiguration.TEZ_TASK_RESOURCE_MEMORY_MB, 256)
    conf.setInt(TezRuntimeConfiguration.TEZ_RUNTIME_IO_SORT_MB, 24)
    conf.setInt(TezRuntimeConfiguration.TEZ_RUNTIME_UNORDERED_OUTPUT_BUFFER_SIZE_MB, 10)
    conf.setFloat(TezRuntimeConfiguration.TEZ_RUNTIME_SHUFFLE_FETCH_BUFFER_PERCENT, 0.4f)
    conf.setBoolean(TezConfiguration.TEZ_IGNORE_LIB_URIS, true)
  }
} 
Example 64
Source File: SafeCloseableTest.scala    From maha   with Apache License 2.0 5 votes vote down vote up
// Copyright 2017, Yahoo Holdings Inc.
// Licensed under the terms of the Apache License 2.0. Please see LICENSE file in project root for terms.
package com.yahoo.maha.report

import java.io.{Closeable, IOException}

import org.mockito.Mockito._
import org.scalatest.{FunSuite, Matchers}

class SuccessCloseable extends Closeable {
  override def close(): Unit = {
    //success
  }
}
class FailCloseable extends Closeable {
  override def close(): Unit = {
    throw new IOException("fail")
  }
}
class SafeCloseableTest extends FunSuite with Matchers {
  def successWork(closeable: Closeable): Unit = {
    //success
  }
  def failWork(closeable: Closeable): Unit = {
    require(false, "fail")
  }
  test("successfully doWork") {
    safeCloseable(new SuccessCloseable)(successWork)
  }
  test("successfully close on failed doWork") {
    val closeable = spy(new SuccessCloseable)
    safeCloseable(closeable)(failWork)
    verify(closeable).close()
  }
  test("fail to close on failed closeable after failed doWork") {
    val closeable = spy(new FailCloseable)
    safeCloseable(closeable)(failWork)
    verify(closeable).close()
  }
} 
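The test above exercises a safeCloseable helper from the maha project that is not shown in this excerpt. The following is only a sketch of what such a loan helper could look like, assuming its contract is "run the work, then always attempt to close, swallowing failures from both"; it is not the project's actual implementation.

import java.io.Closeable
import scala.util.Try

object SafeCloseableSketch {
  // Run `work` against the resource, then always attempt to close it;
  // failures from the work or from close() are swallowed.
  def safeCloseable[T <: Closeable](closeable: T)(work: T => Unit): Unit = {
    Try(work(closeable))
    Try(closeable.close())
    ()
  }
}

// With this sketch, safeCloseable(new FailCloseable)(failWork) neither throws nor leaks.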
Example 65
Source File: UJESClient.scala    From Linkis   with Apache License 2.0 5 votes vote down vote up
import java.io.Closeable
import java.util.concurrent.TimeUnit

// Project-specific Linkis imports (job actions, results, and DWS client config) are omitted in this excerpt.
abstract class UJESClient extends Closeable {

  def execute(jobExecuteAction: JobExecuteAction): JobExecuteResult = executeUJESJob(jobExecuteAction).asInstanceOf[JobExecuteResult]

  protected[client] def executeUJESJob(ujesJobAction: UJESJobAction): Result

  private def executeJobExecIdAction[T](jobExecuteResult: JobExecuteResult, jobServiceType: JobServiceType.JobServiceType): T = {
    val jobExecIdAction = JobExecIdAction.builder().setJobServiceType(jobServiceType)
      .setExecId(jobExecuteResult.getExecID).setUser(jobExecuteResult.getUser).build()
    executeUJESJob(jobExecIdAction).asInstanceOf[T]
  }

  def status(jobExecuteResult: JobExecuteResult): JobStatusResult = executeJobExecIdAction(jobExecuteResult, JobServiceType.JobStatus)

  def progress(jobExecuteResult: JobExecuteResult): JobProgressResult =
    executeJobExecIdAction(jobExecuteResult, JobServiceType.JobProgress)

  def log(jobExecuteResult: JobExecuteResult, fromLine: Int, size: Int): JobLogResult = {
    val jobLogAction = JobLogAction.builder().setExecId(jobExecuteResult.getExecID)
      .setUser(jobExecuteResult.getUser).setFromLine(fromLine).setSize(size).build()
    executeUJESJob(jobLogAction).asInstanceOf[JobLogResult]
  }

  def log(jobExecuteResult: JobExecuteResult, jobLogResult: JobLogResult): JobLogResult = {
    val jobLogAction = JobLogAction.builder().setExecId(jobExecuteResult.getExecID)
      .setUser(jobExecuteResult.getUser).setFromLine(jobLogResult.getFromLine).build()
    executeUJESJob(jobLogAction).asInstanceOf[JobLogResult]
  }

  def kill(jobExecuteResult: JobExecuteResult): JobKillResult = executeJobExecIdAction(jobExecuteResult, JobServiceType.JobKill)



  def pause(jobExecuteResult: JobExecuteResult): JobPauseResult = executeJobExecIdAction(jobExecuteResult, JobServiceType.JobPause)

  def getJobInfo(jobExecuteResult: JobExecuteResult): JobInfoResult = {
    val jobInfoAction = JobInfoAction.builder().setTaskId(jobExecuteResult).build()
    executeUJESJob(jobInfoAction).asInstanceOf[JobInfoResult]
  }

  def resultSet(resultSetAction: ResultSetAction): ResultSetResult =
    executeUJESJob(resultSetAction).asInstanceOf[ResultSetResult]
}
object UJESClient {
  def apply(clientConfig: DWSClientConfig): UJESClient = new UJESClientImpl(clientConfig)

  def apply(serverUrl: String): UJESClient = apply(serverUrl, 30000, 10)

  def apply(serverUrl: String, readTimeout: Int, maxConnection: Int): UJESClient =
    apply(serverUrl, readTimeout, maxConnection, new StaticAuthenticationStrategy, "v1")

  def apply(serverUrl: String, readTimeout: Int, maxConnection: Int,
            authenticationStrategy: AuthenticationStrategy, dwsVersion: String): UJESClient = {
    val clientConfig = DWSClientConfigBuilder.newBuilder().addUJESServerUrl(serverUrl)
      .connectionTimeout(30000).discoveryEnabled(false)
        .loadbalancerEnabled(false).maxConnectionSize(maxConnection)
      .retryEnabled(false).readTimeout(readTimeout)
      .setAuthenticationStrategy(authenticationStrategy)
      .setDWSVersion(dwsVersion).build()
    apply(clientConfig)
  }

  def getDiscoveryClient(serverUrl: String): UJESClient = getDiscoveryClient(serverUrl, 30000, 10)

  def getDiscoveryClient(serverUrl: String, readTimeout: Int, maxConnection: Int): UJESClient =
    getDiscoveryClient(serverUrl, readTimeout, maxConnection, new StaticAuthenticationStrategy, "v1")

  def getDiscoveryClient(serverUrl: String, readTimeout: Int, maxConnection: Int,
                         authenticationStrategy: AuthenticationStrategy, dwsVersion: String): UJESClient = {
    val clientConfig = DWSClientConfigBuilder.newBuilder().addUJESServerUrl(serverUrl)
      .connectionTimeout(30000).discoveryEnabled(true)
      .discoveryFrequency(1, TimeUnit.MINUTES)
      .loadbalancerEnabled(true).maxConnectionSize(maxConnection)
      .retryEnabled(false).readTimeout(readTimeout)
      .setAuthenticationStrategy(authenticationStrategy).setDWSVersion(dwsVersion).build()
    apply(clientConfig)
  }
} 
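A minimal usage sketch for the factory methods above, assuming the Linkis client library is on the classpath; the gateway URL is hypothetical.

object UJESClientUsage {
  def main(args: Array[String]): Unit = {
    // Simplest factory: defaults for read timeout, connection pool, auth strategy and version.
    val client = UJESClient("http://linkis-gateway:9001")
    try {
      // submit a JobExecuteAction, poll status/progress, fetch logs and result sets ...
    } finally {
      client.close() // UJESClient is Closeable, so release HTTP resources when done
    }
  }
}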
Example 66
Source File: LogWriter.scala    From Linkis   with Apache License 2.0 5 votes vote down vote up
package com.webank.wedatasphere.linkis.entrance.log

import java.io.{Closeable, Flushable, OutputStream}
import java.util

import com.webank.wedatasphere.linkis.common.io.FsPath
import com.webank.wedatasphere.linkis.common.utils.{Logging, Utils}
import com.webank.wedatasphere.linkis.entrance.exception.EntranceErrorException
import com.webank.wedatasphere.linkis.storage.FSFactory
import com.webank.wedatasphere.linkis.storage.utils.FileSystemUtils
import org.apache.commons.io.IOUtils
import org.apache.commons.lang.StringUtils


abstract class LogWriter(charset: String) extends Closeable with Flushable with Logging {

  private var firstWrite = true

  protected val outputStream: OutputStream

  def write(msg: String): Unit = synchronized {
    val log = if (!firstWrite) "\n" + msg else {
      firstWrite = false
      msg
    }
    Utils.tryQuietly({
      outputStream.write(log.getBytes(charset))
      outputStream.flush()
    }, t => {
      warn("error when write query log to outputStream.", t)
      info(msg)
    })
  }



  def flush(): Unit = Utils.tryQuietly(outputStream.flush(), t => {
    warn("Error encountered when flushing log.", t)
  })


  def close(): Unit = {
    flush()
    if (outputStream != null) IOUtils.closeQuietly(outputStream)

  }
}

abstract class AbstractLogWriter(logPath: String,
                                 user: String,
                                 charset: String) extends LogWriter(charset) {
  if(StringUtils.isBlank(logPath)) throw new EntranceErrorException(20301, "logPath cannot be empty.")
  protected val fileSystem = FSFactory.getFs(new FsPath(logPath))
  fileSystem.init(new util.HashMap[String, String]())

  protected val outputStream: OutputStream = {
    FileSystemUtils.createNewFile(new FsPath(logPath), true)
    fileSystem.write(new FsPath(logPath), true)
  }

  override def close(): Unit = {
    super.close()
    if (fileSystem != null) Utils.tryQuietly(fileSystem.close(), t => {
      warn("Error encounters when closing fileSystem", t)
    })
  }
} 
Example 67
Source File: MergeStrategySpec.scala    From daf   with BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
package daf.filesystem

import java.io.{ Closeable, InputStream }
import java.util.Scanner

import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.{ FSDataInputStream, FSDataOutputStream, FileSystem, Path }
import org.scalatest.{ BeforeAndAfterAll, Matchers, WordSpec }

import scala.collection.convert.decorateAsScala._
import scala.util.{ Random, Try }

class MergeStrategySpec extends WordSpec with Matchers with BeforeAndAfterAll {

  private implicit val fileSystem = FileSystem.getLocal(new Configuration)

  private val numFiles = 10

  private val baseDir = "test-dir".asHadoop

  private val workingDir = baseDir / f"merge-strategy-spec-${Random.nextInt(10000)}%05d"

  private def safely[A <: Closeable, U](f: A => U) = { stream: A =>
    val attempt = Try { f(stream) }
    stream.close()
    attempt
  }

  private def readFile(path: Path) = safely[FSDataInputStream, Seq[String]] { _.scanner.asScala.toSeq } apply fileSystem.open(path)

  private def readFiles = Try {
    fileSystem.listStatus(workingDir).toSeq.flatMap { status => readFile(status.getPath).get }
  }

  private def openFiles = Try {
    fileSystem.listStatus(workingDir).toSeq.map { status => fileSystem.open(status.getPath) }
  }

  private def createFile(fileName: String) = safely[FSDataOutputStream, Unit] { stream =>
    Random.alphanumeric.grouped(200).take(10).map { randomSplits(_) }.foreach { row =>
      stream.writeUTF { row.mkString("", ",", "\n") }
    }
  } apply fileSystem.create { workingDir / fileName }

  private def randomSplits(chars: Stream[Char], strings: Seq[String] = Seq.empty): Seq[String] = chars.splitAt { Random.nextInt(10) + 5 } match {
    case (head, tail) if tail.isEmpty => head.drop(1).mkString +: strings
    case (head, tail)                 => randomSplits(tail, head.mkString +: strings)
  }

  private def createWorkingDir = Try { fileSystem.mkdirs(workingDir) }

  private def createFiles = Try {
    0 until numFiles foreach { index => createFile(s"test-file-$index").get } // this is relatively nasty, and should be handled in a `traverse`
  }

  private def prepareData = for {
    _ <- createWorkingDir
    _ <- createFiles
  } yield ()

  private def purgeData = Try { fileSystem.delete(workingDir, true) }

  override def beforeAll() = prepareData.get

  override def afterAll() = purgeData.get

  "MergeStrategies info" when {

    "given compressed format files" must {

      "throw an exception" in {
        an[IllegalArgumentException] must be thrownBy MergeStrategies.find { FileInfo(workingDir / "test-file-0", 0, FileDataFormats.raw, FileCompressionFormats.gzip) }
      }
    }

    "given data as csv" must {

      "drop one line and merge the rest" in {
        safely[InputStream, Seq[String]] { new Scanner(_).asScala.toList }.andThen { attempt =>
          for {
            merged   <- attempt
            expected <- readFiles
          } merged.size should be { expected.size - numFiles + 1 }
        } apply MergeStrategies.csv.merge { openFiles.get }
      }
    }

    "given data as json" must {

      "just merge the files into one" in {
        safely[InputStream, Seq[String]] { new Scanner(_).asScala.toList }.andThen { attempt =>
          for {
            merged   <- attempt
            expected <- readFiles
          } merged.size should be { expected.size }
        } apply MergeStrategies.json.merge { openFiles.get }
      }

    }
  }
} 
Example 68
Source File: ResourceManagement.scala    From scalismo-faces   with Apache License 2.0 5 votes vote down vote up
package scalismo.faces.utils

import java.io.Closeable

import scala.io.Source
import scala.util.control.NonFatal
import scala.util.{Failure, Try}


object ResourceManagement {

  def usingOption[T <: Closeable, R](obj: => Option[T], after: T => Unit = { t: T => t.close() })(block: T => Option[R]): Option[R] = {
    val o: Option[T] = try {
      obj
    } catch {
      case NonFatal(e) => None
    }
    o.flatMap { res =>
      try {
        block(res)
      } finally {
        after(res)
      }
    }
  }

} 
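A usage sketch for usingOption, assuming the enclosing object is named ResourceManagement (as restored above, matching the source file name); the file path is hypothetical. The reader is closed by the default `after` argument whether or not the block succeeds, and any non-fatal failure while opening yields None.

import java.io.{BufferedReader, FileReader}

object UsingOptionDemo {
  // Read the first line of a file, if it can be opened.
  def firstLine(path: String): Option[String] =
    ResourceManagement.usingOption(Option(new BufferedReader(new FileReader(path)))) { reader =>
      Option(reader.readLine())
    }
}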
Example 69
Source File: CloseableDataAccessor.scala    From ArchiveSpark   with MIT License 5 votes vote down vote up
package org.archive.archivespark.dataspecs.access

import java.io.Closeable

import scala.util.Try

trait CloseableDataAccessor[Data <: Closeable] extends DataAccessor[Data] {
  override def access[R >: Null](action: Data => R): R = {
    get match {
      case Some(data) =>
        val result = action(data)
        Try {data.close()}
        result
      case None => null
    }
  }
} 
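A sketch of a concrete accessor, assuming DataAccessor[Data] (not shown in this excerpt) declares the `get: Option[Data]` member used above and nothing else abstract; the in-memory stream is purely illustrative.

import java.io.{ByteArrayInputStream, InputStream}

class InMemoryAccessor(bytes: Array[Byte]) extends CloseableDataAccessor[InputStream] {
  // Hand out a fresh stream on each access; CloseableDataAccessor closes it afterwards.
  override def get: Option[InputStream] = Some(new ByteArrayInputStream(bytes))
}

// val text = new InMemoryAccessor("hello".getBytes).access(in => scala.io.Source.fromInputStream(in).mkString)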
Example 70
Source File: BlockTransferService.scala    From drizzle-spark   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.network

import java.io.Closeable
import java.nio.ByteBuffer

import scala.concurrent.{Future, Promise}
import scala.concurrent.duration.Duration
import scala.reflect.ClassTag

import org.apache.spark.internal.Logging
import org.apache.spark.network.buffer.{ManagedBuffer, NioManagedBuffer}
import org.apache.spark.network.shuffle.{BlockFetchingListener, ShuffleClient}
import org.apache.spark.scheduler.MapStatus
import org.apache.spark.storage.{BlockId, StorageLevel}
import org.apache.spark.util.ThreadUtils

private[spark]
abstract class BlockTransferService extends ShuffleClient with Closeable with Logging {

  
  def uploadBlockSync(
      hostname: String,
      port: Int,
      execId: String,
      blockId: BlockId,
      blockData: ManagedBuffer,
      level: StorageLevel,
      classTag: ClassTag[_]): Unit = {
    val future = uploadBlock(hostname, port, execId, blockId, blockData, level, classTag)
    ThreadUtils.awaitResult(future, Duration.Inf)
  }
} 
Example 71
Source File: FileBasedWriteAheadLogReader.scala    From drizzle-spark   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.streaming.util

import java.io.{Closeable, EOFException, IOException}
import java.nio.ByteBuffer

import org.apache.hadoop.conf.Configuration

import org.apache.spark.internal.Logging


private[streaming] class FileBasedWriteAheadLogReader(path: String, conf: Configuration)
  extends Iterator[ByteBuffer] with Closeable with Logging {

  private val instream = HdfsUtils.getInputStream(path, conf)
  private var closed = (instream == null) // the file may be deleted as we're opening the stream
  private var nextItem: Option[ByteBuffer] = None

  override def hasNext: Boolean = synchronized {
    if (closed) {
      return false
    }

    if (nextItem.isDefined) { // handle the case where hasNext is called without calling next
      true
    } else {
      try {
        val length = instream.readInt()
        val buffer = new Array[Byte](length)
        instream.readFully(buffer)
        nextItem = Some(ByteBuffer.wrap(buffer))
        logTrace("Read next item " + nextItem.get)
        true
      } catch {
        case e: EOFException =>
          logDebug("Error reading next item, EOF reached", e)
          close()
          false
        case e: IOException =>
          logWarning("Error while trying to read data. If the file was deleted, " +
            "this should be okay.", e)
          close()
          if (HdfsUtils.checkFileExists(path, conf)) {
            // If file exists, this could be a legitimate error
            throw e
          } else {
            // File was deleted. This can occur when the daemon cleanup thread takes time to
            // delete the file during recovery.
            false
          }

        case e: Exception =>
          logWarning("Error while trying to read data from HDFS.", e)
          close()
          throw e
      }
    }
  }

  override def next(): ByteBuffer = synchronized {
    val data = nextItem.getOrElse {
      close()
      throw new IllegalStateException(
        "next called without calling hasNext or after hasNext returned false")
    }
    nextItem = None // Ensure the next hasNext call loads new data.
    data
  }

  override def close(): Unit = synchronized {
    if (!closed) {
      instream.close()
    }
    closed = true
  }
} 
Example 72
Source File: FileBasedWriteAheadLogRandomReader.scala    From drizzle-spark   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.streaming.util

import java.io.Closeable
import java.nio.ByteBuffer

import org.apache.hadoop.conf.Configuration


private[streaming] class FileBasedWriteAheadLogRandomReader(path: String, conf: Configuration)
  extends Closeable {

  private val instream = HdfsUtils.getInputStream(path, conf)
  private var closed = (instream == null) // the file may be deleted as we're opening the stream

  def read(segment: FileBasedWriteAheadLogSegment): ByteBuffer = synchronized {
    assertOpen()
    instream.seek(segment.offset)
    val nextLength = instream.readInt()
    HdfsUtils.checkState(nextLength == segment.length,
      s"Expected message length to be ${segment.length}, but was $nextLength")
    val buffer = new Array[Byte](nextLength)
    instream.readFully(buffer)
    ByteBuffer.wrap(buffer)
  }

  override def close(): Unit = synchronized {
    closed = true
    instream.close()
  }

  private def assertOpen() {
    HdfsUtils.checkState(!closed, "Stream is closed. Create a new Reader to read from the file.")
  }
} 
Example 73
Source File: HadoopFileLinesReader.scala    From drizzle-spark   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.sql.execution.datasources

import java.io.Closeable
import java.net.URI

import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.Path
import org.apache.hadoop.io.Text
import org.apache.hadoop.mapreduce._
import org.apache.hadoop.mapreduce.lib.input.{FileSplit, LineRecordReader}
import org.apache.hadoop.mapreduce.task.TaskAttemptContextImpl


class HadoopFileLinesReader(
    file: PartitionedFile, conf: Configuration) extends Iterator[Text] with Closeable {
  private val iterator = {
    val fileSplit = new FileSplit(
      new Path(new URI(file.filePath)),
      file.start,
      file.length,
      // TODO: Implement Locality
      Array.empty)
    val attemptId = new TaskAttemptID(new TaskID(new JobID(), TaskType.MAP, 0), 0)
    val hadoopAttemptContext = new TaskAttemptContextImpl(conf, attemptId)
    val reader = new LineRecordReader()
    reader.initialize(fileSplit, hadoopAttemptContext)
    new RecordReaderIterator(reader)
  }

  override def hasNext: Boolean = iterator.hasNext

  override def next(): Text = iterator.next()

  override def close(): Unit = iterator.close()
} 
Example 74
Source File: RecordReaderIterator.scala    From drizzle-spark   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.sql.execution.datasources

import java.io.Closeable

import org.apache.hadoop.mapreduce.RecordReader

import org.apache.spark.sql.catalyst.InternalRow


class RecordReaderIterator[T](
    private[this] var rowReader: RecordReader[_, T]) extends Iterator[T] with Closeable {
  private[this] var havePair = false
  private[this] var finished = false

  override def hasNext: Boolean = {
    if (!finished && !havePair) {
      finished = !rowReader.nextKeyValue
      if (finished) {
        // Close and release the reader here; close() will also be called when the task
        // completes, but for tasks that read from many files, it helps to release the
        // resources early.
        close()
      }
      havePair = !finished
    }
    !finished
  }

  override def next(): T = {
    if (!hasNext) {
      throw new java.util.NoSuchElementException("End of stream")
    }
    havePair = false
    rowReader.getCurrentValue
  }

  override def close(): Unit = {
    if (rowReader != null) {
      try {
        // Close the reader and release it. Note: it's very important that we don't close the
        // reader more than once, since that exposes us to MAPREDUCE-5918 when running against
        // older Hadoop 2.x releases. That bug can lead to non-deterministic corruption issues
        // when reading compressed input.
        rowReader.close()
      } finally {
        rowReader = null
      }
    }
  }
} 
Example 75
Source File: RedeliveryActors.scala    From kmq   with Apache License 2.0 5 votes vote down vote up
package com.softwaremill.kmq.redelivery

import java.io.Closeable
import java.util.Collections

import akka.actor.{ActorSystem, Props}
import com.softwaremill.kmq.{KafkaClients, KmqConfig}
import com.typesafe.scalalogging.StrictLogging

import scala.concurrent.Await
import scala.concurrent.duration._
import scala.collection.JavaConverters._

object RedeliveryActors extends StrictLogging {
  def start(clients: KafkaClients, config: KmqConfig): Closeable = {
    val system = ActorSystem("kmq-redelivery")

    val consumeMarkersActor = system.actorOf(Props(new ConsumeMarkersActor(clients, config)), "consume-markers-actor")
    consumeMarkersActor ! DoConsume

    logger.info("Started redelivery actors")

    new Closeable {
      override def close(): Unit = Await.result(system.terminate(), 1.minute)
    }
  }
} 
Example 76
Source File: FileBasedWriteAheadLogReader.scala    From sparkoscope   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.streaming.util

import java.io.{Closeable, EOFException, IOException}
import java.nio.ByteBuffer

import org.apache.hadoop.conf.Configuration

import org.apache.spark.internal.Logging


private[streaming] class FileBasedWriteAheadLogReader(path: String, conf: Configuration)
  extends Iterator[ByteBuffer] with Closeable with Logging {

  private val instream = HdfsUtils.getInputStream(path, conf)
  private var closed = (instream == null) // the file may be deleted as we're opening the stream
  private var nextItem: Option[ByteBuffer] = None

  override def hasNext: Boolean = synchronized {
    if (closed) {
      return false
    }

    if (nextItem.isDefined) { // handle the case where hasNext is called without calling next
      true
    } else {
      try {
        val length = instream.readInt()
        val buffer = new Array[Byte](length)
        instream.readFully(buffer)
        nextItem = Some(ByteBuffer.wrap(buffer))
        logTrace("Read next item " + nextItem.get)
        true
      } catch {
        case e: EOFException =>
          logDebug("Error reading next item, EOF reached", e)
          close()
          false
        case e: IOException =>
          logWarning("Error while trying to read data. If the file was deleted, " +
            "this should be okay.", e)
          close()
          if (HdfsUtils.checkFileExists(path, conf)) {
            // If file exists, this could be a legitimate error
            throw e
          } else {
            // File was deleted. This can occur when the daemon cleanup thread takes time to
            // delete the file during recovery.
            false
          }

        case e: Exception =>
          logWarning("Error while trying to read data from HDFS.", e)
          close()
          throw e
      }
    }
  }

  override def next(): ByteBuffer = synchronized {
    val data = nextItem.getOrElse {
      close()
      throw new IllegalStateException(
        "next called without calling hasNext or after hasNext returned false")
    }
    nextItem = None // Ensure the next hasNext call loads new data.
    data
  }

  override def close(): Unit = synchronized {
    if (!closed) {
      instream.close()
    }
    closed = true
  }
} 
Example 77
Source File: FileBasedWriteAheadLogRandomReader.scala    From multi-tenancy-spark   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.streaming.util

import java.io.Closeable
import java.nio.ByteBuffer

import org.apache.hadoop.conf.Configuration


private[streaming] class FileBasedWriteAheadLogRandomReader(path: String, conf: Configuration)
  extends Closeable {

  private val instream = HdfsUtils.getInputStream(path, conf)
  private var closed = (instream == null) // the file may be deleted as we're opening the stream

  def read(segment: FileBasedWriteAheadLogSegment): ByteBuffer = synchronized {
    assertOpen()
    instream.seek(segment.offset)
    val nextLength = instream.readInt()
    HdfsUtils.checkState(nextLength == segment.length,
      s"Expected message length to be ${segment.length}, but was $nextLength")
    val buffer = new Array[Byte](nextLength)
    instream.readFully(buffer)
    ByteBuffer.wrap(buffer)
  }

  override def close(): Unit = synchronized {
    closed = true
    instream.close()
  }

  private def assertOpen() {
    HdfsUtils.checkState(!closed, "Stream is closed. Create a new Reader to read from the file.")
  }
} 
Example 78
Source File: HadoopFileLinesReader.scala    From multi-tenancy-spark   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.sql.execution.datasources

import java.io.Closeable
import java.net.URI

import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.Path
import org.apache.hadoop.io.Text
import org.apache.hadoop.mapreduce._
import org.apache.hadoop.mapreduce.lib.input.{FileSplit, LineRecordReader}
import org.apache.hadoop.mapreduce.task.TaskAttemptContextImpl


class HadoopFileLinesReader(
    file: PartitionedFile, conf: Configuration) extends Iterator[Text] with Closeable {
  private val iterator = {
    val fileSplit = new FileSplit(
      new Path(new URI(file.filePath)),
      file.start,
      file.length,
      // TODO: Implement Locality
      Array.empty)
    val attemptId = new TaskAttemptID(new TaskID(new JobID(), TaskType.MAP, 0), 0)
    val hadoopAttemptContext = new TaskAttemptContextImpl(conf, attemptId)
    val reader = new LineRecordReader()
    reader.initialize(fileSplit, hadoopAttemptContext)
    new RecordReaderIterator(reader)
  }

  override def hasNext: Boolean = iterator.hasNext

  override def next(): Text = iterator.next()

  override def close(): Unit = iterator.close()
} 
Example 79
Source File: RecordReaderIterator.scala    From multi-tenancy-spark   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.sql.execution.datasources

import java.io.Closeable

import org.apache.hadoop.mapreduce.RecordReader

import org.apache.spark.sql.catalyst.InternalRow


class RecordReaderIterator[T](
    private[this] var rowReader: RecordReader[_, T]) extends Iterator[T] with Closeable {
  private[this] var havePair = false
  private[this] var finished = false

  override def hasNext: Boolean = {
    if (!finished && !havePair) {
      finished = !rowReader.nextKeyValue
      if (finished) {
        // Close and release the reader here; close() will also be called when the task
        // completes, but for tasks that read from many files, it helps to release the
        // resources early.
        close()
      }
      havePair = !finished
    }
    !finished
  }

  override def next(): T = {
    if (!hasNext) {
      throw new java.util.NoSuchElementException("End of stream")
    }
    havePair = false
    rowReader.getCurrentValue
  }

  override def close(): Unit = {
    if (rowReader != null) {
      try {
        // Close the reader and release it. Note: it's very important that we don't close the
        // reader more than once, since that exposes us to MAPREDUCE-5918 when running against
        // older Hadoop 2.x releases. That bug can lead to non-deterministic corruption issues
        // when reading compressed input.
        rowReader.close()
      } finally {
        rowReader = null
      }
    }
  }
} 
Example 80
Source File: DoubleInputBuffer.scala    From hail   with MIT License 5 votes vote down vote up
package is.hail.io

import java.io.{Closeable, InputStream, OutputStream}

import is.hail.annotations.Memory
import is.hail.utils._

final class DoubleInputBuffer(in: InputStream, bufSize: Int) extends Closeable {
  private val buf = new Array[Byte](bufSize)
  private var end: Int = 0
  private var off: Int = 0

  def close() {
    in.close()
  }

  def readDoubles(to: Array[Double]): Unit = readDoubles(to, 0, to.length)

  def readDoubles(to: Array[Double], toOff0: Int, n0: Int) {
    assert(toOff0 >= 0)
    assert(n0 >= 0)
    assert(toOff0 <= to.length - n0)

    var toOff = toOff0
    var n = n0.toLong

    while (n > 0) {
      if (end == off) {
        val len = math.min(bufSize, n << 3).toInt
        in.readFully(buf, 0, len)
        end = len
        off = 0
      }
      val p = math.min(end - off, n << 3).toInt >>> 3
      assert(p > 0)
      Memory.memcpy(to, toOff, buf, off, p)
      toOff += p
      n -= p
      off += (p << 3)
    }
  }
}

final class DoubleOutputBuffer(out: OutputStream, bufSize: Int) extends Closeable {
  private val buf: Array[Byte] = new Array[Byte](bufSize)
  private var off: Int = 0

  def close() {
    flush()
    out.close()
  }

  def flush() {
    out.write(buf, 0, off)
  }

  def writeDoubles(from: Array[Double]): Unit = writeDoubles(from, 0, from.length)

  def writeDoubles(from: Array[Double], fromOff0: Int, n0: Int) {
    assert(n0 >= 0)
    assert(fromOff0 >= 0)
    assert(fromOff0 <= from.length - n0)
    var fromOff = fromOff0
    var n = n0.toLong

    while (off + (n << 3) > bufSize) {
      val p = (buf.length - off) >>> 3
      Memory.memcpy(buf, off, from, fromOff, p)
      off += (p << 3)
      fromOff += p
      n -= p
      out.write(buf, 0, off)
      off = 0
    }
    Memory.memcpy(buf, off, from, fromOff, n)
    off += (n.toInt << 3)
  }
} 
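A round-trip sketch for the two buffers above. Note that DoubleInputBuffer.readDoubles relies on the readFully extension that is.hail.utils adds to InputStream, so this only runs with the Hail utilities on the classpath; the sample values and buffer size are arbitrary.

import java.io.{ByteArrayInputStream, ByteArrayOutputStream}

object DoubleBufferRoundTrip {
  def main(args: Array[String]): Unit = {
    val values = Array(1.0, 2.5, -3.75)

    val baos = new ByteArrayOutputStream()
    val out = new DoubleOutputBuffer(baos, bufSize = 64)
    out.writeDoubles(values)
    out.close() // flushes the buffer and closes the underlying stream

    val in = new DoubleInputBuffer(new ByteArrayInputStream(baos.toByteArray), bufSize = 64)
    val read = new Array[Double](values.length)
    in.readDoubles(read)
    in.close()

    assert(read.sameElements(values))
  }
}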
Example 81
Source File: KeyValueReaderIterator.scala    From mmlspark   with MIT License 5 votes vote down vote up
// Copyright (C) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See LICENSE in project root for information.

package org.apache.spark.binary

import java.io.Closeable
import org.apache.hadoop.mapreduce.RecordReader

// Based on:
// https://github.com/apache/spark/blob/master/sql/core/src/main/scala/
//   org/apache/spark/sql/execution/datasources/RecordReaderIterator.scala


private class KeyValueReaderIterator[K, V] (
  private[this] var rowReader: RecordReader[K, V]) extends Iterator[(K, V)] with Closeable {
  private[this] var havePair = false
  private[this] var finished = false

  override def hasNext: Boolean = {
    if (!finished && !havePair) {
      finished = !rowReader.nextKeyValue
      if (finished) {
        // Close and release the reader here; close() will also be called when the task
        // completes, but for tasks that read from many files, it helps to release the
        // resources early.
        close()
      }
      havePair = !finished
    }
    !finished
  }

  override def next(): (K, V) = {
    if (!hasNext) {
      throw new java.util.NoSuchElementException("End of stream")
    }
    havePair = false
    (rowReader.getCurrentKey, rowReader.getCurrentValue)
  }

  override def close(): Unit = {
    if (rowReader != null) { //scalastyle:ignore null
      try {
        rowReader.close()
      } finally {
        rowReader = null //scalastyle:ignore null
      }
    }
  }

} 
Example 82
Source File: KubeServiceLocatorServer.scala    From lagom-on-kube   with Apache License 2.0 5 votes vote down vote up
package me.alexray.lagom.kube.discovery

import java.io.Closeable
import java.net.URI
import java.util.{Map => JMap}

import com.lightbend.lagom.gateway.{ServiceGateway, ServiceGatewayConfig, ServiceGatewayFactory}
import me.alexray.lagom.kube.discovery.impl.KubeServiceRegistryModule
import me.alexray.lagom.kube.gateway.{KubeServiceGateway, KubeServiceGatewayConfig, KubeServiceGatewayFactory}
import play.api.Application
import play.api.Logger
import play.api.Mode
import play.api.Play
import play.api.inject.guice.GuiceApplicationBuilder
import play.api.inject.guice.GuiceableModule.fromGuiceModule
import play.core.server.ServerConfig
import play.core.server.ServerProvider
import play.core.server.ServerWithStop

import scala.util.control.NonFatal

class KubeServiceLocatorServer extends Closeable {
  private val logger: Logger = Logger(this.getClass)

  @volatile private var server: ServerWithStop = _
  @volatile private var gateway: KubeServiceGateway = _

  def start(serviceLocatorPort: Int, serviceGatewayPort: Int, unmanagedServices: JMap[String, String]): Unit = synchronized {
    require(server == null, "Service locator is already running on " + server.mainAddress)

    val application = createApplication(KubeServiceGatewayConfig(serviceGatewayPort), unmanagedServices)
    Play.start(application)
    try {
      server = createServer(application, serviceLocatorPort)
    } catch {
      case NonFatal(e) =>
        throw new RuntimeException(s"Unable to start service locator on port $serviceLocatorPort", e)
    }
    try {
      gateway = application.injector.instanceOf[KubeServiceGatewayFactory].start()
    } catch {
      case NonFatal(e) =>
        throw new RuntimeException(s"Unable to start service gateway on port $serviceGatewayPort", e)
    }
    logger.info("Service locator can be reached at " + serviceLocatorAddress)
    logger.info("Service gateway can be reached at " + serviceGatewayAddress)
  }

  private def createApplication(serviceGatewayConfig: KubeServiceGatewayConfig, unmanagedServices: JMap[String, String]): Application = {
    new GuiceApplicationBuilder()
      .overrides(KubeServiceRegistryModule(serviceGatewayConfig, unmanagedServices))
      .build()
  }

  private def createServer(application: Application, port: Int): ServerWithStop = {
    val config = ServerConfig(port = Some(port), mode = Mode.Test)
    val provider = implicitly[ServerProvider]
    provider.createServer(config, application)
  }

  override def close(): Unit = synchronized {
    if (server == null) Logger.logger.debug("Service locator was already stopped")
    else {
      logger.debug("Stopping service locator...")
      server.stop()
      server = null
      logger.info("Service locator stopped")
    }
  }

  def serviceLocatorAddress: URI = {
    // Converting InetSocketAddress into URL is not that simple.
    // Because we know the service locator is running locally, I'm hardcoding the hostname and protocol.
    new URI(s"http://localhost:${server.mainAddress.getPort}")
  }

  def serviceGatewayAddress: URI = {
    new URI(s"http://localhost:${gateway.address.getPort}")
  }
} 
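A minimal usage sketch; the port numbers are hypothetical and no unmanaged services are registered.

import java.util.Collections

object ServiceLocatorMain {
  def main(args: Array[String]): Unit = {
    val locator = new KubeServiceLocatorServer()
    locator.start(serviceLocatorPort = 9008, serviceGatewayPort = 9009,
      unmanagedServices = Collections.emptyMap[String, String]())
    try {
      println(s"Locator at ${locator.serviceLocatorAddress}, gateway at ${locator.serviceGatewayAddress}")
    } finally {
      locator.close()
    }
  }
}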
Example 83
Source File: SplitBam.scala    From fgbio   with MIT License 5 votes vote down vote up
package com.fulcrumgenomics.bam

import java.io.Closeable
import java.nio.file.Files

import com.fulcrumgenomics.FgBioDef.FgBioEnum
import com.fulcrumgenomics.bam.api.{SamSource, SamWriter}
import com.fulcrumgenomics.cmdline.{ClpGroups, FgBioTool}
import com.fulcrumgenomics.commons.CommonsDef.{PathPrefix, PathToBam, SafelyClosable, javaIterableToIterator, javaIteratorAsScalaIterator}
import com.fulcrumgenomics.commons.io.{Io, PathUtil}
import com.fulcrumgenomics.commons.util.{LazyLogging, SimpleCounter}
import com.fulcrumgenomics.sopt.{arg, clp}
import com.fulcrumgenomics.util.ProgressLogger
import enumeratum.EnumEntry
import htsjdk.samtools._

import scala.collection.immutable.IndexedSeq

sealed trait SplitType extends EnumEntry
object SplitType extends FgBioEnum[SplitType] {
  def values: IndexedSeq[SplitType] = findValues
  case object Library extends SplitType
  case object ReadGroup extends SplitType
}

@clp(group = ClpGroups.SamOrBam, description=
  """
    |Splits a BAM into multiple BAMs, one per-read group (or library).
    |
    |The resulting BAMs will be named `<output-prefix>.<read-group-id>.bam`, or `<output-prefix>.<library-name>.bam`
    |when splitting by the library.  All reads without a read group, or without a library when splitting by library,
    |will be written to `<output-prefix>.unknown.bam`.  If no such reads exist, then no such file will exist.
  """)
class SplitBam
( @arg(flag='i', doc="Input SAM or BAM file.") val input: PathToBam,
  @arg(flag='o', doc="Output prefix for all SAM or BAM files (ex. output/sample-name).") val output: PathPrefix,
  @arg(flag='s', doc="Split by library instead of read group") val splitBy: SplitType = SplitType.ReadGroup,
  @arg(flag='u', doc="The name to use for the unknown file") val unknown: String = "unknown"
) extends FgBioTool with LazyLogging {

  Io.assertReadable(input)
  Io.assertCanWriteFile(output)

  override def execute(): Unit = {
    val in       = SamSource(input)
    val progress = ProgressLogger(logger)
    val unknownBamAndWriter = {
      val unknownBam: PathToBam = toOutput(unknown)
      val unknownWriter: SamWriter = SamWriter(toOutput(unknown), in.header)
      //new SAMFileWriterFactory().makeWriter(in.h, true, toOutput(unknown).toFile, null)
      WriterInfo(name=unknown, bam=unknownBam, writer=unknownWriter)
    }
    val writers  = createWriters(header=in.header, splitBy=splitBy).withDefaultValue(unknownBamAndWriter)
    val counter  = new SimpleCounter[WriterInfo]()

    in.foreach { rec =>
      val info = writers(rec.readGroup)
      info.writer += rec
      counter.count(info)
      progress.record(rec)
    }

    writers.values.foreach(_.close())
    unknownBamAndWriter.close()
    in.safelyClose()

    counter.toSeq sortBy { _._1.name } foreach { case (info, count) =>
      logger.info(s"Wrote $count records to ${info.bam.getFileName}")
    }

    if (counter.countOf(unknownBamAndWriter) == 0) {
      Files.delete(unknownBamAndWriter.bam)
    }
  }

  
  private def createWriters(header: SAMFileHeader, splitBy: SplitType): Map[SAMReadGroupRecord, WriterInfo] = {
    splitBy match {
      case SplitType.Library =>
        header.getReadGroups.toSeq.groupBy { rg => rg.getLibrary }
          .flatMap { case (library, readGroups) =>
            val bam = toOutput(library)
            val writer = SamWriter(bam, header)
            readGroups.map { rg => rg -> WriterInfo(name=library, bam=bam, writer=writer) }
          }
      case SplitType.ReadGroup =>
        header.getReadGroups.map { rg =>
          val bam = toOutput(rg.getId)
          val writer = SamWriter(bam, header)
          rg -> WriterInfo(name=rg.getId, bam=bam, writer=writer)
        }.toMap
    }
  }

  private[bam] def toOutput(name: String): PathToBam = {
    val outputDir = output.getParent
    val prefix    = output.getFileName
    outputDir.resolve(PathUtil.sanitizeFileName(s"$prefix.$name.bam"))
  }
} 
Example 84
Source File: VcfSource.scala    From fgbio   with MIT License 5 votes vote down vote up
package com.fulcrumgenomics.vcf.api

import java.io.Closeable

import com.fulcrumgenomics.FgBioDef._
import com.fulcrumgenomics.commons.collection.SelfClosingIterator
import com.fulcrumgenomics.commons.io.PathUtil
import htsjdk.samtools.util.CloseableIterator
import htsjdk.tribble.AbstractFeatureReader
import htsjdk.variant.bcf2.BCF2Codec
import htsjdk.variant.variantcontext.VariantContext
import htsjdk.variant.vcf.{VCFCodec, VCFFileReader, VCFHeader}


object VcfSource {
  // Note: the VcfSource class itself (which wraps the feature reader) is omitted from this excerpt.
  def apply(path: PathToVcf): VcfSource = {
    val codec  = if (PathUtil.extensionOf(path).contains(".bcf")) {
      new BCF2Codec
    }
    else {
      val c = new VCFCodec
      c.disableOnTheFlyModifications()
      c
    }

    val reader = AbstractFeatureReader.getFeatureReader(path.toUri.toString, codec, false)
    new VcfSource(reader)
  }
} 
Example 85
Source File: BytecodeUtil.scala    From sbt-jni   with BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
package ch.jodersky.sbt.jni
package util

import java.io.{ File, FileInputStream, Closeable }
import scala.collection.mutable.{ HashSet }

import org.objectweb.asm.{ ClassReader, ClassVisitor, MethodVisitor, Opcodes }

object BytecodeUtil {

  private class NativeFinder extends ClassVisitor(Opcodes.ASM5) {

    // classes found to contain at least one @native def
    val _nativeClasses = new HashSet[String]
    def nativeClasses = _nativeClasses.toSet

    private var fullyQualifiedName: String = ""

    override def visit(version: Int, access: Int, name: String, signature: String,
      superName: String, interfaces: Array[String]): Unit = {
      fullyQualifiedName = name.replaceAll("/", ".")
    }

    override def visitMethod(access: Int, name: String, desc: String,
      signature: String, exceptions: Array[String]): MethodVisitor = {

      val isNative = (access & Opcodes.ACC_NATIVE) != 0

      if (isNative) {
        _nativeClasses += fullyQualifiedName
      }

      null //return null, do not visit method further
    }

  }

  private def using[A >: Null <: Closeable, R](mkStream: => A)(action: A => R): R = {
    var stream: A = null
    try {
      stream = mkStream
      action(stream)
    } finally {
      if (stream != null) {
        stream.close()
      }
    }
  }

  
  def nativeClasses(classFile: File): Set[String] = using(new FileInputStream(classFile)) { in =>
    val reader = new ClassReader(in)
    val finder = new NativeFinder
    reader.accept(finder, 0)
    finder.nativeClasses
  }

} 
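A usage sketch that scans a (hypothetical) compiled-classes directory and prints every class that declares at least one @native method.

import java.io.File

object FindNativeClasses {
  def main(args: Array[String]): Unit = {
    val classDir = new File("target/classes")
    val classFiles = Option(classDir.listFiles()).getOrElse(Array.empty[File])
      .filter(_.getName.endsWith(".class"))
    val nativeClasses = classFiles.flatMap(BytecodeUtil.nativeClasses).toSet
    nativeClasses.foreach(println)
  }
}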
Example 86
Source File: ZipArchive.scala    From codepropertygraph   with Apache License 2.0 5 votes vote down vote up
package io.shiftleft.codepropertygraph.cpgloading

import java.io.Closeable
import java.nio.file.attribute.BasicFileAttributes
import java.nio.file.{FileSystem, FileSystems, FileVisitResult, Files, Path, Paths, SimpleFileVisitor}
import java.util.{Collection => JCollection}

import scala.collection.mutable.ArrayBuffer
import scala.jdk.CollectionConverters._

class ZipArchive(inputFile: String) extends Closeable {
  private val zipFileSystem: FileSystem = FileSystems.newFileSystem(Paths.get(inputFile), null)

  private def root: Path = zipFileSystem.getRootDirectories.iterator.next

  private def walk(rootPath: Path): Seq[Path] = {
    val entries = ArrayBuffer[Path]()
    Files.walkFileTree(
      rootPath,
      new SimpleFileVisitor[Path]() {
        override def visitFile(file: Path, attrs: BasicFileAttributes): FileVisitResult = {
          if (attrs.isRegularFile)
            entries += file
          FileVisitResult.CONTINUE
        }
      }
    )
    entries.toSeq
  }

  def entries: Seq[Path] = walk(root)

  def getFileEntries: JCollection[Path] = entries.asJava

  override def close(): Unit = zipFileSystem.close()
} 
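A usage sketch listing the regular-file entries of a (hypothetical) jar; ZipArchive is Closeable, so the underlying zip filesystem is released afterwards.

object ListJarEntries {
  def main(args: Array[String]): Unit = {
    val archive = new ZipArchive("example.jar")
    try {
      archive.entries.foreach(println)
    } finally {
      archive.close()
    }
  }
}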
Example 87
Source File: BlockTransferService.scala    From SparkCore   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.network

import java.io.Closeable
import java.nio.ByteBuffer

import scala.concurrent.{Promise, Await, Future}
import scala.concurrent.duration.Duration

import org.apache.spark.Logging
import org.apache.spark.network.buffer.{NioManagedBuffer, ManagedBuffer}
import org.apache.spark.network.shuffle.{ShuffleClient, BlockFetchingListener}
import org.apache.spark.storage.{BlockManagerId, BlockId, StorageLevel}

private[spark]
abstract class BlockTransferService extends ShuffleClient with Closeable with Logging {

  
  def uploadBlockSync(
      hostname: String,
      port: Int,
      execId: String,
      blockId: BlockId,
      blockData: ManagedBuffer,
      level: StorageLevel): Unit = {
    Await.result(uploadBlock(hostname, port, execId, blockId, blockData, level), Duration.Inf)
  }
} 
Example 88
Source File: BlockTransferService.scala    From sparkoscope   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.network

import java.io.Closeable
import java.nio.ByteBuffer

import scala.concurrent.{Future, Promise}
import scala.concurrent.duration.Duration
import scala.reflect.ClassTag

import org.apache.spark.internal.Logging
import org.apache.spark.network.buffer.{ManagedBuffer, NioManagedBuffer}
import org.apache.spark.network.shuffle.{BlockFetchingListener, ShuffleClient}
import org.apache.spark.storage.{BlockId, StorageLevel}
import org.apache.spark.util.ThreadUtils

private[spark]
abstract class BlockTransferService extends ShuffleClient with Closeable with Logging {

  
  def uploadBlockSync(
      hostname: String,
      port: Int,
      execId: String,
      blockId: BlockId,
      blockData: ManagedBuffer,
      level: StorageLevel,
      classTag: ClassTag[_]): Unit = {
    val future = uploadBlock(hostname, port, execId, blockId, blockData, level, classTag)
    ThreadUtils.awaitResult(future, Duration.Inf)
  }
} 
Example 89
Source File: BatchingQueue.scala    From daml   with Apache License 2.0 5 votes vote down vote up
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.ledger.participant.state.kvutils.api

import java.io.Closeable
import java.util.concurrent.atomic.AtomicBoolean

import akka.stream.scaladsl.{Sink, Source, SourceQueueWithComplete}
import akka.stream.{Materializer, OverflowStrategy, QueueOfferResult}
import com.daml.ledger.participant.state.kvutils.DamlKvutils.DamlSubmissionBatch
import com.daml.ledger.participant.state.v1.SubmissionResult

import scala.concurrent.Future
import scala.concurrent.duration._

object BatchingQueue {
  type CommitBatchFunction =
    Seq[DamlSubmissionBatch.CorrelatedSubmission] => Future[Unit]
}


case class DefaultBatchingQueue(
    maxQueueSize: Int,
    maxBatchSizeBytes: Long,
    maxWaitDuration: FiniteDuration,
    maxConcurrentCommits: Int
) extends BatchingQueue {
  private val queue: Source[
    Seq[DamlSubmissionBatch.CorrelatedSubmission],
    SourceQueueWithComplete[DamlSubmissionBatch.CorrelatedSubmission]] =
    Source
      .queue(maxQueueSize, OverflowStrategy.dropNew)
      .groupedWeightedWithin(maxBatchSizeBytes, maxWaitDuration)(
        (cs: DamlSubmissionBatch.CorrelatedSubmission) => cs.getSubmission.size.toLong)

  def run(commitBatch: Seq[DamlSubmissionBatch.CorrelatedSubmission] => Future[Unit])(
      implicit materializer: Materializer): RunningBatchingQueueHandle = {
    val materializedQueue = queue
      .mapAsync(maxConcurrentCommits)(commitBatch)
      .to(Sink.ignore)
      .run()

    val queueAlive = new AtomicBoolean(true)
    materializedQueue.watchCompletion.foreach { _ =>
      queueAlive.set(false)
    }(materializer.executionContext)

    new RunningBatchingQueueHandle {
      override def alive: Boolean = queueAlive.get()

      override def offer(
          submission: DamlSubmissionBatch.CorrelatedSubmission): Future[SubmissionResult] = {
        materializedQueue
          .offer(submission)
          .map {
            case QueueOfferResult.Enqueued => SubmissionResult.Acknowledged
            case QueueOfferResult.Dropped => SubmissionResult.Overloaded
            case f: QueueOfferResult.Failure => SubmissionResult.InternalError(f.toString)
            case QueueOfferResult.QueueClosed =>
              SubmissionResult.InternalError("DefaultBatchingQueue.queue is closed")
          }(materializer.executionContext)
      }

      override def close(): Unit = {
        materializedQueue.complete()
      }
    }
  }
} 
Example 90
Source File: FileBasedWriteAheadLogRandomReader.scala    From sparkoscope   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.streaming.util

import java.io.Closeable
import java.nio.ByteBuffer

import org.apache.hadoop.conf.Configuration


private[streaming] class FileBasedWriteAheadLogRandomReader(path: String, conf: Configuration)
  extends Closeable {

  private val instream = HdfsUtils.getInputStream(path, conf)
  private var closed = (instream == null) // the file may be deleted as we're opening the stream

  def read(segment: FileBasedWriteAheadLogSegment): ByteBuffer = synchronized {
    assertOpen()
    instream.seek(segment.offset)
    val nextLength = instream.readInt()
    HdfsUtils.checkState(nextLength == segment.length,
      s"Expected message length to be ${segment.length}, but was $nextLength")
    val buffer = new Array[Byte](nextLength)
    instream.readFully(buffer)
    ByteBuffer.wrap(buffer)
  }

  override def close(): Unit = synchronized {
    closed = true
    instream.close()
  }

  private def assertOpen() {
    HdfsUtils.checkState(!closed, "Stream is closed. Create a new Reader to read from the file.")
  }
} 
Example 91
Source File: HadoopFileLinesReader.scala    From sparkoscope   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.sql.execution.datasources

import java.io.Closeable
import java.net.URI

import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.Path
import org.apache.hadoop.io.Text
import org.apache.hadoop.mapreduce._
import org.apache.hadoop.mapreduce.lib.input.{FileSplit, LineRecordReader}
import org.apache.hadoop.mapreduce.task.TaskAttemptContextImpl


class HadoopFileLinesReader(
    file: PartitionedFile, conf: Configuration) extends Iterator[Text] with Closeable {
  private val iterator = {
    val fileSplit = new FileSplit(
      new Path(new URI(file.filePath)),
      file.start,
      file.length,
      // TODO: Implement Locality
      Array.empty)
    val attemptId = new TaskAttemptID(new TaskID(new JobID(), TaskType.MAP, 0), 0)
    val hadoopAttemptContext = new TaskAttemptContextImpl(conf, attemptId)
    val reader = new LineRecordReader()
    reader.initialize(fileSplit, hadoopAttemptContext)
    new RecordReaderIterator(reader)
  }

  override def hasNext: Boolean = iterator.hasNext

  override def next(): Text = iterator.next()

  override def close(): Unit = iterator.close()
} 
Example 92
Source File: RecordReaderIterator.scala    From sparkoscope   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.sql.execution.datasources

import java.io.Closeable

import org.apache.hadoop.mapreduce.RecordReader

import org.apache.spark.sql.catalyst.InternalRow


class RecordReaderIterator[T](
    private[this] var rowReader: RecordReader[_, T]) extends Iterator[T] with Closeable {
  private[this] var havePair = false
  private[this] var finished = false

  override def hasNext: Boolean = {
    if (!finished && !havePair) {
      finished = !rowReader.nextKeyValue
      if (finished) {
        // Close and release the reader here; close() will also be called when the task
        // completes, but for tasks that read from many files, it helps to release the
        // resources early.
        close()
      }
      havePair = !finished
    }
    !finished
  }

  override def next(): T = {
    if (!hasNext) {
      throw new java.util.NoSuchElementException("End of stream")
    }
    havePair = false
    rowReader.getCurrentValue
  }

  override def close(): Unit = {
    if (rowReader != null) {
      try {
        // Close the reader and release it. Note: it's very important that we don't close the
        // reader more than once, since that exposes us to MAPREDUCE-5918 when running against
        // older Hadoop 2.x releases. That bug can lead to non-deterministic corruption issues
        // when reading compressed input.
        rowReader.close()
      } finally {
        rowReader = null
      }
    }
  }
} 
Example 93
Source File: LagomDevModeConsoleHelper.scala    From lagom   with Apache License 2.0 5 votes vote down vote up
package com.lightbend.lagom.devmode

import java.io.Closeable
import java.util.concurrent.Executors
import java.util.concurrent.TimeUnit

import com.lightbend.lagom.devmode.Reloader.DevServerBinding
import play.dev.filewatch.LoggerProxy

import scala.concurrent.Await
import scala.concurrent.ExecutionContext
import scala.concurrent.Future
import scala.concurrent.duration._


class ConsoleHelper(colors: Colors) {
  def printStartScreen(log: LoggerProxy, services: Seq[ServiceBindingInfo]): Unit = {
    services.foreach {
      case ServiceBindingInfo(name, bindings) =>
        bindings.foreach(b => log.info(s"Service $name listening for ${b.protocol} on ${b.address}:${b.port}"))
    }
    log.info(
      colors.green(
        s"(Service${if (services.size > 1) "s" else ""} started, press enter to stop and go back to the console...)"
      )
    )
  }

  def blockUntilExit() = {
    // blocks until user presses enter
    System.in.read()
  }

  def shutdownAsynchronously(log: LoggerProxy, services: Seq[Closeable], infrastructureServices: Seq[Closeable]) = {
    // shut down all running services
    log.info("Stopping services")

    val n = java.lang.Runtime.getRuntime.availableProcessors
    log.debug("nb proc : " + n)
    //creating a dedicated execution context
    // with a fixed number of thread (indexed on number of cpu)
    implicit val ecn = ExecutionContext.fromExecutorService(
      Executors.newFixedThreadPool(n)
    )

    try {
      // Stop the services asynchronously
      val closing = Future.traverse(services)(serv =>
        Future {
          serv.close()
        }
      )
      closing.onComplete(_ => log.info("All services are stopped"))
      Await.result(closing, 60.seconds)

      println()
      // and finally shut down any other possibly running embedded server
      Await.result(
        Future.traverse(infrastructureServices)(serv =>
          Future {
            serv.close()
          }
        ),
        60.seconds
      )
    } finally {
      // Finally, shut down the execution context created above.
      ecn.shutdown()
      ecn.awaitTermination(60, TimeUnit.SECONDS)
    }
  }
}

class Colors(logNoFormat: String) {
  import scala.Console._

  val isANSISupported: Boolean = {
    Option(System.getProperty(logNoFormat))
      .map(_ != "true")
      .orElse {
        Option(System.getProperty("os.name"))
          .map(_.toLowerCase(java.util.Locale.ENGLISH))
          .filter(_.contains("windows"))
          .map(_ => false)
      }
      .getOrElse(true)
  }

  private def color(code: String, str: String) = if (isANSISupported) code + str + RESET else str

  def red(str: String): String     = color(RED, str)
  def blue(str: String): String    = color(BLUE, str)
  def cyan(str: String): String    = color(CYAN, str)
  def green(str: String): String   = color(GREEN, str)
  def magenta(str: String): String = color(MAGENTA, str)
  def white(str: String): String   = color(WHITE, str)
  def black(str: String): String   = color(BLACK, str)
  def yellow(str: String): String  = color(YELLOW, str)
} 
Example 94
Source File: Utils.scala    From incubator-livy   with Apache License 2.0 5 votes vote down vote up
package org.apache.livy

import java.io.{Closeable, File, InputStreamReader}
import java.net.URL
import java.nio.charset.StandardCharsets.UTF_8
import java.security.SecureRandom
import java.util.Properties

import scala.annotation.tailrec
import scala.collection.JavaConverters._
import scala.concurrent.TimeoutException
import scala.concurrent.duration.Duration

import org.apache.commons.codec.binary.Base64

object Utils {
  def getPropertiesFromFile(file: File): Map[String, String] = {
    loadProperties(file.toURI().toURL())
  }

  def loadProperties(url: URL): Map[String, String] = {
    val inReader = new InputStreamReader(url.openStream(), UTF_8)
    try {
      val properties = new Properties()
      properties.load(inReader)
      properties.stringPropertyNames().asScala.map { k =>
        (k, properties.getProperty(k).trim())
      }.toMap
    } finally {
      inReader.close()
    }
  }

  
  def isProcessAlive(process: Process): Boolean = {
    try {
      process.exitValue()
      false
    } catch {
      case _: IllegalThreadStateException =>
        true
    }
  }

  def startDaemonThread(name: String)(f: => Unit): Thread = {
    val thread = new Thread(name) {
      override def run(): Unit = f
    }
    thread.setDaemon(true)
    thread.start()
    thread
  }

  def usingResource[A <: Closeable, B](resource: A)(f: A => B): B = {
    try {
      f(resource)
    } finally {
      resource.close()
    }
  }

  def createSecret(secretBitLength: Int): String = {
    val rnd = new SecureRandom()
    val secretBytes = new Array[Byte](secretBitLength / java.lang.Byte.SIZE)
    rnd.nextBytes(secretBytes)

    Base64.encodeBase64String(secretBytes)
  }
} 
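A usage sketch for usingResource; the path is hypothetical and the reader is closed even if reading fails.

import java.io.{BufferedReader, FileReader}

object UsingResourceDemo {
  // Count the lines of a file using the loan helper above.
  def countLines(path: String): Int =
    Utils.usingResource(new BufferedReader(new FileReader(path))) { reader =>
      Iterator.continually(reader.readLine()).takeWhile(_ != null).size
    }
}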
Example 95
Source File: ProcessLogRedirector.scala    From incubator-retired-gearpump   with Apache License 2.0 5 votes vote down vote up
package org.apache.gearpump.util

import java.io.{Closeable, Flushable}
import scala.sys.process.ProcessLogger

import org.slf4j.LoggerFactory


class ProcessLogRedirector extends ProcessLogger with Closeable with Flushable with ConsoleOutput {
  private val LOG = LoggerFactory.getLogger("redirect")

  // We only capture the first 1K chars
  private final val LENGTH = 1000
  private var _error: String = ""
  private var _output: String = ""

  def error: String = _error
  def output: String = _output

  def out(s: => String): Unit = {
    if (_output.length <= LENGTH) {
      _output += "\n" + s
    }
    LOG.info(s)
  }
  def err(s: => String): Unit = {
    if (_error.length <= LENGTH) {
      _error += "\n" + s
    }
    LOG.error(s)
  }
  def buffer[T](f: => T): T = f
  def close(): Unit = ()
  def flush(): Unit = ()
} 
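A usage sketch that runs an external command (hypothetical) through scala.sys.process and captures the truncated output via the redirector.

import scala.sys.process._

object RunAndCapture {
  def main(args: Array[String]): Unit = {
    val logger = new ProcessLogRedirector
    val exitCode = Process(Seq("echo", "hello")).!(logger)
    println(s"exit=$exitCode, captured=${logger.output.trim}")
    logger.close() // a no-op here, but keeps the Closeable contract
  }
}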
Example 96
Source File: Context.scala    From incubator-retired-gearpump   with Apache License 2.0 5 votes vote down vote up
package org.apache.gearpump.transport.netty

import java.io.Closeable
import java.util.concurrent._

import scala.collection.JavaConverters._

import akka.actor.{ActorRef, ActorSystem, Props}
import com.typesafe.config.Config
import org.jboss.netty.channel.socket.nio.NioClientSocketChannelFactory
import org.slf4j.Logger

import org.apache.gearpump.transport.netty.Server.ServerPipelineFactory
import org.apache.gearpump.transport.{ActorLookupById, HostPort}
import org.apache.gearpump.util.{Constants, LogUtil}

object Context {
  private final val LOG: Logger = LogUtil.getLogger(getClass)
}


// Class declaration reconstructed so this excerpt compiles; the original class also
// wires up the Netty client/server factories referenced by the imports above.
class Context(system: ActorSystem, conf: Config) {
  import Context._

  // Closeable resources (client actors, channel factories, ...) registered for cleanup.
  private val closeHandler = new ConcurrentLinkedQueue[Closeable]()

  def close(): Unit = {

    LOG.info(s"Context.term, cleanup resources...., " +
      s"we have ${closeHandler.size()} items to close...")

    // Cleans up resource in reverse order so that client actor can be cleaned
    // before clientChannelFactory
    closeHandler.iterator().asScala.toList.reverse.foreach(_.close())
  }
} 
Example 97
Source File: AppLoader.scala    From play-quill-jdbc   with MIT License 5 votes vote down vote up
import java.io.Closeable
import javax.sql.DataSource

import controllers.UsersController
import io.getquill._
import play.api.ApplicationLoader.Context
import play.api._
import play.api.db.evolutions.Evolutions
import play.api.db.{DBComponents, HikariCPComponents}
import play.api.inject.{Injector, NewInstanceInjector, SimpleInjector}
import play.api.routing.Router
import play.api.routing.sird._
import models.{Users}

class AppLoader extends ApplicationLoader {
  override def load(context: Context): Application = new BuiltInComponentsFromContext(context) with DBComponents with HikariCPComponents {

    // Quill's JdbcContext requires a DataSource that is also Closeable, hence the cast.
    lazy val db = new H2JdbcContext[SnakeCase](dbApi.database("default").dataSource.asInstanceOf[DataSource with Closeable])


    lazy val users = new Users(db)
    lazy val usersController = new UsersController(users)

    val router = Router.from {
      case GET(p"/users/${long(id)}")    => usersController.get(id)
      case POST(p"/users")               => usersController.create
      case DELETE(p"/users/${long(id)}") => usersController.delete(id)
      case PUT(p"/users/${long(id)}")    => usersController.update(id)
    }

    override lazy val injector: Injector =
      new SimpleInjector(NewInstanceInjector) + users + router + cookieSigner + csrfTokenSigner + httpConfiguration + tempFileCreator + global

    Evolutions.applyEvolutions(dbApi.database("default"))

  }.application
} 
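The loader is then activated by setting play.application.loader = "AppLoader" in conf/application.conf (adjust the fully qualified name if the class lives in a package).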
Example 98
Source File: BundleSerializer.scala    From mleap   with Apache License 2.0 5 votes vote down vote up
package ml.combust.bundle.serializer

import java.io.Closeable
import java.nio.file.Files

import ml.combust.bundle.{BundleContext, BundleFile, HasBundleRegistry}
import ml.combust.bundle.dsl.Bundle
import ml.combust.bundle.json.JsonSupport._
import spray.json._
import resource._

import scala.util.Try


// Class header reconstructed so this excerpt compiles; parameter names follow their use below.
class BundleSerializer[Context](context: Context, file: BundleFile)
                               (implicit hr: HasBundleRegistry) extends Closeable {

  def read[Transformer <: AnyRef](): Try[Bundle[Transformer]] = {
    for(info <- file.readInfo();
        bundleContext = BundleContext(context,
          info.format,
          hr.bundleRegistry,
          file.fs,
          file.path);
        root <- NodeSerializer(bundleContext.bundleContext("root")).read()) yield {
      Bundle(info, root.asInstanceOf[Transformer])
    }
  }

  override def close(): Unit = file.close()
} 
Example 99
Source File: ExecutorCloser.scala    From openwhisk   with Apache License 2.0 5 votes vote down vote up
package org.apache.openwhisk.common
import java.io.Closeable
import java.util.concurrent.{ExecutorService, TimeUnit}

import akka.event.slf4j.SLF4JLogging

import scala.concurrent.duration._

case class ExecutorCloser(service: ExecutorService, timeout: FiniteDuration = 5.seconds)
    extends Closeable
    with SLF4JLogging {
  override def close(): Unit = {
    try {
      service.shutdown()
      service.awaitTermination(timeout.toSeconds, TimeUnit.SECONDS)
    } catch {
      case e: InterruptedException =>
        log.error("Error while shutting down the ExecutorService", e)
        Thread.currentThread.interrupt()
    } finally {
      if (!service.isShutdown) {
        log.warn(s"ExecutorService `$service` didn't shutdown property. Will be forced now.")
      }
      service.shutdownNow()
    }
  }
} 
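A minimal usage sketch (the pool size and timeout here are arbitrary illustration values):

import java.util.concurrent.Executors
import scala.concurrent.duration._
import org.apache.openwhisk.common.ExecutorCloser

object ExecutorCloserUsage extends App {
  val pool = Executors.newFixedThreadPool(2)
  pool.submit(new Runnable { override def run(): Unit = println("work") })
  // Attempts a graceful shutdown, forcing one if the pool is still busy after 2 seconds.
  ExecutorCloser(pool, 2.seconds).close()
}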
Example 100
Source File: HadoopFileExcelReader.scala    From spark-hadoopoffice-ds   with Apache License 2.0 5 votes vote down vote up
package org.zuinnote.spark.office.excel

import java.io.Closeable
import java.net.URI

import org.apache.spark.sql.execution.datasources._

import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.Path
import org.apache.hadoop.io.ArrayWritable
import org.apache.hadoop.io.Text
import org.apache.hadoop.mapreduce._
import org.apache.hadoop.mapreduce.lib.input.{ FileSplit, LineRecordReader }
import org.apache.hadoop.mapreduce.task.TaskAttemptContextImpl

import org.apache.spark.sql.execution.datasources.RecordReaderIterator

import org.zuinnote.hadoop.office.format.mapreduce.ExcelFileInputFormat
import org.zuinnote.hadoop.office.format.mapreduce.ExcelRecordReader

import org.apache.commons.logging.LogFactory
import org.apache.commons.logging.Log


class HadoopFileExcelReader(
  file: PartitionedFile, conf: Configuration) extends Iterator[ArrayWritable] with Closeable {
  val LOG = LogFactory.getLog(classOf[HadoopFileExcelReader])
  private var reader: RecordReader[Text, ArrayWritable] = null
  private val iterator = {
    val fileSplit = new FileSplit(
      new Path(new URI(file.filePath)), file.start, file.length, Array.empty) // todo: implement locality (replace Array.empty with the locations)
    val attemptId = new TaskAttemptID(new TaskID(new JobID(), TaskType.MAP, 0), 0)
    val hadoopAttemptContext = new TaskAttemptContextImpl(conf, attemptId)
    val inputFormat = new ExcelFileInputFormat()
    reader = inputFormat.createRecordReader(fileSplit, hadoopAttemptContext)
    reader.initialize(fileSplit, hadoopAttemptContext)
    new RecordReaderIterator(reader)
  }
  
  def getReader: RecordReader[Text, ArrayWritable] = reader

  override def hasNext: Boolean = iterator.hasNext

  override def next(): ArrayWritable = iterator.next()

  override def close(): Unit = {
    if (reader != null) {
      reader.close()
    }
  }
}