java.util.TimerTask Scala Examples
The following examples show how to use java.util.TimerTask.
Each example notes the project it comes from, the source file, and the project's license.
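Before the project examples, here is a minimal sketch of the basic Timer/TimerTask pattern most of them build on; the task bodies, thread name, and delays are illustrative only.

import java.util.{Timer, TimerTask}

object TimerTaskBasics extends App {
  // A daemon timer will not keep the JVM alive on its own.
  val timer = new Timer("demo-timer", true)

  // One-shot task: runs once, 500 ms from now.
  timer.schedule(new TimerTask {
    override def run(): Unit = println("one-shot task fired")
  }, 500)

  // Repeating task: first run immediately, then every 1000 ms.
  val repeating = new TimerTask {
    override def run(): Unit = println("tick")
  }
  timer.scheduleAtFixedRate(repeating, 0, 1000)

  Thread.sleep(3500)
  repeating.cancel() // stop just this task
  timer.cancel()     // stop the timer and discard any remaining tasks
}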
Example 1
Source File: HttpInputDStream.scala From prosparkstreaming with Apache License 2.0
package org.apress.prospark

import java.util.Timer
import java.util.TimerTask

import scala.reflect.ClassTag

import org.apache.http.client.methods.HttpGet
import org.apache.http.impl.client.CloseableHttpClient
import org.apache.http.impl.client.HttpClients
import org.apache.http.util.EntityUtils
import org.apache.spark.Logging
import org.apache.spark.storage.StorageLevel
import org.apache.spark.streaming.StreamingContext
import org.apache.spark.streaming.api.java.JavaDStream
import org.apache.spark.streaming.api.java.JavaDStream.fromDStream
import org.apache.spark.streaming.api.java.JavaStreamingContext
import org.apache.spark.streaming.dstream.DStream
import org.apache.spark.streaming.dstream.ReceiverInputDStream
import org.apache.spark.streaming.receiver.Receiver

class HttpInputDStream(
    @transient ssc_ : StreamingContext,
    storageLevel: StorageLevel,
    url: String,
    interval: Long) extends ReceiverInputDStream[String](ssc_) with Logging {

  def getReceiver(): Receiver[String] = {
    new HttpReceiver(storageLevel, url, interval)
  }
}

class HttpReceiver(
    storageLevel: StorageLevel,
    url: String,
    interval: Long) extends Receiver[String](storageLevel) with Logging {

  var httpClient: CloseableHttpClient = _
  var trigger: Timer = _

  def onStop() {
    httpClient.close()
    logInfo("Disconnected from Http Server")
  }

  def onStart() {
    httpClient = HttpClients.createDefault()
    trigger = new Timer()
    trigger.scheduleAtFixedRate(new TimerTask {
      def run() = doGet()
    }, 0, interval * 1000)
    logInfo("Http Receiver initiated")
  }

  def doGet() {
    logInfo("Fetching data from Http source")
    val response = httpClient.execute(new HttpGet(url))
    try {
      val content = EntityUtils.toString(response.getEntity())
      store(content)
    } catch {
      case e: Exception => restart("Error! Problems while connecting", e)
    } finally {
      response.close()
    }
  }
}

object HttpUtils {
  def createStream(
    ssc: StreamingContext,
    storageLevel: StorageLevel = StorageLevel.MEMORY_AND_DISK_SER_2,
    url: String,
    interval: Long): DStream[String] = {
    new HttpInputDStream(ssc, storageLevel, url, interval)
  }

  def createStream(
    jssc: JavaStreamingContext,
    storageLevel: StorageLevel,
    url: String,
    interval: Long): JavaDStream[String] = {
    implicitly[ClassTag[AnyRef]].asInstanceOf[ClassTag[String]]
    createStream(jssc.ssc, storageLevel, url, interval)
  }
}
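A hedged usage sketch for the HttpUtils helper above; the Spark master, application name, endpoint URL and polling interval are illustrative, not part of the original project.

import org.apache.spark.SparkConf
import org.apache.spark.streaming.{Seconds, StreamingContext}
import org.apress.prospark.HttpUtils

object HttpStreamDemo extends App {
  val conf = new SparkConf().setMaster("local[2]").setAppName("HttpStreamDemo")
  val ssc = new StreamingContext(conf, Seconds(10))

  // Poll the endpoint every 10 seconds; storageLevel falls back to its default.
  val stream = HttpUtils.createStream(ssc, url = "http://localhost:8080/metrics", interval = 10)
  stream.print()

  ssc.start()
  ssc.awaitTermination()
}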
Example 2
Source File: Utils.scala From telegram with Apache License 2.0
import java.util.{Timer, TimerTask}

import scala.concurrent.duration.Duration
import scala.concurrent.{Future, Promise}
import scala.util.Try

object Utils {

  def after[T](duration: Duration)(block: => T): Future[T] = {
    val promise = Promise[T]()
    val t = new Timer()
    t.schedule(new TimerTask {
      override def run(): Unit = {
        promise.complete(Try(block))
      }
    }, duration.toMillis)
    promise.future
  }
}
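A hedged usage sketch for the after helper above; the demo object, the computed value and the timeouts are illustrative. Note that each call to after creates a fresh non-daemon Timer that is never cancelled, so the explicit sys.exit() keeps the demo from lingering.

import scala.concurrent.Await
import scala.concurrent.duration._

object AfterDemo extends App {
  // Evaluate the block roughly two seconds from now.
  val delayed = Utils.after(2.seconds)(40 + 2)

  // Blocking here is only for demonstration purposes.
  println(Await.result(delayed, 5.seconds)) // prints 42 after ~2 s
  sys.exit()
}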
Example 3
Source File: DeferredRendering.scala From scalismo-ui with GNU General Public License v3.0
package scalismo.ui.rendering.internal

import java.util.{Timer, TimerTask}

object DeferredRendering {
  var DelayMs = 25
}

class DeferredRendering(operation: => Unit) extends Timer(true) {

  private class Skipped {
    var count: Int = 0
  }

  private val skipped = new Skipped

  private var pending: Option[DeferredRenderTask] = None

  private class DeferredRenderTask extends TimerTask {
    override def run(): Unit = {
      operation
      skipped.synchronized {
        //println(s"skipped: ${skipped.count}")
        skipped.count = 0
        pending = None
      }
    }
  }

  def request(): Unit = skipped.synchronized {
    if (pending.isEmpty) {
      val task = new DeferredRenderTask
      pending = Some(task)
      super.schedule(task, DeferredRendering.DelayMs)
    } else {
      skipped.count += 1
    }
  }
}
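A hedged usage sketch, assuming the scalismo-ui class above is on the classpath; the demo object and the printed message are illustrative.

import scalismo.ui.rendering.internal.DeferredRendering

object DeferredRenderingDemo extends App {
  // The operation is a stand-in for an actual render call.
  val deferred = new DeferredRendering(println("rendering frame"))

  // A burst of requests inside the 25 ms window coalesces into a single execution.
  (1 to 10).foreach(_ => deferred.request())

  Thread.sleep(100)
  deferred.cancel() // DeferredRendering extends java.util.Timer, so it can be cancelled
}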
Example 4
Source File: JsonFileReporter.scala From kyuubi with Apache License 2.0
package yaooqinn.kyuubi.metrics

import java.io.{BufferedWriter, Closeable, IOException, OutputStreamWriter}
import java.util.{Timer, TimerTask}
import java.util.concurrent.TimeUnit

import scala.util.Try
import scala.util.control.NonFatal

import com.codahale.metrics.MetricRegistry
import com.codahale.metrics.json.MetricsModule
import com.fasterxml.jackson.databind.ObjectMapper
import org.apache.hadoop.fs.{FileSystem, Path}
import org.apache.hadoop.fs.permission.FsPermission
import org.apache.kyuubi.Logging
import org.apache.spark.{KyuubiSparkUtil, SparkConf}
import org.apache.spark.KyuubiConf._

private[metrics] class JsonFileReporter(conf: SparkConf, registry: MetricRegistry)
  extends Closeable with Logging {

  private val jsonMapper = new ObjectMapper().registerModule(
    new MetricsModule(TimeUnit.MILLISECONDS, TimeUnit.MILLISECONDS, false))
  private val timer = new Timer(true)
  private val interval = KyuubiSparkUtil.timeStringAsMs(conf.get(METRICS_REPORT_INTERVAL))
  private val path = conf.get(METRICS_REPORT_LOCATION)
  private val hadoopConf = KyuubiSparkUtil.newConfiguration(conf)

  def start(): Unit = {
    timer.schedule(new TimerTask {
      var bw: BufferedWriter = _

      override def run(): Unit = try {
        val json = jsonMapper.writerWithDefaultPrettyPrinter().writeValueAsString(registry)
        val tmpPath = new Path(path + ".tmp")
        val tmpPathUri = tmpPath.toUri
        val fs = if (tmpPathUri.getScheme == null && tmpPathUri.getAuthority == null) {
          FileSystem.getLocal(hadoopConf)
        } else {
          FileSystem.get(tmpPathUri, hadoopConf)
        }
        fs.delete(tmpPath, true)
        bw = new BufferedWriter(new OutputStreamWriter(fs.create(tmpPath, true)))
        bw.write(json)
        bw.close()
        fs.setPermission(tmpPath, FsPermission.createImmutable(Integer.parseInt("644", 8).toShort))
        val finalPath = new Path(path)
        fs.rename(tmpPath, finalPath)
        fs.setPermission(finalPath, FsPermission.createImmutable(Integer.parseInt("644", 8).toShort))
      } catch {
        case NonFatal(e) => error("Error writing metrics to json file " + path, e)
      } finally {
        if (bw != null) {
          Try(bw.close())
        }
      }
    }, 0, interval)
  }

  override def close(): Unit = {
    timer.cancel()
  }
}
Example 5
Source File: TimerSchedule.scala From spark1.52 with Apache License 2.0
package scalaDemo

import java.text.SimpleDateFormat
import java.util.{Timer, TimerTask}

object TimerSchedule {

  // The published snippet has an unbalanced closing brace; its body most likely
  // sat inside a main method, which is restored here so the example compiles.
  def main(args: Array[String]): Unit = {
    val fTime = new SimpleDateFormat("yyyy/MM/dd HH:mm:ss")
    val d1 = fTime.parse("2005/12/30 14:10:00")
    val timer: Timer = new Timer()
    // Run the task starting at d1, then every 3 minutes (fixed rate).
    timer.scheduleAtFixedRate(new TimerTask() {
      override def run(): Unit = {
        System.out.println("this is task you do6")
      }
    }, d1, 3 * 60 * 1000)
  }
}
Example 6
Source File: PromiseTimedEventTestApp.scala From spark1.52 with Apache License 2.0
package scalaDemo

import java.util.{Timer, TimerTask}

import scala.concurrent._

// Excerpt: the published snippet omits the enclosing object as well as the shared
// `timer` instance and the `delayedSuccess` helper that the two trailing calls use.

def delayedFailure(secs: Int, msg: String): Future[Int] = {
  val result = Promise[Int]
  timer.schedule(new TimerTask() {
    def run() = {
      result.failure(new IllegalArgumentException(msg))
    }
  }, secs * 10)
  result.future
}

delayedSuccess(1, timer)
delayedFailure(2, "delayedFailure")
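Because the snippet above is only an excerpt, here is a self-contained sketch of the same Promise-plus-Timer pattern; the object name, the delayedSuccess helper and the millisecond conversions are assumptions rather than the original code.

import java.util.{Timer, TimerTask}

import scala.concurrent.{Await, Future, Promise}
import scala.concurrent.duration._

object PromiseTimedEventSketch extends App {
  // Daemon timer shared by both helpers.
  val timer = new Timer(true)

  def delayedSuccess(secs: Int, value: Int): Future[Int] = {
    val result = Promise[Int]()
    timer.schedule(new TimerTask {
      def run(): Unit = result.success(value)
    }, secs * 1000L)
    result.future
  }

  def delayedFailure(secs: Int, msg: String): Future[Int] = {
    val result = Promise[Int]()
    timer.schedule(new TimerTask {
      def run(): Unit = result.failure(new IllegalArgumentException(msg))
    }, secs * 1000L)
    result.future
  }

  println(Await.result(delayedSuccess(1, 42), 5.seconds))          // 42 after ~1 s
  println(Await.ready(delayedFailure(2, "boom"), 5.seconds).value) // Some(Failure(...)) after ~2 s
}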
Example 7
Source File: TimedEvent.scala From spark1.52 with Apache License 2.0
package scalaDemo.threadConcurrency

import java.util.{Timer, TimerTask}

import scala.concurrent._

// Excerpt: the enclosing object and the shared `timer` instance are omitted from
// the published snippet.

def delayedFailure(secs: Int, msg: String): Future[Int] = {
  // The Promise is handed to the task executor, which uses it to mark the task
  // as completed or failed.
  val result = Promise[Int]
  timer.schedule(new TimerTask() {
    def run() = {
      result.failure(new IllegalArgumentException(msg))
    }
  }, secs * 1000)
  // A Future represents the result of an asynchronous task that may not have
  // finished yet; callbacks can be attached to it to react once the task
  // succeeds or fails.
  result.future
}
Example 8
Source File: Scheduler.scala From korolev with Apache License 2.0
package korolev.effect

import java.util.{Timer, TimerTask}

import korolev.effect.Effect.Promise
import korolev.effect.syntax._

import scala.concurrent.ExecutionContext
import scala.concurrent.duration.FiniteDuration

final class Scheduler[F[_]: Effect](implicit ec: ExecutionContext) {

  import Scheduler._

  private val timer = new Timer()

  def schedule(delay: FiniteDuration): F[Stream[F, Unit]] = Effect[F].delay {
    new Stream[F, Unit] {
      var canceled = false
      var cb: Either[Throwable, Option[Unit]] => Unit = _
      var task: TimerTask = _
      def pull(): F[Option[Unit]] = Effect[F].promise { cb =>
        if (canceled) cb(Right(None)) else {
          this.cb = cb
          this.task = new TimerTask {
            def run(): Unit = cb(Right(Some(())))
          }
          timer.schedule(task, delay.toMillis)
        }
      }
      def cancel(): F[Unit] = Effect[F].delay {
        if (task != null) {
          canceled = true
          task.cancel()
          task = null
          cb(Right(None))
        }
      }
    }
  }

  def sleep(delay: FiniteDuration): F[Unit] = Effect[F].promise { cb =>
    val task: TimerTask = new TimerTask {
      def run(): Unit = cb(Right(()))
    }
    timer.schedule(task, delay.toMillis)
  }

  def scheduleOnce[T](delay: FiniteDuration)(job: => F[T]): F[JobHandler[F, T]] =
    Effect[F].delay(unsafeScheduleOnce(delay)(job))

  def unsafeScheduleOnce[T](delay: FiniteDuration)(job: => F[T]): JobHandler[F, T] =
    new JobHandler[F, T] {

      @volatile private var completed: Either[Throwable, T] = _
      @volatile private var promise: Promise[T] = _

      private val task = new TimerTask {
        def run(): Unit = {
          Effect[F]
            .fork(job)
            .runAsync { errorOrResult =>
              if (promise != null) promise(errorOrResult)
              else completed = errorOrResult
            }
        }
      }

      def result: F[T] = Effect[F].promise { cb =>
        if (completed != null) cb(completed)
        else promise = cb
      }

      def cancel(): F[Unit] = Effect[F].delay(unsafeCancel())

      def unsafeCancel(): Unit = {
        task.cancel()
        ()
      }

      timer.schedule(task, delay.toMillis)
    }
}

object Scheduler {

  trait JobHandler[F[_], T] {
    def unsafeCancel(): Unit
    def cancel(): F[Unit]
    def result: F[T]
  }
}
Example 9
Source File: MapSpec.scala From ez-framework with Apache License 2.0
package com.ecfront.ez.framework.cluster.redis

import java.util.concurrent.CountDownLatch
import java.util.{Timer, TimerTask}

import com.ecfront.ez.framework.core.EZ
import com.ecfront.ez.framework.test.MockStartupSpec

import scala.beans.BeanProperty

class MapSpec extends MockStartupSpec {

  test("Map Test") {
    val mapObj = EZ.dist.map[TestMapObj]("test_obj_map")
    mapObj.clear()

    val obj = new TestMapObj
    obj.a = "测试"
    assert(mapObj.put("a", obj).get("a").a == "测试")

    val map = EZ.dist.map[Long]("test_map")
    map.clear()

    val timer = new Timer()
    timer.schedule(new TimerTask {
      override def run(): Unit = {
        map.put("a", System.currentTimeMillis())
      }
    }, 0, 1000)
    timer.schedule(new TimerTask {
      override def run(): Unit = {
        map.foreach({ (k, v) =>
          println(">>a:" + v)
        })
      }
    }, 0, 10000)
    new CountDownLatch(1).await()
  }
}

class TestMapObj extends Serializable {
  @BeanProperty var a: String = _
}
Example 10
Source File: CustomerTimerDemo.scala From flink-rookie with Apache License 2.0
package com.venn.stream.api.timer

import java.io.File
import java.sql.{Connection, DriverManager, PreparedStatement, SQLException}
import java.util
import java.util.{Timer, TimerTask}

import org.apache.flink.api.scala._
import com.venn.common.Common
import com.venn.util.TwoStringSource
import org.apache.flink.api.common.functions.RichMapFunction
import org.apache.flink.api.common.serialization.SimpleStringSchema
import org.apache.flink.configuration.Configuration
import org.apache.flink.runtime.state.filesystem.FsStateBackend
import org.apache.flink.streaming.api.scala.StreamExecutionEnvironment
import org.apache.flink.streaming.api.{CheckpointingMode, TimeCharacteristic}
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaProducer
import org.slf4j.LoggerFactory

// Excerpt: the published snippet omits the enclosing object and main method, the
// anonymous function that the stray `})` below closes, and the definitions of
// logger, driverName, jdbcUrl, username, password, conn, ps, map, stream and env.

def query() = {
  logger.info("query mysql")
  try {
    Class.forName(driverName)
    conn = DriverManager.getConnection(jdbcUrl, username, password)
    ps = conn.prepareStatement("select id,name from venn.timer")
    val rs = ps.executeQuery
    while (!rs.isClosed && rs.next) {
      val id = rs.getString(1)
      val name = rs.getString(2)
      map.put(id, name)
    }
    logger.info("get config from db size : {}", map.size())
  } catch {
    case e@(_: ClassNotFoundException | _: SQLException) => e.printStackTrace()
  } finally {
    if (conn != null) {
      conn.close()
    }
  }
}
})
// .print()
val sink = new FlinkKafkaProducer[String]("timer_out"
  , new SimpleStringSchema()
  , Common.getProp)
stream.addSink(sink)
env.execute(this.getClass.getName)
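The excerpt above leaves out how query() is driven; what follows is a hedged sketch of one plausible way the visible pieces fit together. The class name, refresh interval and enrichment logic are assumptions; only the idea of reloading a lookup map from a java.util.Timer inside a Flink RichMapFunction is taken from the example.

import java.util.{Timer, TimerTask}
import java.util.concurrent.ConcurrentHashMap

import org.apache.flink.api.common.functions.RichMapFunction
import org.apache.flink.configuration.Configuration

class ConfigEnrichingMapper extends RichMapFunction[String, String] {

  @transient private var timer: Timer = _
  @transient private var config: ConcurrentHashMap[String, String] = _

  override def open(parameters: Configuration): Unit = {
    config = new ConcurrentHashMap[String, String]()
    timer = new Timer("config-refresh", true)
    // Reload the lookup table immediately, then once a minute.
    timer.scheduleAtFixedRate(new TimerTask {
      override def run(): Unit = reload()
    }, 0, 60 * 1000)
  }

  private def reload(): Unit = {
    // The original example queries MySQL here (select id,name from venn.timer);
    // this stub just installs a placeholder entry.
    config.put("1", "placeholder")
  }

  override def map(value: String): String = {
    val name = config.getOrDefault(value, "unknown")
    s"$value,$name"
  }

  override def close(): Unit = {
    if (timer != null) timer.cancel()
  }
}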