java.util.function.Supplier Scala Examples
The following examples show how to use java.util.function.Supplier from Scala. Each example notes the project, source file, and license it was taken from.
Example 1
Source File: DefaultBodyWritables.scala from play-ws (Apache License 2.0)
package play.api.libs.ws

import java.io.File
import java.nio.ByteBuffer
import java.util.function.Supplier

import akka.stream.scaladsl.StreamConverters.fromInputStream
import akka.stream.scaladsl.FileIO
import akka.stream.scaladsl.Source
import akka.util.ByteString

import scala.compat.java8.FunctionConverters.asScalaFromSupplier

trait DefaultBodyWritables {

  implicit val writeableOf_urlEncodedForm: BodyWritable[Map[String, Seq[String]]] = {
    import java.net.URLEncoder
    BodyWritable(
      formData =>
        InMemoryBody(
          ByteString.fromString(
            formData.flatMap(item => item._2.map(c => s"${item._1}=${URLEncoder.encode(c, "UTF-8")}")).mkString("&")
          )
        ),
      "application/x-www-form-urlencoded"
    )
  }

  implicit val writeableOf_urlEncodedSimpleForm: BodyWritable[Map[String, String]] = {
    writeableOf_urlEncodedForm.map[Map[String, String]](_.map(kv => kv._1 -> Seq(kv._2)))
  }
}

object DefaultBodyWritables extends DefaultBodyWritables
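These implicit writables are picked up automatically by the standalone WS client. A minimal usage sketch, assuming a play-ws StandaloneWSClient instance named ws is already in scope:

import play.api.libs.ws.DefaultBodyWritables._

// writeableOf_urlEncodedForm serializes the Map into a form-encoded
// InMemoryBody and sets the application/x-www-form-urlencoded content type.
ws.url("http://localhost:9000/login")
  .post(Map("user" -> Seq("alice"), "pass" -> Seq("s3cret")))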
Example 2
Source File: BlockSolarPanel.scala from Electrodynamics (GNU Lesser General Public License v3.0)
package com.calclavia.edx.electric.circuit.source

import java.util.function.Supplier
import java.util.{Optional, Set => JSet}

import com.calclavia.edx.core.EDX
import com.calclavia.edx.core.prefab.BlockEDX
import com.calclavia.edx.electric.ElectricContent
import com.calclavia.edx.electric.api.{ConnectionBuilder, Electric}
import com.calclavia.edx.electric.grid.NodeElectricComponent
import nova.core.block.Stateful
import nova.core.component.renderer.{ItemRenderer, StaticRenderer}
import nova.core.render.model.Model
import nova.core.render.pipeline.{BlockRenderPipeline, ConnectedTextureRenderPipeline}
import nova.core.render.texture.Texture
import nova.core.util.Direction
import nova.core.util.shape.Cuboid
import nova.scala.component.IO
import nova.scala.util.ExtendedUpdater
import nova.scala.wrapper.FunctionalWrapper._

class BlockSolarPanel extends BlockEDX with ExtendedUpdater with Stateful {
  private val electricNode = components.add(new NodeElectricComponent(this))
  private val io = components.add(new IO(this))
  private val renderer = components.add(new StaticRenderer())
  private val itemRenderer = components.add(new ItemRenderer(this))

  private val texture = func[Direction, Optional[Texture]] {
    dir =>
      dir match {
        case Direction.DOWN => Optional.of(ElectricContent.solarPanelTextureBottom)
        case Direction.UP => Optional.of(ElectricContent.solarPanelTextureTop)
        case _ => Optional.of(ElectricContent.solarPanelTextureSide)
      }
  }

  renderer.onRender(
    new ConnectedTextureRenderPipeline(this, ElectricContent.solarPanelTextureEdge)
      .withFaceMask(2)
      .withTexture(texture)
      .build()
  )

  collider.setBoundingBox(new Cuboid(0, 0, 0, 1, 0.3f, 1))
  collider.isCube(false)
  collider.isOpaqueCube(false)

  //TODO: Solar panels should only connect to wires on the same side
  electricNode.setPositiveConnections(
    new ConnectionBuilder(classOf[Electric])
      .setBlock(this)
      .setConnectMask(supplier(() => io.inputMask))
      .adjacentWireSupplier()
      .asInstanceOf[Supplier[JSet[Electric]]]
  )
  electricNode.setNegativeConnections(
    new ConnectionBuilder(classOf[Electric])
      .setBlock(this)
      .setConnectMask(supplier(() => io.outputMask))
      .adjacentWireSupplier()
      .asInstanceOf[Supplier[JSet[Electric]]]
  )

  itemRenderer.onRender((model: Model) =>
    new BlockRenderPipeline(this)
      .withTexture(texture)
      .build()
  )

  override def update(deltaTime: Double) {
    super.update(deltaTime)
    if (EDX.network.isServer) {
      //if (world.canBlockSeeTheSky(xCoord, yCoord + 1, zCoord) && !this.worldObj.block.hasNoSky) {
      //if (world.isDaytime) {
      //if (!(world.isThundering || world.isRaining)) {
      electricNode.generateVoltage(15)
      //}
      //}
      //}
    }
  }
}
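The supplier(...) calls above come from nova.scala.wrapper.FunctionalWrapper, which adapts Scala functions to Java functional interfaces so they can be handed to builders like ConnectionBuilder. A minimal sketch of such an adapter (a hypothetical reconstruction, not the NOVA source):

import java.util.function.Supplier

object SamAdapters {
  // Wraps a zero-argument Scala function as a java.util.function.Supplier.
  // The function is re-evaluated on every get(), so callers always observe
  // the current io.inputMask/io.outputMask rather than a snapshot.
  def supplier[T](f: () => T): Supplier[T] = new Supplier[T] {
    override def get(): T = f()
  }
}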
Example 3
Source File: Retry.scala from lagom (Apache License 2.0)
package com.lightbend.lagom.internal.javadsl.persistence.jpa

import java.util.concurrent.CompletionStage
import java.util.function.Supplier

import akka.actor.Scheduler
import akka.pattern.after

import scala.concurrent.duration.Duration.fromNanos
import scala.concurrent.duration.FiniteDuration
import scala.concurrent.ExecutionContext
import scala.concurrent.Future
import scala.util.control.NonFatal

// With thanks to https://gist.github.com/viktorklang/9414163
private[lagom] class Retry(delay: FiniteDuration, delayFactor: Double, maxRetries: Int) {
  def apply[T](op: => T)(implicit ec: ExecutionContext, s: Scheduler): Future[T] = {
    def iterate(nextDelay: FiniteDuration, remainingRetries: Int): Future[T] =
      Future(op).recoverWith {
        case NonFatal(throwable) if remainingRetries > 0 => {
          onRetry(throwable, nextDelay, remainingRetries)
          after(nextDelay, s)(iterate(finiteMultiply(nextDelay, delayFactor), remainingRetries - 1))
        }
      }

    iterate(delay, maxRetries)
  }

  // For convenient use from Java 8
  def retry[T](op: Supplier[T])(implicit ec: ExecutionContext, s: Scheduler): CompletionStage[T] = {
    import scala.compat.java8.FutureConverters._
    apply(op.get()).toJava
  }

  protected def onRetry(throwable: Throwable, delay: FiniteDuration, remainingRetries: Int): Unit = ()

  private def finiteMultiply(duration: FiniteDuration, factor: Double): FiniteDuration =
    fromNanos((duration.toNanos * factor).toLong)
}

private[lagom] object Retry {
  def apply[T](delay: FiniteDuration, delayFactor: Double, maxRetries: Int)(
      op: => T
  )(implicit ec: ExecutionContext, s: Scheduler): Future[T] =
    (new Retry(delay, delayFactor, maxRetries))(op)
}
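Because both Retry.apply and the Supplier-based retry take the operation lazily, every attempt re-runs the original computation. A minimal usage sketch (note Retry is private[lagom], so outside Lagom you would need your own copy); flakyCall() is a placeholder:

import akka.actor.ActorSystem
import scala.concurrent.Future
import scala.concurrent.duration._

implicit val system: ActorSystem = ActorSystem("retry-demo")
implicit val ec = system.dispatcher
implicit val scheduler = system.scheduler

def flakyCall(): String = ??? // placeholder for an operation that may throw

// Up to 4 attempts in total: immediately, then after 250ms, 500ms, and 1s.
val result: Future[String] = Retry(250.millis, delayFactor = 2.0, maxRetries = 3)(flakyCall())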
Example 4
Source File: ConfigurationServiceLocatorSpec.scala from lagom (Apache License 2.0)
package com.lightbend.lagom.javadsl.client

import java.net.URI
import java.util.concurrent.CompletionStage
import java.util.concurrent.TimeUnit
import java.util.function.Supplier

import com.typesafe.config.ConfigFactory

import scala.compat.java8.OptionConverters._

import org.scalatest.matchers.should.Matchers
import org.scalatest.wordspec.AnyWordSpec

class ConfigurationServiceLocatorSpec extends AnyWordSpec with Matchers {
  val serviceLocator = new ConfigurationServiceLocator(
    ConfigFactory.parseString(
      """
        |lagom.services {
        |  foo = "http://localhost:10001"
        |  bar = "http://localhost:10002"
        |}
      """.stripMargin
    ),
    new CircuitBreakersPanel {
      override def withCircuitBreaker[T](id: String, body: Supplier[CompletionStage[T]]): CompletionStage[T] =
        body.get()
    }
  )

  def locate(serviceName: String) =
    serviceLocator.locate(serviceName).toCompletableFuture.get(10, TimeUnit.SECONDS).asScala

  "ConfigurationServiceLocator" should {
    "return a found service" in {
      locate("foo") should contain(URI.create("http://localhost:10001"))
      locate("bar") should contain(URI.create("http://localhost:10002"))
    }
    "return none for not found service" in {
      locate("none") shouldBe None
    }
  }
}
Example 5
Source File: CassandraReadSideImpl.scala from lagom (Apache License 2.0)
package com.lightbend.lagom.internal.javadsl.persistence.cassandra

import java.util
import java.util.concurrent.CompletableFuture
import java.util.concurrent.CompletionStage
import java.util.function.BiFunction
import java.util.function.Function
import java.util.function.Supplier

import javax.inject.Inject
import javax.inject.Singleton

import akka.Done
import akka.actor.ActorSystem
import com.datastax.driver.core.BoundStatement
import com.lightbend.lagom.internal.javadsl.persistence.ReadSideImpl
import com.lightbend.lagom.internal.persistence.cassandra.CassandraOffsetStore
import com.lightbend.lagom.javadsl.persistence.ReadSideProcessor.ReadSideHandler
import com.lightbend.lagom.javadsl.persistence._
import com.lightbend.lagom.javadsl.persistence.cassandra.CassandraReadSide.ReadSideHandlerBuilder
import com.lightbend.lagom.javadsl.persistence.cassandra.CassandraReadSide
import com.lightbend.lagom.javadsl.persistence.cassandra.CassandraSession
import play.api.inject.Injector

@Singleton
private[lagom] final class CassandraReadSideImpl @Inject() (
    system: ActorSystem,
    session: CassandraSession,
    offsetStore: CassandraOffsetStore,
    readSide: ReadSideImpl,
    injector: Injector
) extends CassandraReadSide {
  private val dispatcher = system.settings.config.getString("lagom.persistence.read-side.use-dispatcher")
  implicit val ec = system.dispatchers.lookup(dispatcher)

  override def builder[Event <: AggregateEvent[Event]](eventProcessorId: String): ReadSideHandlerBuilder[Event] = {
    new ReadSideHandlerBuilder[Event] {
      import CassandraAutoReadSideHandler.Handler

      private var prepareCallback: AggregateEventTag[Event] => CompletionStage[Done] =
        tag => CompletableFuture.completedFuture(Done.getInstance())
      private var globalPrepareCallback: () => CompletionStage[Done] =
        () => CompletableFuture.completedFuture(Done.getInstance())
      private var handlers = Map.empty[Class[_ <: Event], Handler[Event]]

      override def setGlobalPrepare(callback: Supplier[CompletionStage[Done]]): ReadSideHandlerBuilder[Event] = {
        globalPrepareCallback = () => callback.get
        this
      }

      override def setPrepare(
          callback: Function[AggregateEventTag[Event], CompletionStage[Done]]
      ): ReadSideHandlerBuilder[Event] = {
        prepareCallback = callback.apply
        this
      }

      override def setEventHandler[E <: Event](
          eventClass: Class[E],
          handler: Function[E, CompletionStage[util.List[BoundStatement]]]
      ): ReadSideHandlerBuilder[Event] = {
        handlers += (eventClass -> ((event: E, offset: Offset) => handler(event)))
        this
      }

      override def setEventHandler[E <: Event](
          eventClass: Class[E],
          handler: BiFunction[E, Offset, CompletionStage[util.List[BoundStatement]]]
      ): ReadSideHandlerBuilder[Event] = {
        handlers += (eventClass -> handler.apply _)
        this
      }

      override def build(): ReadSideHandler[Event] = {
        new CassandraAutoReadSideHandler[Event](
          session,
          offsetStore,
          handlers,
          globalPrepareCallback,
          prepareCallback,
          eventProcessorId,
          dispatcher
        )
      }
    }
  }
}
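From the consuming side, the builder collects one global-prepare Supplier, one per-tag prepare Function, and a handler per event class before build() assembles them into a ReadSideHandler. A hedged sketch of a processor using it; ItemAdded, createTables(), and bindStatements() are hypothetical, and Scala 2.12 SAM conversion stands in for the Java lambdas the API expects:

// Inside a ReadSideProcessor, with an injected CassandraReadSide named readSide:
override def buildHandler() =
  readSide.builder[ItemAdded]("item-read-side")  // event processor id
    .setGlobalPrepare(() => createTables())      // Supplier[CompletionStage[Done]], runs once per cluster
    .setEventHandler(classOf[ItemAdded], (e: ItemAdded) => bindStatements(e))
    .build()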
Example 6
Source File: SessionDirectives.scala from akka-http-session (Apache License 2.0)
package com.softwaremill.session.javadsl

import java.util.Optional
import java.util.function.Supplier

import akka.http.javadsl.server.Route
import akka.http.javadsl.server.directives.RouteAdapter
import com.softwaremill.session._

import scala.compat.java8.OptionConverters._

trait SessionDirectives extends OneOffSessionDirectives with RefreshableSessionDirectives {

  def session[T](sc: SessionContinuity[T], st: GetSessionTransport, inner: java.util.function.Function[SessionResult[T], Route]): Route = RouteAdapter {
    com.softwaremill.session.SessionDirectives.session(sc, st) { sessionResult =>
      inner.apply(sessionResult).asInstanceOf[RouteAdapter].delegate
    }
  }

  def setSession[T](sc: SessionContinuity[T], st: SetSessionTransport, v: T, inner: Supplier[Route]): Route = RouteAdapter {
    com.softwaremill.session.SessionDirectives.setSession(sc, st, v) {
      inner.get.asInstanceOf[RouteAdapter].delegate
    }
  }

  def invalidateSession[T](sc: SessionContinuity[T], st: GetSessionTransport, inner: Supplier[Route]): Route = RouteAdapter {
    com.softwaremill.session.SessionDirectives.invalidateSession(sc, st) {
      inner.get.asInstanceOf[RouteAdapter].delegate
    }
  }

  def optionalSession[T](sc: SessionContinuity[T], st: GetSessionTransport, inner: java.util.function.Function[Optional[T], Route]): Route = RouteAdapter {
    com.softwaremill.session.SessionDirectives.optionalSession(sc, st) { session =>
      inner.apply(session.asJava).asInstanceOf[RouteAdapter].delegate
    }
  }

  def requiredSession[T](sc: SessionContinuity[T], st: GetSessionTransport, inner: java.util.function.Function[T, Route]): Route = RouteAdapter {
    com.softwaremill.session.SessionDirectives.requiredSession(sc, st) { session =>
      inner.apply(session).asInstanceOf[RouteAdapter].delegate
    }
  }

  def touchRequiredSession[T](sc: SessionContinuity[T], st: GetSessionTransport, inner: java.util.function.Function[T, Route]): Route = RouteAdapter {
    com.softwaremill.session.SessionDirectives.touchRequiredSession(sc, st) { session =>
      inner.apply(session).asInstanceOf[RouteAdapter].delegate
    }
  }
}

object SessionDirectives extends SessionDirectives
Example 7
Source File: Streams.scala from haystack-trends (Apache License 2.0)
package com.expedia.www.haystack.trends

import java.util.function.Supplier

import com.expedia.metrics.MetricData
import com.expedia.open.tracing.Span
import com.expedia.www.haystack.commons.kstreams.serde.SpanSerde
import com.expedia.www.haystack.commons.kstreams.serde.metricdata.MetricTankSerde
import com.expedia.www.haystack.commons.util.MetricDefinitionKeyGenerator._
import com.expedia.www.haystack.trends.config.entities.{KafkaConfiguration, TransformerConfiguration}
import com.expedia.www.haystack.trends.transformer.MetricDataTransformer.allTransformers
import org.apache.kafka.common.serialization.Serdes.StringSerde
import org.apache.kafka.streams._
import org.apache.kafka.streams.kstream.Produced

import scala.collection.JavaConverters._

class Streams(kafkaConfig: KafkaConfiguration, transformConfig: TransformerConfiguration)
  extends Supplier[Topology] with MetricDataGenerator {

  private[trends] def initialize(builder: StreamsBuilder): Topology = {
    val consumed = Consumed.`with`(kafkaConfig.autoOffsetReset)
      .withKeySerde(new StringSerde)
      .withValueSerde(new SpanSerde)
      .withTimestampExtractor(kafkaConfig.timestampExtractor)

    builder
      .stream(kafkaConfig.consumeTopic, consumed)
      .filter((_: String, span: Span) => isValidSpan(span, transformConfig.blacklistedServices))
      .flatMap[String, MetricData]((_: String, span: Span) => mapToMetricDataKeyValue(span))
      .to(kafkaConfig.produceTopic, Produced.`with`(new StringSerde(), new MetricTankSerde()))

    builder.build()
  }

  private def mapToMetricDataKeyValue(span: Span): java.lang.Iterable[KeyValue[String, MetricData]] = {
    val metricData: Seq[MetricData] =
      generateMetricDataList(span, allTransformers, transformConfig.encoder, transformConfig.enableMetricPointServiceLevelGeneration)
    metricData.map { md =>
      new KeyValue[String, MetricData](generateKey(md.getMetricDefinition), md)
    }.asJavaCollection
  }

  override def get(): Topology = {
    val builder = new StreamsBuilder()
    initialize(builder)
  }
}
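A Supplier[Topology] like this plugs directly into the Kafka Streams runtime, since the topology is only materialized when get() is called. A minimal wiring sketch; the application id and bootstrap servers are placeholder values, and kafkaConfig/transformConfig come from the application's own configuration:

import java.util.Properties
import org.apache.kafka.streams.{KafkaStreams, StreamsConfig}

val props = new Properties()
props.put(StreamsConfig.APPLICATION_ID_CONFIG, "span-timeseries-transformer") // placeholder
props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092")           // placeholder

val streams = new KafkaStreams(new Streams(kafkaConfig, transformConfig).get(), props)
streams.start()
sys.addShutdownHook(streams.close())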
Example 8
Source File: Streams.scala from haystack-trends (Apache License 2.0)
package com.expedia.www.haystack.trends.kstream

import java.util.function.Supplier

import com.expedia.metrics.MetricData
import com.expedia.www.haystack.commons.kstreams.serde.metricdata.{MetricDataSerde, MetricTankSerde}
import com.expedia.www.haystack.trends.aggregation.TrendMetric
import com.expedia.www.haystack.trends.config.AppConfiguration
import com.expedia.www.haystack.trends.kstream.processor.{AdditionalTagsProcessorSupplier, ExternalKafkaProcessorSupplier, MetricAggProcessorSupplier}
import com.expedia.www.haystack.trends.kstream.store.HaystackStoreBuilder
import org.apache.kafka.common.serialization.{Serde, StringDeserializer, StringSerializer}
import org.apache.kafka.streams.Topology
import org.apache.kafka.streams.state.{KeyValueStore, StoreBuilder}
import org.slf4j.LoggerFactory

import scala.collection.JavaConverters

class Streams(appConfiguration: AppConfiguration) extends Supplier[Topology] {
  private val LOGGER = LoggerFactory.getLogger(classOf[Streams])

  private val TOPOLOGY_SOURCE_NAME = "metricpoint-source"
  private val TOPOLOGY_EXTERNAL_SINK_NAME = "metricpoint-aggegated-sink-external"
  private val TOPOLOGY_INTERNAL_SINK_NAME = "metric-data-aggegated-sink-internal"
  private val TOPOLOGY_AGGREGATOR_PROCESSOR_NAME = "metricpoint-aggregator-process"
  private val TOPOLOGY_ADDITIONAL_TAGS_PROCESSOR_NAME = "additional-tags-process"
  private val TOPOLOGY_AGGREGATOR_TREND_METRIC_STORE_NAME = "trend-metric-store"
  private val kafkaConfig = appConfiguration.kafkaConfig

  private def initialize(topology: Topology): Topology = {
    // add source - topic where the raw metricpoints are pushed by the span-timeseries-transformer
    topology.addSource(
      kafkaConfig.autoOffsetReset,
      TOPOLOGY_SOURCE_NAME,
      kafkaConfig.timestampExtractor,
      new StringDeserializer,
      new MetricTankSerde().deserializer(),
      kafkaConfig.consumeTopic)

    // the processor which performs aggregations on the metrics
    topology.addProcessor(
      TOPOLOGY_AGGREGATOR_PROCESSOR_NAME,
      new MetricAggProcessorSupplier(TOPOLOGY_AGGREGATOR_TREND_METRIC_STORE_NAME, appConfiguration.encoder),
      TOPOLOGY_SOURCE_NAME)

    // key-value state store associated with each kstreams task (partition),
    // which keeps the trend-metrics currently being computed in memory
    topology.addStateStore(createTrendMetricStateStore(), TOPOLOGY_AGGREGATOR_PROCESSOR_NAME)

    // processor to add additional tags, if any
    topology.addProcessor(
      TOPOLOGY_ADDITIONAL_TAGS_PROCESSOR_NAME,
      new AdditionalTagsProcessorSupplier(appConfiguration.additionalTags),
      TOPOLOGY_AGGREGATOR_PROCESSOR_NAME)

    if (appConfiguration.kafkaConfig.producerConfig.enableExternalKafka) {
      topology.addProcessor(
        TOPOLOGY_EXTERNAL_SINK_NAME,
        new ExternalKafkaProcessorSupplier(appConfiguration.kafkaConfig.producerConfig),
        TOPOLOGY_ADDITIONAL_TAGS_PROCESSOR_NAME
      )
    }

    // adding sinks
    appConfiguration.kafkaConfig.producerConfig.kafkaSinkTopics.foreach(sinkTopic => {
      if (sinkTopic.enabled) {
        val serde = Class.forName(sinkTopic.serdeClassName).newInstance().asInstanceOf[Serde[MetricData]]
        topology.addSink(
          s"${TOPOLOGY_INTERNAL_SINK_NAME}-${sinkTopic.topic}",
          sinkTopic.topic,
          new StringSerializer,
          serde.serializer(),
          TOPOLOGY_ADDITIONAL_TAGS_PROCESSOR_NAME)
      }
    })

    topology
  }

  private def createTrendMetricStateStore(): StoreBuilder[KeyValueStore[String, TrendMetric]] = {
    val stateStoreConfiguration = appConfiguration.stateStoreConfig
    val storeBuilder = new HaystackStoreBuilder(TOPOLOGY_AGGREGATOR_TREND_METRIC_STORE_NAME, stateStoreConfiguration.stateStoreCacheSize)
    if (stateStoreConfiguration.enableChangeLogging) {
      storeBuilder
        .withLoggingEnabled(JavaConverters.mapAsJavaMap(stateStoreConfiguration.changeLogTopicConfiguration))
    } else {
      storeBuilder
        .withLoggingDisabled()
    }
  }

  override def get(): Topology = {
    val topology = new Topology
    initialize(topology)
  }
}
Example 9
Source File: RemoteCacheManagerBuilder.scala from infinispan-spark (Apache License 2.0)
package org.infinispan.spark.rdd

import java.net.InetSocketAddress
import java.util.function.Supplier

import org.infinispan.client.hotrod.configuration.ConfigurationBuilder
import org.infinispan.client.hotrod.marshall.MarshallerUtil
import org.infinispan.client.hotrod.{FailoverRequestBalancingStrategy, RemoteCacheManager}
import org.infinispan.commons.marshall.{Marshaller, ProtoStreamMarshaller}
import org.infinispan.protostream.FileDescriptorSource
import org.infinispan.protostream.annotations.ProtoSchemaBuilder
import org.infinispan.query.remote.client.ProtobufMetadataManagerConstants
import org.infinispan.spark.config.ConnectorConfiguration

import scala.collection.JavaConverters._

object RemoteCacheManagerBuilder {

  def create(cfg: ConnectorConfiguration, preferredAddress: InetSocketAddress): RemoteCacheManager =
    create(cfg, Some(preferredAddress))

  private def create(cfg: ConnectorConfiguration, preferredAddress: Option[InetSocketAddress]) = {
    if (!cfg.usesProtobuf)
      new RemoteCacheManager(createBuilder(cfg, preferredAddress, None).build())
    else
      createForQuery(cfg, preferredAddress)
  }

  private def createForQuery(cfg: ConnectorConfiguration, preferredAddress: Option[InetSocketAddress]) = {
    val builder = createBuilder(cfg, preferredAddress, Some(new ProtoStreamMarshaller))
    val rcm = new RemoteCacheManager(builder.build())
    buildSerializationContext(cfg, rcm)
  }

  private def createBuilder(cfg: ConnectorConfiguration, preferredAddress: Option[InetSocketAddress], marshaller: Option[Marshaller]) = {
    val configBuilder = new ConfigurationBuilder().withProperties(cfg.getHotRodClientProperties)

    def balancingStrategyFactory(a: InetSocketAddress) = new Supplier[FailoverRequestBalancingStrategy] {
      override def get(): FailoverRequestBalancingStrategy = new PreferredServerBalancingStrategy(a)
    }

    // wire the Supplier into the builder so each cache manager gets its own strategy instance
    preferredAddress.foreach(a => configBuilder.balancingStrategy(balancingStrategyFactory(a)))
    marshaller.foreach(m => configBuilder.marshaller(m))
    configBuilder
  }

  private def buildSerializationContext(cfg: ConnectorConfiguration, cm: RemoteCacheManager) = {
    val metadataCache = cm.getCache[String, AnyRef](ProtobufMetadataManagerConstants.PROTOBUF_METADATA_CACHE_NAME)
    val autoRegister = cfg.getRegisterSchemas

    def buildDescriptorSource(descriptors: Map[String, String]): FileDescriptorSource = {
      val fileDescriptorSource = new FileDescriptorSource
      descriptors.foldLeft(fileDescriptorSource) {
        case (fds, (fileName, contents)) => fds.addProtoFile(fileName, contents)
      }
      fileDescriptorSource
    }

    val serCtx = MarshallerUtil.getSerializationContext(cm)
    val protoDescriptors = cfg.getProtoFiles
    val marshallers = cfg.getMarshallers
    val protoAnnotatedEntities = cfg.getProtoEntities
    val descriptorSource = buildDescriptorSource(protoDescriptors)
    if (autoRegister) {
      descriptorSource.getFileDescriptors.asScala.foreach {
        case (name, contents) => metadataCache.put(name, new String(contents))
      }
    }
    serCtx.registerProtoFiles(descriptorSource)
    marshallers.foreach { c => serCtx.registerMarshaller(c.newInstance()) }
    if (protoDescriptors.isEmpty) {
      val protoSchemaBuilder = new ProtoSchemaBuilder
      protoAnnotatedEntities.foreach { e =>
        val fileName = s"${e.getName}.proto"
        val contents = protoSchemaBuilder.fileName(fileName).addClass(e).build(serCtx)
        if (autoRegister) {
          metadataCache.put(fileName, contents)
        }
      }
    }
    cm
  }
}
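The Supplier indirection matters because the Hot Rod client builds a fresh FailoverRequestBalancingStrategy per RemoteCacheManager, so strategy state is never shared between clients. A minimal usage sketch, assuming a Hot Rod server on localhost:11222 and that a default ConnectorConfiguration is sufficient:

import java.net.InetSocketAddress
import org.infinispan.spark.config.ConnectorConfiguration

val cfg = new ConnectorConfiguration() // assumed default-constructible
val preferred = new InetSocketAddress("localhost", 11222)

// Initial requests are routed to the preferred server via PreferredServerBalancingStrategy.
val remoteCacheManager = RemoteCacheManagerBuilder.create(cfg, preferred)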
Example 10
Source File: CorsDirectives.scala from akka-http-cors (Apache License 2.0)
package ch.megard.akka.http.cors.javadsl

import java.util.function.Supplier

import akka.http.javadsl.server.{RejectionHandler, Route}
import akka.http.javadsl.server.directives.RouteAdapter
import ch.megard.akka.http.cors.javadsl.settings.CorsSettings
import ch.megard.akka.http.cors.scaladsl

object CorsDirectives {

  def cors(inner: Supplier[Route]): Route = RouteAdapter {
    scaladsl.CorsDirectives.cors() {
      inner.get() match {
        case ra: RouteAdapter => ra.delegate
      }
    }
  }

  def cors(settings: CorsSettings, inner: Supplier[Route]): Route = RouteAdapter {
    // Currently the easiest way to go from Java models to their Scala equivalent is to cast.
    // See https://github.com/akka/akka-http/issues/661 for a potential opening of the JavaMapping API.
    val scalaSettings = settings.asInstanceOf[scaladsl.settings.CorsSettings]
    scaladsl.CorsDirectives.cors(scalaSettings) {
      inner.get() match {
        case ra: RouteAdapter => ra.delegate
      }
    }
  }

  def corsRejectionHandler: RejectionHandler =
    new RejectionHandler(scaladsl.CorsDirectives.corsRejectionHandler)
}
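From Scala 2.12 onward a zero-argument lambda satisfies Supplier[Route], so the javadsl entry point can be exercised directly from Scala. A minimal sketch using akka-http's javadsl complete:

import akka.http.javadsl.server.Directives.complete
import ch.megard.akka.http.cors.javadsl.CorsDirectives.cors

// The Supplier defers building the inner route; cors() wraps it with the
// default CORS settings.
val route = cors(() => complete("OK"))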
Example 11
Source File: MockTime.scala from kafka-connect-sap (Apache License 2.0)
package com.sap.kafka.connect.source

import java.lang
import java.util.concurrent.TimeUnit
import java.util.function.Supplier

import org.apache.kafka.common.utils.Time

class MockTime extends Time {

  private var nanos = System.nanoTime()
  private val autoTickMs = 0

  override def milliseconds(): Long = {
    sleep(autoTickMs)
    TimeUnit.MILLISECONDS.convert(this.nanos, TimeUnit.NANOSECONDS)
  }

  override def nanoseconds(): Long = {
    sleep(autoTickMs)
    nanos
  }

  override def hiResClockMs(): Long = ???

  override def sleep(ms: Long): Unit = {
    this.nanos += TimeUnit.NANOSECONDS.convert(ms, TimeUnit.MILLISECONDS)
  }

  override def waitObject(o: Any, supplier: Supplier[lang.Boolean], l: Long): Unit = ???
}
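Because sleep() just advances the internal counter, time-dependent code under test runs deterministically and without real waiting. A minimal sketch of driving the mock clock:

val time = new MockTime
val t0 = time.milliseconds()
time.sleep(5000) // advances the clock instantly
assert(time.milliseconds() - t0 == 5000)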