java.util.Objects Scala Examples
The following examples show how to use java.util.Objects.
Each example names its source file and the open-source project and license it was taken from.
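Most of the examples below lean on the same handful of static helpers from java.util.Objects: Objects.hash and Objects.hashCode for hashCode implementations, Objects.equals for null-safe comparison, Objects.requireNonNull for argument validation, Objects.nonNull as a predicate, and Objects.toString for null-tolerant string conversion. The following standalone sketch is not taken from any of the listed projects; the Endpoint class and its fields are illustrative only, but every Objects call is the standard JDK API.

import java.util.Objects

// Illustrative only: a small value class that leans on java.util.Objects
// for constructor validation and for its equals/hashCode/toString.
final class Endpoint(val host: String, val port: Int) {
  // Throws NullPointerException with the given message when host is null.
  Objects.requireNonNull(host, "host can't be null")

  override def equals(obj: Any): Boolean = obj match {
    // Objects.equals is null-safe: true if both are null, otherwise delegates to equals.
    case other: Endpoint => Objects.equals(host, other.host) && port == other.port
    case _               => false
  }

  // Objects.hash takes varargs and tolerates null elements.
  override def hashCode: Int = Objects.hash(host, Int.box(port))

  // Objects.toString(x, default) returns the default when x is null.
  override def toString: String = s"Endpoint(${Objects.toString(host, "<none>")}, $port)"
}

Because all of these helpers are static members of java.util.Objects, a single import java.util.Objects is all the examples below need.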
Example 1
Source File: ScAbstractType.scala From intellij-lsp with Apache License 2.0
package org.jetbrains.plugins.scala.lang.psi.types

import java.util.Objects

import org.jetbrains.plugins.scala.lang.psi.types.api._
import org.jetbrains.plugins.scala.lang.psi.types.nonvalue.NonValueType
import org.jetbrains.plugins.scala.lang.psi.types.recursiveUpdate.{RecursiveUpdateException, Update}
import org.jetbrains.plugins.scala.project.ProjectContext

case class ScAbstractType(parameterType: TypeParameterType, lower: ScType, upper: ScType)
  extends ScalaType with NonValueType {

  override implicit def projectContext: ProjectContext = parameterType.projectContext

  private var hash: Int = -1

  override def hashCode: Int = {
    if (hash == -1)
      hash = Objects.hash(upper, lower, parameterType.arguments)

    hash
  }

  override def equals(obj: scala.Any): Boolean = {
    obj match {
      case ScAbstractType(oTpt, oLower, oUpper) =>
        lower.equals(oLower) && upper.equals(oUpper) && parameterType.arguments.equals(oTpt.arguments)
      case _ => false
    }
  }

  override def equivInner(r: ScType, uSubst: ScUndefinedSubstitutor, falseUndef: Boolean): (Boolean, ScUndefinedSubstitutor) = {
    r match {
      case _ if falseUndef => (false, uSubst)
      case _ =>
        var t: (Boolean, ScUndefinedSubstitutor) = r.conforms(upper, uSubst)
        if (!t._1) return (false, uSubst)
        t = lower.conforms(r, t._2)
        if (!t._1) return (false, uSubst)
        (true, t._2)
    }
  }

  def inferValueType: TypeParameterType = parameterType

  def simplifyType: ScType = {
    if (upper.equiv(Any)) lower
    else if (lower.equiv(Nothing)) upper
    else lower
  }

  override def removeAbstracts: ScType = simplifyType

  override def updateSubtypes(update: Update, visited: Set[ScType]): ScAbstractType = {
    try {
      ScAbstractType(
        parameterType.recursiveUpdateImpl(update, visited).asInstanceOf[TypeParameterType],
        lower.recursiveUpdateImpl(update, visited),
        upper.recursiveUpdateImpl(update, visited)
      )
    } catch {
      case _: ClassCastException => throw new RecursiveUpdateException
    }
  }

  override def recursiveVarianceUpdateModifiable[T](data: T,
                                                    update: (ScType, Variance, T) => (Boolean, ScType, T),
                                                    v: Variance = Covariant,
                                                    revertVariances: Boolean = false): ScType = {
    update(this, v, data) match {
      case (true, res, _) => res
      case (_, _, newData) =>
        try {
          ScAbstractType(
            parameterType.recursiveVarianceUpdateModifiable(newData, update, v).asInstanceOf[TypeParameterType],
            lower.recursiveVarianceUpdateModifiable(newData, update, -v),
            upper.recursiveVarianceUpdateModifiable(newData, update, v))
        } catch {
          case _: ClassCastException => throw new RecursiveUpdateException
        }
    }
  }

  override def visitType(visitor: TypeVisitor): Unit = visitor match {
    case scalaVisitor: ScalaTypeVisitor => scalaVisitor.visitAbstractType(this)
    case _ =>
  }
}
Example 2
Source File: ModelStateStoreBuilder.scala From kafka-with-akka-streams-kafka-streams-tutorial with Apache License 2.0
package com.lightbend.scala.kafkastreams.store.store.custom

import java.util
import java.util.Objects

import org.apache.kafka.streams.state.StoreBuilder

class ModelStateStoreBuilder(nm: String) extends StoreBuilder[ModelStateStore] {

  Objects.requireNonNull(nm, "name can't be null")

  var lConfig: util.Map[String, String] = new util.HashMap[String, String]
  var enableCaching: Boolean = false
  var enableLogging: Boolean = true

  override def build: ModelStateStore = new ModelStateStore(nm, enableLogging)

  override def withCachingEnabled: ModelStateStoreBuilder = {
    enableCaching = true
    this
  }

  override def withLoggingEnabled(config: util.Map[String, String]): ModelStateStoreBuilder = {
    Objects.requireNonNull(config, "config can't be null")
    enableLogging = true
    lConfig = config
    this
  }

  override def withLoggingDisabled: ModelStateStoreBuilder = {
    enableLogging = false
    lConfig.clear()
    this
  }

  override def logConfig: util.Map[String, String] = lConfig

  override def loggingEnabled: Boolean = enableLogging

  override def name: String = nm
}
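The builder above validates its constructor argument with Objects.requireNonNull(nm, "name can't be null"). requireNonNull throws a NullPointerException carrying the supplied message when the value is null, and otherwise returns the value itself, so validation and assignment can happen in one expression. A minimal sketch with an illustrative StoreName class (not part of the tutorial project):

import java.util.Objects

// Illustrative only: requireNonNull returns its argument, so it can guard a field assignment inline.
class StoreName(name: String) {
  private val validated: String = Objects.requireNonNull(name, "name can't be null")
  override def toString: String = validated
}

// new StoreName(null) throws: java.lang.NullPointerException: name can't be null
// new StoreName("model-store").toString returns "model-store"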
Example 3
Source File: AvlTreeData.scala From sigmastate-interpreter with MIT License
package sigmastate

import java.util
import java.util.{Arrays, Objects}

import scorex.crypto.authds.ADDigest
import sigmastate.interpreter.CryptoConstants
import sigmastate.serialization.SigmaSerializer
import sigmastate.utils.{SigmaByteReader, SigmaByteWriter}

case class AvlTreeFlags(insertAllowed: Boolean, updateAllowed: Boolean, removeAllowed: Boolean) {
  def serializeToByte: Byte = AvlTreeFlags.serializeFlags(this)
}

object AvlTreeFlags {

  lazy val ReadOnly = AvlTreeFlags(insertAllowed = false, updateAllowed = false, removeAllowed = false)

  lazy val AllOperationsAllowed = AvlTreeFlags(insertAllowed = true, updateAllowed = true, removeAllowed = true)

  lazy val InsertOnly = AvlTreeFlags(insertAllowed = true, updateAllowed = false, removeAllowed = false)

  lazy val RemoveOnly = AvlTreeFlags(insertAllowed = false, updateAllowed = false, removeAllowed = true)

  def apply(serializedFlags: Byte): AvlTreeFlags = {
    val insertAllowed = (serializedFlags & 0x01) != 0
    val updateAllowed = (serializedFlags & 0x02) != 0
    val removeAllowed = (serializedFlags & 0x04) != 0
    AvlTreeFlags(insertAllowed, updateAllowed, removeAllowed)
  }

  def serializeFlags(avlTreeFlags: AvlTreeFlags): Byte = {
    val readOnly = 0
    val i = if (avlTreeFlags.insertAllowed) readOnly | 0x01 else readOnly
    val u = if (avlTreeFlags.updateAllowed) i | 0x02 else i
    val r = if (avlTreeFlags.removeAllowed) u | 0x04 else u
    r.toByte
  }
}

case class AvlTreeData(digest: ADDigest,
                       treeFlags: AvlTreeFlags,
                       keyLength: Int,
                       valueLengthOpt: Option[Int] = None) {

  override def equals(arg: Any): Boolean = arg match {
    case x: AvlTreeData =>
      Arrays.equals(digest, x.digest) &&
        keyLength == x.keyLength &&
        valueLengthOpt == x.valueLengthOpt &&
        treeFlags == x.treeFlags
    case _ => false
  }

  override def hashCode(): Int =
    (util.Arrays.hashCode(digest) * 31 + keyLength.hashCode()) * 31 + Objects.hash(valueLengthOpt, treeFlags)
}

object AvlTreeData {
  val DigestSize: Int = CryptoConstants.hashLength + 1 // please read class comments above for details
  val TreeDataSize = DigestSize + 3 + 4 + 4

  val dummy = new AvlTreeData(
    ADDigest @@ Array.fill(DigestSize)(0: Byte),
    AvlTreeFlags.AllOperationsAllowed,
    keyLength = 32)

  object serializer extends SigmaSerializer[AvlTreeData, AvlTreeData] {

    override def serialize(data: AvlTreeData, w: SigmaByteWriter): Unit = {
      val tf = AvlTreeFlags.serializeFlags(data.treeFlags)
      w.putBytes(data.digest)
        .putUByte(tf)
        .putUInt(data.keyLength)
        .putOption(data.valueLengthOpt)(_.putUInt(_))
    }

    override def parse(r: SigmaByteReader): AvlTreeData = {
      val digest = r.getBytes(DigestSize)
      val tf = AvlTreeFlags(r.getByte())
      val keyLength = r.getUInt().toInt
      val valueLengthOpt = r.getOption(r.getUInt().toInt)
      AvlTreeData(ADDigest @@ digest, tf, keyLength, valueLengthOpt)
    }
  }
}
Example 4
Source File: ExactQuery.scala From elastiknn with Apache License 2.0
package com.klibisz.elastiknn.query

import java.util.Objects

import com.klibisz.elastiknn.ELASTIKNN_NAME
import com.klibisz.elastiknn.api.Vec
import com.klibisz.elastiknn.models.ExactSimilarityFunction
import com.klibisz.elastiknn.storage.StoredVec
import org.apache.lucene.document.BinaryDocValuesField
import org.apache.lucene.index.{IndexableField, LeafReaderContext}
import org.apache.lucene.search.{DocValuesFieldExistsQuery, Explanation}
import org.apache.lucene.util.BytesRef
import org.elasticsearch.common.lucene.search.function._

object ExactQuery {

  private class ExactScoreFunction[V <: Vec, S <: StoredVec](val field: String, val queryVec: V, val simFunc: ExactSimilarityFunction[V, S])(
      implicit codec: StoredVec.Codec[V, S])
      extends ScoreFunction(CombineFunction.REPLACE) {

    override def getLeafScoreFunction(ctx: LeafReaderContext): LeafScoreFunction = {
      val vecDocVals = ctx.reader.getBinaryDocValues(vectorDocValuesField(field))
      new LeafScoreFunction {
        override def score(docId: Int, subQueryScore: Float): Double =
          if (vecDocVals.advanceExact(docId)) {
            val binVal = vecDocVals.binaryValue()
            val storedVec = codec.decode(binVal.bytes, binVal.offset, binVal.length)
            simFunc(queryVec, storedVec)
          } else throw new RuntimeException(s"Couldn't advance to doc with id [$docId]")

        override def explainScore(docId: Int, subQueryScore: Explanation): Explanation = {
          Explanation.`match`(100, s"Elastiknn exact query")
        }
      }
    }

    override def needsScores(): Boolean = false

    override def doEquals(other: ScoreFunction): Boolean = other match {
      case f: ExactScoreFunction[V, S] => field == f.field && queryVec == f.queryVec && simFunc == f.simFunc
      case _                           => false
    }

    override def doHashCode(): Int = Objects.hash(field, queryVec, simFunc)
  }

  def apply[V <: Vec, S <: StoredVec](field: String, queryVec: V, simFunc: ExactSimilarityFunction[V, S])(
      implicit codec: StoredVec.Codec[V, S]): FunctionScoreQuery = {
    val subQuery = new DocValuesFieldExistsQuery(vectorDocValuesField(field))
    val func = new ExactScoreFunction(field, queryVec, simFunc)
    new FunctionScoreQuery(subQuery, func)
  }

  // Docvalue fields can have a custom name, but "regular" values (e.g. Terms) must keep the name of the field.
  def vectorDocValuesField(field: String): String = s"$field.$ELASTIKNN_NAME.vector"

  def index[V <: Vec: StoredVec.Encoder](field: String, vec: V): Seq[IndexableField] = {
    val bytes = implicitly[StoredVec.Encoder[V]].apply(vec)
    Seq(new BinaryDocValuesField(vectorDocValuesField(field), new BytesRef(bytes)))
  }
}
Example 5
Source File: CustomShuffledRDD.scala From Spark-2.3.1 with Apache License 2.0
package org.apache.spark.scheduler

import java.util.Arrays
import java.util.Objects

import org.apache.spark._
import org.apache.spark.rdd.RDD

class CustomShuffledRDD[K, V, C](
    var dependency: ShuffleDependency[K, V, C],
    partitionStartIndices: Array[Int])
  extends RDD[(K, C)](dependency.rdd.context, Seq(dependency)) {

  def this(dep: ShuffleDependency[K, V, C]) = {
    this(dep, (0 until dep.partitioner.numPartitions).toArray)
  }

  override def getDependencies: Seq[Dependency[_]] = List(dependency)

  override val partitioner = {
    Some(new CoalescedPartitioner(dependency.partitioner, partitionStartIndices))
  }

  override def getPartitions: Array[Partition] = {
    val n = dependency.partitioner.numPartitions
    Array.tabulate[Partition](partitionStartIndices.length) { i =>
      val startIndex = partitionStartIndices(i)
      val endIndex =
        if (i < partitionStartIndices.length - 1) partitionStartIndices(i + 1) else n
      new CustomShuffledRDDPartition(i, startIndex, endIndex)
    }
  }

  override def compute(p: Partition, context: TaskContext): Iterator[(K, C)] = {
    val part = p.asInstanceOf[CustomShuffledRDDPartition]
    SparkEnv.get.shuffleManager.getReader(
      dependency.shuffleHandle, part.startIndexInParent, part.endIndexInParent, context)
      .read()
      .asInstanceOf[Iterator[(K, C)]]
  }

  override def clearDependencies() {
    super.clearDependencies()
    dependency = null
  }
}
Example 6
Source File: DataSourceReaderHolder.scala From Spark-2.3.1 with Apache License 2.0
package org.apache.spark.sql.execution.datasources.v2

import java.util.Objects

import org.apache.spark.sql.catalyst.expressions.Attribute
import org.apache.spark.sql.sources.v2.reader._

private[v2] trait DataSourceReaderHolder {

  // The output of the data source reader.
  def output: Seq[Attribute]

  // The held data source reader.
  def reader: DataSourceReader

  private def metadata: Seq[Any] = {
    val filters: Any = reader match {
      case s: SupportsPushDownCatalystFilters => s.pushedCatalystFilters().toSet
      case s: SupportsPushDownFilters => s.pushedFilters().toSet
      case _ => Nil
    }
    Seq(output, reader.getClass, filters)
  }

  def canEqual(other: Any): Boolean

  override def equals(other: Any): Boolean = other match {
    case other: DataSourceReaderHolder =>
      canEqual(other) && metadata.length == other.metadata.length &&
        metadata.zip(other.metadata).forall { case (l, r) => l == r }
    case _ => false
  }

  override def hashCode(): Int = {
    metadata.map(Objects.hashCode).foldLeft(0)((a, b) => 31 * a + b)
  }
}
Example 7
Source File: Handle.scala From kyuubi with Apache License 2.0
package yaooqinn.kyuubi.cli

import java.util.Objects

import org.apache.hive.service.cli.thrift.THandleIdentifier

abstract class Handle(val handleId: HandleIdentifier) {

  def this() = this(new HandleIdentifier())

  def this(tHandleIdentifier: THandleIdentifier) = this(new HandleIdentifier(tHandleIdentifier))

  def getHandleIdentifier: HandleIdentifier = handleId

  override def hashCode: Int = 31 * 1 + Objects.hashCode(handleId)

  override def equals(obj: Any): Boolean = {
    obj match {
      case o: Handle => Objects.equals(handleId, o.handleId)
      case _ => false
    }
  }
}
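Handle builds its hashCode and equals from Objects.hashCode and Objects.equals instead of calling handleId.hashCode or handleId.equals directly; the Objects variants tolerate null fields (Objects.hashCode(null) is 0, and Objects.equals(null, null) is true). A small sketch of the same idiom on an illustrative Tag class, not taken from the kyuubi project:

import java.util.Objects

// Illustrative only: equals/hashCode that stay null-safe for a possibly-null field.
class Tag(val label: String) {
  override def hashCode: Int = 31 * 1 + Objects.hashCode(label) // 0 when label is null

  override def equals(obj: Any): Boolean = obj match {
    case other: Tag => Objects.equals(label, other.label) // true when both labels are null
    case _          => false
  }
}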
Example 8
Source File: HandleIdentifier.scala From kyuubi with Apache License 2.0
package yaooqinn.kyuubi.cli

import java.nio.ByteBuffer
import java.util.{Objects, UUID}

import org.apache.hive.service.cli.thrift.THandleIdentifier

case class HandleIdentifier(publicId: UUID, secretId: UUID) {

  def this() = this(UUID.randomUUID(), UUID.randomUUID())

  def this(guid: ByteBuffer, secret: ByteBuffer) = this(
    Option(guid).map(id => new UUID(id.getLong(), id.getLong())).getOrElse(UUID.randomUUID()),
    Option(secret).map(id => new UUID(id.getLong(), id.getLong())).getOrElse(UUID.randomUUID()))

  def this(tHandleId: THandleIdentifier) = this(
    ByteBuffer.wrap(tHandleId.getGuid),
    ByteBuffer.wrap(tHandleId.getSecret))

  def getPublicId: UUID = this.publicId

  def getSecretId: UUID = this.secretId

  def toTHandleIdentifier: THandleIdentifier = {
    val guid = new Array[Byte](16)
    val gBuff = ByteBuffer.wrap(guid)
    val secret = new Array[Byte](16)
    val sBuff = ByteBuffer.wrap(secret)
    gBuff.putLong(publicId.getMostSignificantBits)
    gBuff.putLong(publicId.getLeastSignificantBits)
    sBuff.putLong(secretId.getMostSignificantBits)
    sBuff.putLong(secretId.getLeastSignificantBits)
    new THandleIdentifier(ByteBuffer.wrap(guid), ByteBuffer.wrap(secret))
  }

  override def hashCode: Int = {
    val prime = 31
    var result = 1
    result = prime * result + (if (publicId == null) 0 else publicId.hashCode)
    result = prime * result + (if (secretId == null) 0 else secretId.hashCode)
    result
  }

  override def equals(obj: Any): Boolean = {
    obj match {
      case HandleIdentifier(pid, sid)
        if Objects.equals(publicId, pid) && Objects.equals(secretId, sid) => true
      case _ => false
    }
  }

  override def toString: String = Option(publicId).map(_.toString).getOrElse("")
}
Example 9
Source File: OperationHandle.scala From kyuubi with Apache License 2.0
package yaooqinn.kyuubi.operation

import java.util.Objects

import org.apache.hive.service.cli.thrift.{TOperationHandle, TProtocolVersion}

import yaooqinn.kyuubi.cli.{Handle, HandleIdentifier}

class OperationHandle private(
    opType: OperationType,
    protocol: TProtocolVersion,
    handleId: HandleIdentifier) extends Handle(handleId) {

  private var hasResultSet: Boolean = false

  def this(opType: OperationType, protocol: TProtocolVersion) =
    this(opType, protocol, new HandleIdentifier)

  def this(tOperationHandle: TOperationHandle, protocol: TProtocolVersion) = {
    this(
      OperationType.getOperationType(tOperationHandle.getOperationType),
      protocol,
      new HandleIdentifier(tOperationHandle.getOperationId))
    setHasResultSet(tOperationHandle.isHasResultSet)
  }

  def this(tOperationHandle: TOperationHandle) =
    this(tOperationHandle, TProtocolVersion.HIVE_CLI_SERVICE_PROTOCOL_V1)

  def getOperationType: OperationType = opType

  def toTOperationHandle: TOperationHandle = {
    val tOperationHandle = new TOperationHandle
    tOperationHandle.setOperationId(getHandleIdentifier.toTHandleIdentifier)
    tOperationHandle.setOperationType(opType.toTOperationType)
    tOperationHandle.setHasResultSet(this.hasResultSet)
    tOperationHandle
  }

  def setHasResultSet(hasResultSet: Boolean): Unit = {
    this.hasResultSet = hasResultSet
  }

  def isHasResultSet: Boolean = this.hasResultSet

  def getProtocolVersion: TProtocolVersion = protocol

  override def hashCode: Int = 31 * super.hashCode + Objects.hashCode(opType)

  override def equals(obj: Any): Boolean = {
    obj match {
      case o: OperationHandle if opType == o.getOperationType && super.equals(o) => true
      case _ => false
    }
  }

  override def toString: String =
    "OperationHandle [opType=" + opType + ", getHandleIdentifier()=" + getHandleIdentifier + "]"
}
Example 10
Source File: CustomShuffledRDD.scala From multi-tenancy-spark with Apache License 2.0
package org.apache.spark.scheduler

import java.util.Arrays
import java.util.Objects

import org.apache.spark._
import org.apache.spark.rdd.RDD
import org.apache.spark.util.Utils

class CustomShuffledRDD[K, V, C](
    var dependency: ShuffleDependency[K, V, C],
    partitionStartIndices: Array[Int])
  extends RDD[(K, C)](dependency.rdd.context, Seq(dependency)) {

  def this(dep: ShuffleDependency[K, V, C]) = {
    this(dep, (0 until dep.partitioner.numPartitions).toArray)
  }

  override def getDependencies: Seq[Dependency[_]] = List(dependency)

  override val partitioner = {
    Some(new CoalescedPartitioner(dependency.partitioner, partitionStartIndices))
  }

  override def getPartitions: Array[Partition] = {
    val n = dependency.partitioner.numPartitions
    Array.tabulate[Partition](partitionStartIndices.length) { i =>
      val startIndex = partitionStartIndices(i)
      val endIndex =
        if (i < partitionStartIndices.length - 1) partitionStartIndices(i + 1) else n
      new CustomShuffledRDDPartition(i, startIndex, endIndex)
    }
  }

  override def compute(p: Partition, context: TaskContext): Iterator[(K, C)] = {
    val part = p.asInstanceOf[CustomShuffledRDDPartition]
    val user = Utils.getCurrentUserName()
    SparkEnv.get(user).shuffleManager.getReader(
      dependency.shuffleHandle, part.startIndexInParent, part.endIndexInParent, context)
      .read()
      .asInstanceOf[Iterator[(K, C)]]
  }

  override def clearDependencies() {
    super.clearDependencies()
    dependency = null
  }
}
Example 11
Source File: SerializableFileStatus.scala From delta with Apache License 2.0
package org.apache.spark.sql.delta.util

import java.util.Objects

import org.apache.hadoop.fs.{FileStatus, LocatedFileStatus, Path}

case class SerializableFileStatus(
    path: String,
    length: Long,
    isDir: Boolean,
    modificationTime: Long) {

  // Important note! This is very expensive to compute, but we don't want to cache it
  // as a `val` because Paths internally contain URIs and therefore consume lots of memory.
  def getPath: Path = new Path(path)

  def getLen: Long = length

  def getModificationTime: Long = modificationTime

  def isDirectory: Boolean = isDir

  def toFileStatus: FileStatus = {
    new LocatedFileStatus(
      new FileStatus(length, isDir, 0, 0, modificationTime, new Path(path)),
      null)
  }

  override def equals(obj: Any): Boolean = obj match {
    case other: SerializableFileStatus =>
      // We only compare the paths to stay consistent with FileStatus.equals.
      Objects.equals(path, other.path)
    case _ => false
  }

  override def hashCode(): Int = {
    // We only use the path to stay consistent with FileStatus.hashCode.
    Objects.hashCode(path)
  }
}

object SerializableFileStatus {

  def fromStatus(status: FileStatus): SerializableFileStatus = {
    SerializableFileStatus(
      Option(status.getPath).map(_.toString).orNull,
      status.getLen,
      status.isDirectory,
      status.getModificationTime)
  }

  val EMPTY: SerializableFileStatus = fromStatus(new FileStatus())
}
Example 12
Source File: PoolConfigs.scala From bandar-log with Apache License 2.0
package com.aol.one.dwh.infra.sql.pool

import java.util.Objects

import com.aol.one.dwh.infra.config.RichConfig._
import com.aol.one.dwh.infra.config.{ConnectorConfig, JdbcConfig}
import com.aol.one.dwh.infra.sql.pool.SqlSource._
import com.facebook.presto.jdbc.PrestoDriver
import com.typesafe.config.Config
import com.zaxxer.hikari.HikariConfig

object SqlSource {
  val VERTICA = "vertica"
  val PRESTO = "presto"
  val GLUE = "glue"
}

object PoolConfig {

  def apply(connectorConf: ConnectorConfig, mainConf: Config): HikariConfig = {
    connectorConf.connectorType match {
      case VERTICA => VerticaPoolConfig(mainConf.getJdbcConfig(connectorConf.configId))
      case PRESTO => PrestoPoolConfig(mainConf.getJdbcConfig(connectorConf.configId))
      case _ => throw new IllegalArgumentException(s"Unsupported connector type:[${connectorConf.connectorType}]")
    }
  }
}

private object PrestoPoolConfig {

  def apply(jdbcConfig: JdbcConfig): HikariConfig = {
    val config: HikariConfig = new HikariConfig
    config.setPoolName(s"presto-pool-${jdbcConfig.dbName}")
    config.setDriverClassName(classOf[PrestoDriver].getName)
    config.setJdbcUrl(s"jdbc:presto://${jdbcConfig.host}:${jdbcConfig.port}/hive/${jdbcConfig.dbName}")
    config.setUsername(jdbcConfig.username)
    config.setMaximumPoolSize(jdbcConfig.maxPoolSize)
    config.setConnectionTimeout(jdbcConfig.connectionTimeout)
    config.setReadOnly(true)
    config
  }
}

private object VerticaPoolConfig {

  def apply(jdbcConfig: JdbcConfig): HikariConfig = {
    val verticaUrl = {
      val baseUri = s"jdbc:vertica://${jdbcConfig.host}:${jdbcConfig.port}/${jdbcConfig.dbName}"
      val schema = if (Objects.nonNull(jdbcConfig.schema)) "?connsettings=SET SEARCH_PATH TO " + jdbcConfig.schema else ""
      val ssl = if (jdbcConfig.useSsl) "&ssl=true" else ""
      baseUri + schema + ssl
    }

    val config: HikariConfig = new HikariConfig
    config.setPoolName(s"vertica-pool-${jdbcConfig.dbName}")
    config.setDriverClassName(classOf[com.vertica.jdbc.Driver].getName)
    config.setJdbcUrl(verticaUrl)
    config.setUsername(jdbcConfig.username)
    config.setPassword(jdbcConfig.password)
    config.setMaximumPoolSize(jdbcConfig.maxPoolSize)
    config.setConnectionTimeout(jdbcConfig.connectionTimeout)
    config.setConnectionTestQuery("SELECT 1")
    config.setReadOnly(true)
    config.setAutoCommit(false)
    config
  }
}
Example 13
Source File: LWWRegisterImpl.scala From cloudstate with Apache License 2.0
package io.cloudstate.javasupport.impl.crdt

import java.util.Objects

import io.cloudstate.javasupport.crdt.LWWRegister
import io.cloudstate.javasupport.impl.AnySupport
import io.cloudstate.protocol.crdt.{CrdtClock, CrdtDelta, CrdtState, LWWRegisterDelta, LWWRegisterState}
import com.google.protobuf.any.{Any => ScalaPbAny}

private[crdt] final class LWWRegisterImpl[T](anySupport: AnySupport) extends InternalCrdt with LWWRegister[T] {
  override final val name = "LWWRegister"
  private var value: T = _
  private var deltaValue: Option[ScalaPbAny] = None
  private var clock: LWWRegister.Clock = LWWRegister.Clock.DEFAULT
  private var customClockValue: Long = 0

  override def set(value: T, clock: LWWRegister.Clock, customClockValue: Long): T = {
    Objects.requireNonNull(value)
    val old = this.value
    if (this.value != value) {
      deltaValue = Some(anySupport.encodeScala(value))
      this.value = value
    }
    old
  }

  override def get(): T = value

  override def hasDelta: Boolean = deltaValue.isDefined

  override def delta: Option[CrdtDelta.Delta] =
    if (hasDelta) {
      Some(CrdtDelta.Delta.Lwwregister(LWWRegisterDelta(deltaValue, convertClock(clock), customClockValue)))
    } else None

  override def resetDelta(): Unit = {
    deltaValue = None
    clock = LWWRegister.Clock.DEFAULT
    customClockValue = 0
  }

  override def state: CrdtState.State =
    CrdtState.State.Lwwregister(
      LWWRegisterState(Some(anySupport.encodeScala(value)), convertClock(clock), customClockValue)
    )

  override val applyDelta = {
    case CrdtDelta.Delta.Lwwregister(LWWRegisterDelta(Some(any), _, _, _)) =>
      this.value = anySupport.decode(any).asInstanceOf[T]
  }

  override val applyState = {
    case CrdtState.State.Lwwregister(LWWRegisterState(Some(any), _, _, _)) =>
      this.value = anySupport.decode(any).asInstanceOf[T]
  }

  private def convertClock(clock: LWWRegister.Clock): CrdtClock =
    clock match {
      case LWWRegister.Clock.DEFAULT => CrdtClock.DEFAULT
      case LWWRegister.Clock.REVERSE => CrdtClock.REVERSE
      case LWWRegister.Clock.CUSTOM => CrdtClock.CUSTOM
      case LWWRegister.Clock.CUSTOM_AUTO_INCREMENT => CrdtClock.CUSTOM_AUTO_INCREMENT
    }

  override def toString = s"LWWRegister($value)"
}
Example 14
Source File: PerformanceReport.scala From ohara with Apache License 2.0
package oharastream.ohara.it.performance

import java.util.Objects

import oharastream.ohara.common.setting.ObjectKey
import oharastream.ohara.common.util.CommonUtils

import scala.collection.immutable.ListMap
import scala.collection.mutable

trait PerformanceReport {
  def key: ObjectKey

  def className: String

  def records: Map[Long, Map[String, Double]]
}

object PerformanceReport {
  def builder = new Builder

  final class Builder private[PerformanceReport] extends oharastream.ohara.common.pattern.Builder[PerformanceReport] {
    private[this] var key: ObjectKey = _
    private[this] var className: String = _
    private[this] val records = mutable.Map[Long, Map[String, Double]]()

    def connectorKey(key: ObjectKey): Builder = {
      this.key = Objects.requireNonNull(key)
      this
    }

    def className(className: String): Builder = {
      this.className = CommonUtils.requireNonEmpty(className)
      this
    }

    def resetValue(duration: Long, header: String): Builder = {
      records.put(duration, Map(header -> 0.0))
      this
    }

    def record(duration: Long, header: String, value: Double): Builder = {
      val record = records.getOrElse(duration, Map(header -> 0.0))
      records.put(
        duration,
        record + (header -> (record.getOrElse(header, 0.0) + value))
      )
      this
    }

    override def build: PerformanceReport = new PerformanceReport {
      override val className: String = CommonUtils.requireNonEmpty(Builder.this.className)

      override val records: Map[Long, Map[String, Double]] = ListMap(
        Builder.this.records.toSeq.sortBy(_._1)((x: Long, y: Long) => y.compare(x)): _*
      )

      override def key: ObjectKey = Objects.requireNonNull(Builder.this.key)
    }
  }
}
Example 15
Source File: ProcessOverSocketStreamConnectionProvider.scala From intellij-lsp with Apache License 2.0
package com.github.gtache.lsp.client.connection

import java.io.{IOException, InputStream, OutputStream}
import java.net.{ServerSocket, Socket}
import java.util.Objects

import com.intellij.openapi.diagnostic.Logger

class ProcessOverSocketStreamConnectionProvider(commands: Seq[String], workingDir: String, port: Int = 0)
  extends ProcessStreamConnectionProvider(commands, workingDir) {

  import ProcessOverSocketStreamConnectionProvider._

  private var socket: Socket = _
  private var inputStream: InputStream = _
  private var outputStream: OutputStream = _

  @throws[IOException]
  override def start(): Unit = {
    val serverSocket = new ServerSocket(port)
    val socketThread = new Thread(() => {
      try socket = serverSocket.accept
      catch {
        case e: IOException => LOG.error(e)
      } finally try serverSocket.close()
      catch {
        case e: IOException => LOG.error(e)
      }
    })
    socketThread.start()
    super.start()
    try {
      socketThread.join(5000)
    } catch {
      case e: InterruptedException => LOG.error(e)
    }
    if (socket == null) throw new IOException("Unable to make socket connection: " + toString) //$NON-NLS-1$
    inputStream = socket.getInputStream
    outputStream = socket.getOutputStream
  }

  override def getInputStream: InputStream = inputStream

  override def getOutputStream: OutputStream = outputStream

  override def getErrorStream: InputStream = inputStream

  override def stop(): Unit = {
    super.stop()
    if (socket != null) try socket.close()
    catch {
      case e: IOException => LOG.error(e)
    }
  }

  override def hashCode: Int = {
    val result = super.hashCode
    result ^ Objects.hashCode(this.port)
  }
}

object ProcessOverSocketStreamConnectionProvider {
  private val LOG = Logger.getInstance(classOf[ProcessOverSocketStreamConnectionProvider])
}
Example 16
Source File: ProcessStreamConnectionProvider.scala From intellij-lsp with Apache License 2.0
package com.github.gtache.lsp.client.connection

import java.io.{File, IOException, InputStream, OutputStream}
import java.util.Objects

import com.intellij.openapi.diagnostic.Logger
import org.jetbrains.annotations.Nullable

class ProcessStreamConnectionProvider(private var commands: Seq[String], private var workingDir: String) extends StreamConnectionProvider {
  private val LOG: Logger = Logger.getInstance(classOf[ProcessStreamConnectionProvider])

  @Nullable private var process: Process = _

  @throws[IOException]
  override def start(): Unit = {
    if (this.workingDir == null || this.commands == null || this.commands.isEmpty || this.commands.contains(null))
      throw new IOException("Unable to start language server: " + this.toString) //$NON-NLS-1$
    val builder = createProcessBuilder
    LOG.info("Starting server process with commands " + commands + " and workingDir " + workingDir)
    this.process = builder.start

    if (!process.isAlive) throw new IOException("Unable to start language server: " + this.toString)
    else LOG.info("Server process started " + process)
  }

  protected def createProcessBuilder: ProcessBuilder = {
    import scala.collection.JavaConverters._
    val builder = new ProcessBuilder(getCommands.map(s => s.replace("\'", "")).asJava)
    builder.directory(new File(getWorkingDirectory))
    builder
  }

  protected def getCommands: Seq[String] = commands

  def setCommands(commands: Seq[String]): Unit = {
    this.commands = commands
  }

  protected def getWorkingDirectory: String = workingDir

  def setWorkingDirectory(workingDir: String): Unit = {
    this.workingDir = workingDir
  }

  @Nullable override def getInputStream: InputStream = {
    if (process == null) null
    else process.getInputStream
  }

  @Nullable override def getOutputStream: OutputStream = {
    if (process == null) null
    else process.getOutputStream
  }

  @Nullable override def getErrorStream: InputStream = {
    if (process == null) null
    else process.getErrorStream
  }

  override def stop(): Unit = {
    if (process != null) process.destroy()
  }

  override def equals(obj: Any): Boolean = {
    obj match {
      case other: ProcessStreamConnectionProvider =>
        getCommands.size == other.getCommands.size &&
          this.getCommands.toSet == other.getCommands.toSet &&
          this.getWorkingDirectory == other.getWorkingDirectory
      case _ => false
    }
  }

  override def hashCode: Int = {
    Objects.hashCode(this.getCommands) ^ Objects.hashCode(this.getWorkingDirectory)
  }
}
Example 17
Source File: LogEventBroadcaster.scala From netty-in-action-scala with Apache License 2.0
package nia.chapter13

import io.netty.bootstrap.Bootstrap
import io.netty.channel.{ ChannelOption, EventLoopGroup }
import io.netty.channel.nio.NioEventLoopGroup
import io.netty.channel.socket.nio.NioDatagramChannel
import java.io.File
import java.io.RandomAccessFile
import java.net.InetSocketAddress
import java.lang.{ Boolean ⇒ JBoolean }
import java.util.Objects

import scala.util.control.Breaks._

object LogEventBroadcaster {
  @throws[Exception]
  def main(args: Array[String]): Unit = {
    if (args.length != 2) throw new IllegalArgumentException

    // Create and start a new LogEventBroadcaster instance
    val broadcaster =
      new LogEventBroadcaster(new InetSocketAddress("255.255.255.255", args(0).toInt), new File(args(1)))
    try {
      broadcaster.run()
    } finally {
      broadcaster.stop()
    }
  }
}

class LogEventBroadcaster(address: InetSocketAddress, file: File) {
  val group: EventLoopGroup = new NioEventLoopGroup
  val bootstrap = new Bootstrap

  // Bootstrap the (connectionless) NioDatagramChannel
  bootstrap
    .group(group)
    .channel(classOf[NioDatagramChannel])
    // Set the SO_BROADCAST socket option
    .option[JBoolean](ChannelOption.SO_BROADCAST, true)
    .handler(new LogEventEncoder(address))

  @throws[Exception]
  def run(): Unit = {
    // Bind the Channel
    val ch = bootstrap.bind(0).sync.channel
    var pointer: Long = 0

    // Start the main processing loop
    breakable {
      while (true) {
        val len = file.length
        if (len < pointer) {
          // file was reset
          // If necessary, reset the file pointer to the last byte of the file
          pointer = len
        } else if (len > pointer) {
          // Content was added
          val raf = new RandomAccessFile(file, "r")
          // Set the current file pointer so that no old log entries are sent
          raf.seek(pointer)
          Iterator.continually(raf.readLine())
            .takeWhile(Objects.nonNull)
            .foreach { line ⇒
              ch.writeAndFlush(LogEvent(file.getAbsolutePath, line))
            }
          // Store the current position within the file
          pointer = raf.getFilePointer
          raf.close()
        }
        try {
          // Sleep for one second; if interrupted, exit the loop, otherwise process again
          Thread.sleep(1000)
        } catch {
          case e: InterruptedException ⇒
            Thread.interrupted
            break
        }
      }
    }
  }

  def stop(): Unit = {
    group.shutdownGracefully()
  }
}
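The broadcaster above drains the log file with Iterator.continually(raf.readLine()).takeWhile(Objects.nonNull): RandomAccessFile.readLine returns null at end of file, and Objects.nonNull serves as a ready-made predicate for that sentinel. The same pattern fits any Java read loop that signals end of input with null; here is a minimal standalone sketch using a BufferedReader (the input text and object name are illustrative only):

import java.io.{BufferedReader, StringReader}
import java.util.Objects

// Illustrative only: read lines until the Java API signals end-of-input with null.
object NonNullDemo extends App {
  val reader = new BufferedReader(new StringReader("one\ntwo\nthree"))

  val lines: List[String] =
    Iterator
      .continually(reader.readLine()) // yields null once the input is exhausted
      .takeWhile(Objects.nonNull)     // stop at the null sentinel
      .toList

  println(lines) // List(one, two, three)
}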
Example 18
Source File: FieldNameAndArguments.scala From sangria with Apache License 2.0
package sangria.validation.rules.experimental.overlappingfields

import java.util
import java.util.{Comparator, Objects}

import sangria.ast
import sangria.renderer.QueryRenderer

final class FieldNameAndArguments(private val field: ast.Field) {

  private val fieldName: String = field.name
  private val arguments: util.ArrayList[(String, String)] = argumentsKey(field.arguments)

  override val hashCode: Int = {
    Objects.hash(fieldName, arguments)
  }

  override def equals(obj: Any): Boolean = {
    obj match {
      case other: FieldNameAndArguments => fieldName == other.fieldName && arguments == other.arguments
      case _ => false
    }
  }

  def conflictReason(other: FieldNameAndArguments): String = {
    if (fieldName != other.fieldName) {
      s"'$fieldName' and '${other.fieldName}' are different fields"
    } else if (arguments != other.arguments) {
      "of differing arguments"
    } else {
      throw new IllegalArgumentException("no conflict between keys")
    }
  }

  private def argumentsKey(arguments: Vector[ast.Argument]): util.ArrayList[(String, String)] = {
    val key = new util.ArrayList[(String, String)](arguments.size)
    arguments.foreach { argument =>
      key.add(argument.name -> QueryRenderer.render(argument.value, QueryRenderer.Compact))
    }
    key.sort(new Comparator[(String, String)] {
      override def compare(a: (String, String), b: (String, String)): Int = a._1.compareTo(b._1)
    })
    key
  }
}
Example 19
Source File: CustomShuffledRDD.scala From sparkoscope with Apache License 2.0
package org.apache.spark.scheduler

import java.util.Arrays
import java.util.Objects

import org.apache.spark._
import org.apache.spark.rdd.RDD

class CustomShuffledRDD[K, V, C](
    var dependency: ShuffleDependency[K, V, C],
    partitionStartIndices: Array[Int])
  extends RDD[(K, C)](dependency.rdd.context, Seq(dependency)) {

  def this(dep: ShuffleDependency[K, V, C]) = {
    this(dep, (0 until dep.partitioner.numPartitions).toArray)
  }

  override def getDependencies: Seq[Dependency[_]] = List(dependency)

  override val partitioner = {
    Some(new CoalescedPartitioner(dependency.partitioner, partitionStartIndices))
  }

  override def getPartitions: Array[Partition] = {
    val n = dependency.partitioner.numPartitions
    Array.tabulate[Partition](partitionStartIndices.length) { i =>
      val startIndex = partitionStartIndices(i)
      val endIndex =
        if (i < partitionStartIndices.length - 1) partitionStartIndices(i + 1) else n
      new CustomShuffledRDDPartition(i, startIndex, endIndex)
    }
  }

  override def compute(p: Partition, context: TaskContext): Iterator[(K, C)] = {
    val part = p.asInstanceOf[CustomShuffledRDDPartition]
    SparkEnv.get.shuffleManager.getReader(
      dependency.shuffleHandle, part.startIndexInParent, part.endIndexInParent, context)
      .read()
      .asInstanceOf[Iterator[(K, C)]]
  }

  override def clearDependencies() {
    super.clearDependencies()
    dependency = null
  }
}
Example 20
Source File: ShapefileRelation.scala From magellan with Apache License 2.0
package magellan

import java.util.Objects

import magellan.io._
import magellan.mapreduce._
import org.apache.hadoop.io.{ArrayWritable, LongWritable, MapWritable, Text}
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.SQLContext

import scala.collection.JavaConversions._
import scala.util.Try

case class ShapeFileRelation(
    path: String,
    parameters: Map[String, String])
    (@transient val sqlContext: SQLContext) extends SpatialRelation {

  protected override def _buildScan(): RDD[Array[Any]] = {

    // read the shx files, if they exist
    val fileNameToFileSplits = Try(sc.newAPIHadoopFile(
      path + "/*.shx",
      classOf[ShxInputFormat],
      classOf[Text],
      classOf[ArrayWritable]
    ).map { case (txt: Text, splits: ArrayWritable) =>
      val fileName = txt.toString
      val s = splits.get()
      val size = s.length
      var i = 0
      val v = Array.fill(size)(0L)
      while (i < size) {
        v.update(i, s(i).asInstanceOf[LongWritable].get())
        i += 1
      }
      (fileName, v)
    }.collectAsMap())

    fileNameToFileSplits.map(SplitInfos.SPLIT_INFO_MAP.set(_))

    val shapefileRdd = sqlContext.sparkContext.newAPIHadoopFile(
      path + "/*.shp",
      classOf[ShapeInputFormat],
      classOf[ShapeKey],
      classOf[ShapeWritable]
    )

    val dbaseRdd = sqlContext.sparkContext.newAPIHadoopFile(
      path + "/*.dbf",
      classOf[DBInputFormat],
      classOf[ShapeKey],
      classOf[MapWritable]
    )

    val dataRdd = shapefileRdd.map { case (k, v) =>
      ((k.getFileNamePrefix(), k.getRecordIndex()), v.shape)
    }

    val metadataRdd = dbaseRdd.map { case (k, v) =>
      val meta = v.entrySet().map { kv =>
        val k = kv.getKey.asInstanceOf[Text].toString
        val v = kv.getValue.asInstanceOf[Text].toString
        (k, v)
      }.toMap
      ((k.getFileNamePrefix(), k.getRecordIndex()), meta)
    }

    dataRdd.leftOuterJoin(metadataRdd).map(f => Array(f._2._1, f._2._2))
  }

  override def hashCode(): Int = Objects.hash(path, schema)
}
Example 21
Source File: JavaSourceMutableView.scala From rug with GNU General Public License v3.0
package com.atomist.rug.kind.java

import java.util.Objects

import com.atomist.rug.RugRuntimeException
import com.atomist.rug.kind.core.{LazyFileArtifactBackedMutableView, ProjectMutableView}
import com.atomist.rug.kind.java.JavaTypeType._
import com.atomist.rug.spi.{ExportFunction, ExportFunctionParameterDescription, MutableView}
import com.atomist.source.FileArtifact
import com.github.javaparser.JavaParser
import com.github.javaparser.ast.body.ClassOrInterfaceDeclaration
import com.github.javaparser.ast.expr.Name
import com.github.javaparser.ast.{CompilationUnit, PackageDeclaration}

import scala.collection.JavaConverters._
import scala.util.Try

class JavaSourceMutableView(old: FileArtifact, parent: ProjectMutableView)
  extends LazyFileArtifactBackedMutableView(old, parent) {

  lazy val compilationUnit: Option[CompilationUnit] = {
    Try {
      // It's possible there'll be a parsing error. Hide it
      JavaParser.parse(old.inputStream)
    }.toOption
  }

  override protected def wellFormed: Boolean = compilationUnit.isDefined

  override def dirty = true

  override protected def currentContent: String = Objects.toString(compilationUnit.getOrElse(""))

  override def childNodeNames: Set[String] = Set(JavaTypeAlias)

  override def childrenNamed(fieldName: String): Seq[MutableView[_]] = (compilationUnit, fieldName) match {
    case (None, _) => Nil
    case (Some(cu), JavaTypeAlias) =>
      cu.getTypes.asScala
        .collect {
          case c: ClassOrInterfaceDeclaration => c
        }.map(c => new JavaClassOrInterfaceMutableView(c, this))
    case _ => throw new RugRuntimeException(null, s"No child with name '$fieldName' in ${getClass.getSimpleName}")
  }

  override def commit(): Unit = if (dirty) {
    val latest = currentBackingObject
    parent.updateFile(old, latest)
  }

  @ExportFunction(readOnly = true, description = "Return the Java project")
  def javaProject: JavaProjectMutableView = new JavaProjectMutableView(parent)

  @ExportFunction(readOnly = true, description = "Return the package name")
  def pkg: String = compilationUnit match {
    case Some(cu) => cu.getPackageDeclaration.get().getNameAsString
    case None => ""
  }

  @ExportFunction(readOnly = true, description = "Count the types in this source file")
  def typeCount: Int = compilationUnit match {
    case Some(cu) => cu.getTypes.size()
    case None => 0
  }

  @ExportFunction(readOnly = false, description = "Move the source file to the given package")
  def movePackage(@ExportFunctionParameterDescription(name = "newPackage",
    description = "The package to move the source file to") newPackage: String): Unit = compilationUnit match {
    case Some(cu) =>
      val pathToReplace = pkg.replace(".", "/")
      val newPath = newPackage.replace(".", "/")
      cu.setPackageDeclaration(new PackageDeclaration(new Name(newPackage)))
      setPath(path.replace(pathToReplace, newPath))
    case None =>
  }

  def rename(newName: String): Unit = {
    setName(newName)
  }
}
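currentContent above funnels its value through Objects.toString, which renders null as the string "null" rather than throwing a NullPointerException; the two-argument overload substitutes a caller-supplied default instead. A brief standalone sketch of that JDK behaviour (the value, default, and object name are illustrative only, not part of the rug API):

import java.util.Objects

// Illustrative only: null-tolerant string conversion with java.util.Objects.
object ToStringDemo extends App {
  val parsed: String = null // e.g. a result that may be absent

  println(Objects.toString(parsed))            // prints "null" instead of throwing NPE
  println(Objects.toString(parsed, "<empty>")) // prints the supplied default, "<empty>"
  println(Objects.toString(Int.box(42)))       // prints "42" for non-null values
}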
Example 22
Source File: ExceptionEnhancer.scala From rug with GNU General Public License v3.0
package com.atomist.rug.runtime.js

import java.util.Objects

import com.atlassian.sourcemap.SourceMapImpl
import com.atomist.source.ArtifactSource
import com.atomist.tree.content.text.{LineInputPosition, LineInputPositionImpl}
import jdk.nashorn.internal.objects.{NativeError, NativeTypeError}
import jdk.nashorn.internal.runtime.ECMAException

case class RuntimeErrorInfo(message: String,
                            filePath: String,
                            pos: LineInputPosition,
                            detail: Object) {

  override def toString: String =
    s"[$message] at $positionInfo"

  def positionInfo: String = s"$filePath:${pos.lineFrom1}/${pos.colFrom1}\n${pos.show}"
}

class SourceLanguageRuntimeException(jsRuntimeErrorInfo: RuntimeErrorInfo,
                                     val sourceLangRuntimeErrorInfo: RuntimeErrorInfo)
  extends JavaScriptRuntimeException(jsRuntimeErrorInfo,
    sourceLangRuntimeErrorInfo.toString + "\n--via generated code--\n" + jsRuntimeErrorInfo.positionInfo) {
}
Example 23
Source File: CustomShuffledRDD.scala From drizzle-spark with Apache License 2.0
package org.apache.spark.scheduler

import java.util.Arrays
import java.util.Objects

import org.apache.spark._
import org.apache.spark.rdd.RDD

class CustomShuffledRDD[K, V, C](
    var dependency: ShuffleDependency[K, V, C],
    partitionStartIndices: Array[Int])
  extends RDD[(K, C)](dependency.rdd.context, Seq(dependency)) {

  def this(dep: ShuffleDependency[K, V, C]) = {
    this(dep, (0 until dep.partitioner.numPartitions).toArray)
  }

  override def getDependencies: Seq[Dependency[_]] = List(dependency)

  override val partitioner = {
    Some(new CoalescedPartitioner(dependency.partitioner, partitionStartIndices))
  }

  override def getPartitions: Array[Partition] = {
    val n = dependency.partitioner.numPartitions
    Array.tabulate[Partition](partitionStartIndices.length) { i =>
      val startIndex = partitionStartIndices(i)
      val endIndex =
        if (i < partitionStartIndices.length - 1) partitionStartIndices(i + 1) else n
      new CustomShuffledRDDPartition(i, startIndex, endIndex)
    }
  }

  override def compute(p: Partition, context: TaskContext): Iterator[(K, C)] = {
    val part = p.asInstanceOf[CustomShuffledRDDPartition]
    SparkEnv.get.shuffleManager.getReader(
      dependency.shuffleHandle, part.startIndexInParent, part.endIndexInParent, context)
      .read()
      .asInstanceOf[Iterator[(K, C)]]
  }

  override def clearDependencies() {
    super.clearDependencies()
    dependency = null
  }
}
Example 24
Source File: BasicAccess.scala From ohara with Apache License 2.0
package oharastream.ohara.client.configurator

import java.util.Objects

import oharastream.ohara.client.HttpExecutor
import oharastream.ohara.client.configurator.BasicAccess.UrlBuilder
import oharastream.ohara.common.setting.ObjectKey
import oharastream.ohara.common.util.CommonUtils

import scala.concurrent.{ExecutionContext, Future}

  protected final def url: String =
    s"http://${CommonUtils.requireNonEmpty(hostname)}:${CommonUtils
      .requireConnectionPort(port)}/${CommonUtils.requireNonEmpty(version)}/${CommonUtils.requireNonEmpty(prefixPath)}"

  protected def urlBuilder: UrlBuilder = (prefix, key, postfix, params) => {
    var url = BasicAccess.this.url
    prefix.foreach(s => url = s"$url/$s")
    key.foreach(k => url = s"$url/${k.name()}")
    postfix.foreach(s => url = s"$url/$s")
    key.foreach(k => url = s"$url?$GROUP_KEY=${k.group()}")
    val divider = key match {
      case None => "?"
      case Some(_) => "&"
    }

    if (params.nonEmpty)
      url = url + divider + params
        .map {
          case (key, value) => s"$key=$value"
        }
        .mkString("&")

    url
  }
}

object BasicAccess {
  trait UrlBuilder extends oharastream.ohara.common.pattern.Builder[String] {
    private[this] var prefix: Option[String] = None
    private[this] var key: Option[ObjectKey] = None
    private[this] var postfix: Option[String] = None
    private[this] var params: Map[String, String] = Map.empty

    def prefix(prefix: String): UrlBuilder = {
      this.prefix = Some(CommonUtils.requireNonEmpty(prefix))
      this
    }

    def key(key: ObjectKey): UrlBuilder = {
      this.key = Some(key)
      this
    }

    def postfix(postfix: String): UrlBuilder = {
      this.postfix = Some(CommonUtils.requireNonEmpty(postfix))
      this
    }

    def param(key: String, value: String): UrlBuilder = {
      this.params += (key -> value)
      this
    }

    def params(params: Map[String, String]): UrlBuilder = {
      this.params ++= Objects.requireNonNull(params)
      this
    }

    override def build(): String = doBuild(
      prefix = prefix,
      key = key,
      postfix = postfix,
      params = params
    )

    protected def doBuild(
      prefix: Option[String],
      key: Option[ObjectKey],
      postfix: Option[String],
      params: Map[String, String]
    ): String
  }
}
Example 25
Source File: MetricsCache.scala From ohara with Apache License 2.0
package oharastream.ohara.configurator.store

import java.util.Objects
import java.util.concurrent.TimeUnit
import java.util.concurrent.atomic.AtomicBoolean

import oharastream.ohara.client.configurator.BrokerApi.BrokerClusterInfo
import oharastream.ohara.client.configurator.ClusterInfo
import oharastream.ohara.client.configurator.MetricsApi.Metrics
import oharastream.ohara.client.configurator.ShabondiApi.ShabondiClusterInfo
import oharastream.ohara.client.configurator.StreamApi.StreamClusterInfo
import oharastream.ohara.client.configurator.WorkerApi.WorkerClusterInfo
import oharastream.ohara.client.configurator.ZookeeperApi.ZookeeperClusterInfo
import oharastream.ohara.common.annotations.{Optional, VisibleForTesting}
import oharastream.ohara.common.cache.RefreshableCache
import oharastream.ohara.common.setting.ObjectKey
import oharastream.ohara.common.util.Releasable

import scala.concurrent.duration.Duration

trait MetricsCache extends Releasable {
  def meters(clusterInfo: ClusterInfo): Map[String, Map[ObjectKey, Metrics]]

  def meters(clusterInfo: ClusterInfo, key: ObjectKey): Map[String, Metrics] =
    meters(clusterInfo)
      .map {
        case (hostname, keyAndMeters) =>
          hostname -> keyAndMeters.getOrElse(key, Metrics.EMPTY)
      }
}

object MetricsCache {
  def builder: Builder = new Builder()

  // TODO: remove this workaround if google guava support the custom comparison ... by chia
  @VisibleForTesting
  private[store] case class RequestKey(key: ObjectKey, service: String) {
    override def equals(obj: Any): Boolean = obj match {
      case another: RequestKey => another.key == key && another.service == service
      case _ => false
    }

    override def hashCode(): Int = 31 * key.hashCode + service.hashCode

    override def toString: String = s"key:$key, service:$service"
  }

  class Builder private[MetricsCache] extends oharastream.ohara.common.pattern.Builder[MetricsCache] {
    private[this] var refresher: () => Map[ClusterInfo, Map[String, Map[ObjectKey, Metrics]]] = _
    private[this] var frequency: Duration = Duration(5, TimeUnit.SECONDS)

    def refresher(refresher: () => Map[ClusterInfo, Map[String, Map[ObjectKey, Metrics]]]): Builder = {
      this.refresher = Objects.requireNonNull(refresher)
      this
    }

    @Optional("default value is equal to timeout")
    def frequency(frequency: Duration): Builder = {
      this.frequency = Objects.requireNonNull(frequency)
      this
    }

    override def build: MetricsCache = new MetricsCache {
      import scala.jdk.CollectionConverters._
      private[this] val refresher = Objects.requireNonNull(Builder.this.refresher)
      private[this] val closed = new AtomicBoolean(false)
      private[this] val cache = RefreshableCache
        .builder[RequestKey, Map[String, Map[ObjectKey, Metrics]]]()
        .supplier(
          () =>
            refresher().map {
              case (clusterInfo, meters) =>
                key(clusterInfo) -> meters
            }.asJava
        )
        .frequency(java.time.Duration.ofMillis(frequency.toMillis))
        .build()

      private[this] def key(clusterInfo: ClusterInfo): RequestKey = RequestKey(
        key = clusterInfo.key,
        service = clusterInfo match {
          case _: ZookeeperClusterInfo => "zk"
          case _: BrokerClusterInfo => "bk"
          case _: WorkerClusterInfo => "wk"
          case _: StreamClusterInfo => "stream"
          case _: ShabondiClusterInfo => "shabondi"
          case c: ClusterInfo => c.getClass.getSimpleName // used by testing
        }
      )

      override def meters(clusterInfo: ClusterInfo): Map[String, Map[ObjectKey, Metrics]] =
        cache.get(key(clusterInfo)).orElse(Map.empty)

      override def close(): Unit = if (closed.compareAndSet(false, true)) Releasable.close(cache)
    }
  }
}
Example 26
Source File: TestContainerCreator.scala From ohara with Apache License 2.0
package oharastream.ohara.agent.docker

import java.util.Objects

import oharastream.ohara.client.configurator.VolumeApi.Volume
import oharastream.ohara.common.rule.OharaTest
import oharastream.ohara.common.util.CommonUtils
import org.junit.Test
import org.scalatest.matchers.should.Matchers._

import scala.concurrent.{ExecutionContext, Future}

class TestContainerCreator extends OharaTest {
  private[this] def fake(): DockerClient.ContainerCreator =
    (
      nodeName: String,
      hostname: String,
      imageName: String,
      volumeMaps: Map[Volume, String],
      name: String,
      command: Option[String],
      arguments: Seq[String],
      ports: Map[Int, Int],
      envs: Map[String, String],
      routes: Map[String, String],
      _: ExecutionContext
    ) =>
      Future.successful {
        // we check only the required arguments
        CommonUtils.requireNonEmpty(nodeName)
        CommonUtils.requireNonEmpty(hostname)
        CommonUtils.requireNonEmpty(imageName)
        CommonUtils.requireNonEmpty(name)
        Objects.requireNonNull(command)
        Objects.requireNonNull(ports)
        Objects.requireNonNull(envs)
        Objects.requireNonNull(routes)
        Objects.requireNonNull(arguments)
        Objects.requireNonNull(volumeMaps)
      }

  @Test
  def nullHostname(): Unit = an[NullPointerException] should be thrownBy fake().hostname(null)

  @Test
  def emptyHostname(): Unit = an[IllegalArgumentException] should be thrownBy fake().hostname("")

  @Test
  def nullImageName(): Unit = an[NullPointerException] should be thrownBy fake().imageName(null)

  @Test
  def emptyImageName(): Unit = an[IllegalArgumentException] should be thrownBy fake().imageName("")

  @Test
  def nullName(): Unit = an[NullPointerException] should be thrownBy fake().name(null)

  @Test
  def emptyName(): Unit = an[IllegalArgumentException] should be thrownBy fake().name("")

  @Test
  def nullCommand(): Unit = an[NullPointerException] should be thrownBy fake().command(null)

  @Test
  def emptyCommand(): Unit = fake().command("")

  @Test
  def nullPorts(): Unit = an[NullPointerException] should be thrownBy fake().portMappings(null)

  @Test
  def emptyPorts(): Unit = fake().portMappings(Map.empty)

  @Test
  def nullEnvs(): Unit = an[NullPointerException] should be thrownBy fake().envs(null)

  @Test
  def emptyEnvs(): Unit = fake().envs(Map.empty)

  @Test
  def nullRoute(): Unit = an[NullPointerException] should be thrownBy fake().routes(null)

  @Test
  def emptyRoute(): Unit = fake().routes(Map.empty)

  @Test
  def nullArguments(): Unit = an[NullPointerException] should be thrownBy fake().arguments(null)

  @Test
  def emptyArguments(): Unit = fake().arguments(Seq.empty)

  @Test
  def testExecuteWithoutRequireArguments(): Unit =
    // At least assign imageName
    an[NullPointerException] should be thrownBy fake().create()
}