java.util.Map Scala Examples

The following examples show how to use java.util.Map from Scala. Each example is taken from an open-source project; the project and source file are noted above the code.
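Before the project examples, here is a minimal, self-contained sketch (not from any of the projects below) of the usual interop pattern: wrap a java.util.Map with asScala to use Scala combinators, and convert back with asJava when a Java API expects one.

import java.util.{HashMap, Map => JMap}
import scala.collection.JavaConverters._

object JavaMapInterop extends App {
  // A Java map, as a Java library would hand it to us.
  val jmap: JMap[String, Int] = new HashMap[String, Int]()
  jmap.put("a", 1)
  jmap.put("b", 2)

  // Wrap it as a Scala map to use collection combinators...
  val total = jmap.asScala.values.sum                    // 3

  // ...and convert a Scala map back for APIs that expect java.util.Map.
  val back: JMap[String, Int] = Map("c" -> 3).asJava
  println(s"total=$total back=$back")
}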
Example 1
Source File: JDBMStoreIndexer.scala    From dbpedia-spotlight-model   with Apache License 2.0
package org.dbpedia.spotlight.db

import org.dbpedia.spotlight.db.disk.JDBMStore
import java.io.File
import org.apache.commons.lang.NotImplementedException

import scala.Predef._
import scala._
import java.util.Map
import org.dbpedia.spotlight.model._



class JDBMStoreIndexer(val baseDir: File)
  extends TokenOccurrenceIndexer
{

  // Token occurrences, keyed by resource id; the value is (token ids, counts, total count).
  lazy val contextStore = new JDBMStore[Int, (Array[Int], Array[Int], Int)](new File(baseDir, "context.disk").getAbsolutePath)

  def addTokenOccurrence(resource: DBpediaResource, token: TokenType, count: Int) {
    throw new NotImplementedException()
  }

  def addTokenOccurrence(resource: DBpediaResource, tokenCounts: Map[Int, Int]) {
    throw new NotImplementedException()
  }


  def addTokenOccurrences(occs: Iterator[(DBpediaResource, Array[TokenType], Array[Int])]) {

    occs.filter(t => t != null && t._1 != null).foreach {
      case (res, tokens, counts) =>
        if (res != null) {
          assert(tokens.size == counts.size)

          val existing = contextStore.get(res.id)
          if (existing != null) {
            val (existingTokens, existingCounts, _) = existing

            // Merge the new (token id, count) pairs with the stored ones,
            // summing the counts of token ids that appear in both.
            val (mergedTokens, mergedCounts) =
              (tokens.map(_.id).zip(counts) ++ existingTokens.zip(existingCounts))
                .groupBy(_._1)
                .map { case (k, v) => (k, v.map(_._2).sum) }
                .unzip
            contextStore.add(res.id, (mergedTokens.toArray, mergedCounts.toArray, mergedCounts.sum))
          } else {
            contextStore.add(res.id, (tokens.map(_.id), counts, counts.sum))
          }
        }
    }

    writeTokenOccurrences()
  }

  def addTokenOccurrences(occs: Map[DBpediaResource, Map[Int, Int]]) {
    throw new NotImplementedException()
  }

  def createContextStore(n: Int) {
    // no-op for the disk-backed store
  }

  def writeTokenOccurrences() {
    contextStore.commit()
  }

} 
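The interesting part of addTokenOccurrences above is how it merges a resource's new (token id, count) pairs with the ones already on disk. The same merge as a standalone, hedged sketch with made-up data and no JDBM dependency:

object MergeCounts extends App {
  // Two (tokenId, count) vectors, stored as parallel arrays:
  val storedTokens = Array(1, 2, 3)
  val storedCounts = Array(10, 20, 30)
  val newTokens    = Array(2, 4)
  val newCounts    = Array(5, 7)

  // Concatenate the pairs, group by token id, and sum the counts per id.
  val (mergedTokens, mergedCounts) =
    (newTokens.zip(newCounts) ++ storedTokens.zip(storedCounts))
      .groupBy(_._1)
      .map { case (id, pairs) => (id, pairs.map(_._2).sum) }
      .unzip

  // Map iteration order is unspecified, so sort before printing.
  println(mergedTokens.zip(mergedCounts).toList.sorted)
  // List((1,10), (2,25), (3,30), (4,7))
}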
Example 2
Source File: MemoryContextStore.scala    From dbpedia-spotlight-model   with Apache License 2.0
package org.dbpedia.spotlight.db.memory

import java.util.{HashMap, Map}

import com.esotericsoftware.kryo.io.{Input, Output}
import com.esotericsoftware.kryo.{Kryo, KryoException, KryoSerializable}
import org.apache.commons.lang.NotImplementedException
import org.dbpedia.spotlight.db.model.{ContextStore, TokenTypeStore}
import org.dbpedia.spotlight.model.{DBpediaResource, TokenType}



// NOTE: the class header and several members are omitted in this excerpt;
// the declarations below are inferred from how the following methods use them.
class MemoryContextStore extends ContextStore with KryoSerializable {

  var tokens: Array[Array[Int]] = null     // token-type ids, one row per DBpedia resource
  var counts: Array[Array[Short]] = null   // quantized token counts, parallel to tokens
  var totalTokenCounts: Array[Int] = null  // per-resource totals, computed below
  var tokenTypeStore: TokenTypeStore = null

  // Dequantizes a stored Short count back to an Int (real implementation elided).
  def qc(quantized: Short): Int = quantized.toInt

  // Recomputes totalTokenCounts by summing the dequantized counts in each row.
  def calculateTotalTokenCounts(){
    var i = 0
    while(i < counts.size){

      // counts(i) may be null for resources without context; isInstanceOf is false for null
      if (counts(i).isInstanceOf[Array[Short]]){
        var j = 0

        while(j < counts(i).size ){
          totalTokenCounts(i) += qc(counts(i)(j))
          j += 1
        }

      }
      i += 1
    }
  }


  def read(kryo: Kryo, input: Input) {
    val size = input.readInt()

    tokens = new Array[Array[Int]](size)
    counts = new Array[Array[Short]](size)
    totalTokenCounts = new Array[Int](size)

    var i = 0
    var j = 0

    while(i < size) {
      val subsize = input.readInt()

      if (subsize > 0) {
        tokens(i) = new Array[Int](subsize)
        counts(i) = new Array[Short](subsize)

        j = 0
        while(j < subsize) {
          tokens(i)(j) = input.readInt()
          j += 1
        }

        j = 0
        while(j < subsize) {
          counts(i)(j) = input.readShort()
          j += 1
        }
      }

      i += 1
    }

    if (input.readChar() != '#')
      throw new KryoException("Error in deserializing context store...")

  }

} 
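The excerpt shows only the read half of the Kryo serialization. For orientation, here is a hedged sketch of what a matching write counterpart could look like, assuming the same tokens and counts fields; the real project's implementation may differ.

import com.esotericsoftware.kryo.Kryo
import com.esotericsoftware.kryo.io.Output

// Hypothetical write counterpart, mirroring the layout read() expects: an Int
// row count, then per row an Int subsize followed by subsize token ids (Int)
// and subsize counts (Short), and finally the '#' end marker.
class ContextStoreWriterSketch(tokens: Array[Array[Int]], counts: Array[Array[Short]]) {

  def write(kryo: Kryo, output: Output) {
    output.writeInt(tokens.length)

    var i = 0
    while (i < tokens.length) {
      if (tokens(i) == null) {
        output.writeInt(0)                 // empty row: subsize 0, no payload
      } else {
        output.writeInt(tokens(i).length)
        tokens(i).foreach(t => output.writeInt(t))
        counts(i).foreach(c => output.writeShort(c))
      }
      i += 1
    }

    output.writeChar('#')                  // sentinel checked by read()
  }
}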
Example 3
Source File: MDCSupport.scala    From money   with Apache License 2.0
package com.comcast.money.core.internal

import java.util.Map

import com.comcast.money.api.SpanId
import com.comcast.money.core.Money
import org.slf4j.MDC

object MDCSupport {

  val LogFormat = "[ span-id=%s ][ trace-id=%s ][ parent-id=%s ]"

  def format(spanId: SpanId) = LogFormat.format(spanId.selfId, spanId.traceId, spanId.parentId)
}


class MDCSupport(enabled: Boolean = Money.Environment.enabled) {

  private val MoneyTraceKey = "moneyTrace"
  private val SpanNameKey = "spanName"

  def setSpanMDC(spanId: Option[SpanId]): Unit = if (enabled) {
    spanId match {
      case Some(id) => MDC.put(MoneyTraceKey, MDCSupport.format(id))
      case None => MDC.remove(MoneyTraceKey)
    }
  }

  def propogateMDC(submittingThreadsContext: Option[Map[_, _]]): Unit = if (enabled) {
    submittingThreadsContext match {
      case Some(context: Map[String, String]) => MDC.setContextMap(context)
      case None => MDC.clear()
    }
  }

  def setSpanNameMDC(spanName: Option[String]) =
    if (enabled) {
      spanName match {
        case Some(name) => MDC.put(SpanNameKey, name)
        case None => MDC.remove(SpanNameKey)
      }
    }

  def getSpanNameMDC: Option[String] = Option(MDC.get(SpanNameKey))
} 
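A hedged usage sketch: the submitting thread captures its MDC as a java.util.Map via slf4j's MDC.getCopyOfContextMap, and the worker thread restores it through the example's propogateMDC. The thread wiring here is illustrative; it assumes the MDCSupport class above is on the classpath.

import java.util.{Map => JMap}
import org.slf4j.MDC

object MdcPropagationSketch extends App {
  val mdcSupport = new MDCSupport(enabled = true)

  // Submitting thread: capture the current MDC as a java.util.Map (null -> None).
  val captured: Option[JMap[_, _]] = Option(MDC.getCopyOfContextMap)

  val worker = new Thread(new Runnable {
    def run(): Unit = {
      // Worker thread: restore the submitter's context (or clear it if absent),
      // so span-id / trace-id fields keep appearing in this thread's log lines.
      mdcSupport.propogateMDC(captured)
    }
  })
  worker.start()
  worker.join()
}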
Example 4
Source File: Runner.scala    From avrohugger   with Apache License 2.0
package avrohugger
package tool

import format.abstractions.SourceFormat
import format.{Scavro, SpecificRecord, Standard}
import java.util.Arrays
import java.util.Map
import java.util.TreeMap
import java.io.{InputStream, PrintStream}

import org.apache.avro.tool.Tool

import scala.util.{Failure, Success, Try}
import scala.collection.JavaConverters._



// NOTE: the class header and tool registration are omitted in this excerpt;
// the members below are inferred from their use in run().
class Runner(in: InputStream, out: PrintStream, err: PrintStream) {

  // Tools keyed by name; a TreeMap keeps the help listing sorted.
  val toolsMap: Map[String, Tool] = new TreeMap[String, Tool]()

  // Width of the longest tool name, used to align the help output below.
  lazy val maxLen: Int = toolsMap.asScala.keys.map(_.length).foldLeft(0)(math.max)

  def run(args: Array[String]): Int = {
    if (args.length != 0) {
      val tool: Tool = toolsMap.get(args(0))
      if (tool != null) {
        val result = Try {
          tool.run(
            in, out, err, Arrays.asList(args: _*).subList(1, args.length))
        }
        result match {
          case Success(0) => 0
          case Success(exitCode) =>
            err.println("Tool " + args(0) + " failed with exit code " + exitCode)
            exitCode
          case Failure(e) =>
            err.println("Tool " + args(0) + " failed: " + e.toString)
            1
        }
      } else {
        err.println("Unknown tool: " + args(0))
        1
      }
    } else {
      err.println("----------------")

      err.println("Available tools:")
      for (k <- toolsMap.asScala.values) {
        err.printf("%" + maxLen + "s  %s\n", k.getName(), k.getShortDescription())
      }

      1
    }
  }
} 
Example 5
Source File: IotHubSinkConfig.scala    From toketi-kafka-connect-iothub   with MIT License
package com.microsoft.azure.iot.kafka.connect.sink

import java.util.Map

import com.microsoft.azure.sdk.iot.service.DeliveryAcknowledgement
import org.apache.kafka.common.config.ConfigDef.{Importance, Type, Width}
import org.apache.kafka.common.config.{AbstractConfig, ConfigDef}

object IotHubSinkConfig {

  val IotHubConnectionString    = "IotHub.ConnectionString"
  val IotHubMessageDeliveryAcknowledgement = "IotHub.MessageDeliveryAcknowledgement"

  private val IotHubConnectionStringDoc =
    """IoT Hub ConnectionString. (see "IoT Hub" >> your hub >> "Shared access policies" >> "service" >> """ +
      """"Connection string")"""
  private val IotHubMessageDeliveryAcknowledgementDoc = "The type of delivery acknowledgement for a C2D message. " +
    "Valid values are None, Full, NegativeOnly, PositiveOnly"
  private val iotConfigGroup = "Azure IoT Hub"
  private val validDeliveryAcknowledgementString = ConfigDef.ValidString.in(
    DeliveryAcknowledgement.None.toString,
    DeliveryAcknowledgement.Full.toString,
    DeliveryAcknowledgement.PositiveOnly.toString,
    DeliveryAcknowledgement.NegativeOnly.toString)

  lazy val configDef = new ConfigDef()
    .define(IotHubConnectionString, Type.STRING, Importance.HIGH, IotHubConnectionStringDoc, iotConfigGroup, 1,
      Width.MEDIUM, "IoT Hub Connection String")
    .define(IotHubMessageDeliveryAcknowledgement, Type.STRING, DeliveryAcknowledgement.None.toString,
      validDeliveryAcknowledgementString, Importance.HIGH, IotHubMessageDeliveryAcknowledgementDoc, iotConfigGroup, 1,
      Width.MEDIUM, "Delivery acknowledgement")

  def getConfig(configValues: Map[String, String]): IotHubSinkConfig = {
    new IotHubSinkConfig(configDef, configValues)
  }
}

class IotHubSinkConfig(configDef: ConfigDef, configValues: Map[String, String])
  extends AbstractConfig(configDef, configValues) 
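A hedged usage sketch: Kafka Connect hands a connector its settings as a java.util.Map[String, String], which getConfig validates against the ConfigDef above. The connection-string value is a placeholder, and the sketch assumes the classes above are on the classpath.

import java.util.{HashMap => JHashMap}

object SinkConfigSketch extends App {
  val props = new JHashMap[String, String]()
  props.put("IotHub.ConnectionString", "<your IoT Hub connection string>")  // placeholder
  props.put("IotHub.MessageDeliveryAcknowledgement", "Full")

  val config = IotHubSinkConfig.getConfig(props)

  // Values come back validated against the ConfigDef.
  println(config.getString(IotHubSinkConfig.IotHubMessageDeliveryAcknowledgement))  // Full
}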
Example 6
Source File: IotHubSourceConfig.scala    From toketi-kafka-connect-iothub   with MIT License
// Copyright (c) Microsoft. All rights reserved.

package com.microsoft.azure.iot.kafka.connect.source

import com.microsoft.azure.eventhubs.EventHubClient
import org.apache.kafka.common.config.ConfigDef.{Importance, Type, Width}
import org.apache.kafka.common.config.{AbstractConfig, ConfigDef}

import java.util.Map

object IotHubSourceConfig {

  private val defaultBatchSize = 100
  private val defaultReceiveTimeout = 60
  private val iotConfigGroup   = "Azure IoT Hub"
  private val kafkaConfig      = "Kafka"

  val EventHubCompatibleConnectionString = "IotHub.EventHubCompatibleConnectionString"
  val EventHubCompatibleName             = "IotHub.EventHubCompatibleName"
  val EventHubCompatibleNameDoc          =
    """EventHub compatible name ("IoT Hub" >> your hub >> "Endpoints" >> "Events" >> "Event Hub-compatible name")"""
  val EventHubCompatibleEndpoint        = "IotHub.EventHubCompatibleEndpoint"
  val EventHubCompatibleEndpointDoc     =
    """EventHub compatible endpoint ("IoT Hub" >> your hub >> "Endpoints" >> "Events" >> "Event Hub-compatible """ +
      """endpoint")"""
  val IotHubAccessKeyName                = "IotHub.AccessKeyName"
  val IotHubAccessKeyNameDoc             =
    """IotHub access key name ("IoT Hub" >> your hub >> "Shared access policies", default is service)"""
  val IotHubAccessKeyValue               = "IotHub.AccessKeyValue"
  val IotHubAccessKeyValueDoc            =
    """IotHub access key value ("IoT Hub" >> your hub >> "Shared access policies" >> key name >> "Primary key")"""
  val IotHubConsumerGroup                = "IotHub.ConsumerGroup"
  val IotHubConsumerGroupDoc             = "The IoT Hub consumer group"
  val IotHubPartitions                   = "IotHub.Partitions"
  val IotHubPartitionsDoc                = "Number of IoT Hub partitions"
  val KafkaTopic                         = "Kafka.Topic"
  val KafkaTopicDoc                      = "Kafka topic to copy data to"
  val BatchSize                          = "BatchSize"
  val BatchSizeDoc                       = "The batch size for fetching records from IoT Hub"
  val ReceiveTimeout                     = "ReceiveTimeout"
  val ReceiveTimeoutDoc                  = "Max time to spend receiving messages from IoT Hub"
  val IotHubOffset                       = "IotHub.Offsets"
  val IotHubOffsetDoc                    =
    "Offset for each partition in IotHub, as a comma separated string. This value is ignored if IotHubStartTime is specified."
  val IotHubStartTime                    = "IotHub.StartTime"
  val IotHubStartTimeDoc                 = "The time after which to process messages from IoT Hub. If this value " +
    "is specified, the IotHubOffset value is ignored."
  val TaskPartitionOffsetsMap            = "TaskPartitions"

  lazy val configDef = new ConfigDef()
    .define(EventHubCompatibleName, Type.STRING, Importance.HIGH, EventHubCompatibleNameDoc, iotConfigGroup, 1, Width
      .MEDIUM, "Event Hub compatible name")
    .define(EventHubCompatibleEndpoint, Type.STRING, Importance.HIGH, EventHubCompatibleEndpointDoc,
      iotConfigGroup, 2, Width.MEDIUM, "Event Hub compatible endpoint")
    .define(IotHubAccessKeyName, Type.STRING, Importance.HIGH, IotHubAccessKeyNameDoc, iotConfigGroup, 3, Width.SHORT,
      "Access key name")
    .define(IotHubAccessKeyValue, Type.STRING, Importance.HIGH, IotHubAccessKeyValueDoc, iotConfigGroup, 4,
      Width.LONG, "Access key value")
    .define(IotHubConsumerGroup, Type.STRING, EventHubClient.DEFAULT_CONSUMER_GROUP_NAME, Importance.MEDIUM,
      IotHubConsumerGroupDoc, iotConfigGroup, 5, Width.SHORT, "Consumer group")
    .define(IotHubPartitions, Type.INT, Importance.HIGH, IotHubPartitionsDoc, iotConfigGroup, 6, Width.SHORT,
      "IoT Hub partitions")
    .define(IotHubStartTime, Type.STRING, "", Importance.MEDIUM, IotHubStartTimeDoc, iotConfigGroup, 7, Width.MEDIUM,
      "Start time")
    .define(IotHubOffset, Type.STRING, "", Importance.MEDIUM, IotHubOffsetDoc, iotConfigGroup, 8, Width.MEDIUM,
      "Per partition offsets")
    .define(BatchSize, Type.INT, defaultBatchSize, Importance.MEDIUM, BatchSizeDoc, iotConfigGroup, 9, Width.SHORT,
      "Batch size")
    .define(ReceiveTimeout, Type.INT, defaultReceiveTimeout, Importance.MEDIUM, ReceiveTimeoutDoc, iotConfigGroup, 10,
      Width.SHORT, "Receive Timeout")
    .define(KafkaTopic, Type.STRING, Importance.HIGH, KafkaTopicDoc, kafkaConfig, 11, Width.MEDIUM, "Kafka topic")

  def getConfig(configValues: Map[String, String]): IotHubSourceConfig = {
    new IotHubSourceConfig(configDef, configValues)
  }
}

class IotHubSourceConfig(configDef: ConfigDef, configValues: Map[String, String])
  extends AbstractConfig(configDef, configValues) 
Example 7
Source File: IotHubPartitionSource.scala    From toketi-kafka-connect-iothub   with MIT License
// Copyright (c) Microsoft. All rights reserved.

package com.microsoft.azure.iot.kafka.connect.source

import java.util.{Collections, Map}

import com.typesafe.scalalogging.LazyLogging
import org.apache.kafka.connect.data.Struct
import org.apache.kafka.connect.errors.ConnectException
import org.apache.kafka.connect.source.SourceRecord

import scala.collection.mutable.ListBuffer
import scala.util.control.NonFatal

class IotHubPartitionSource(val dataReceiver: DataReceiver,
    val partition: String,
    val topic: String,
    val batchSize: Int,
    val eventHubName: String,
    val sourcePartition: Map[String, String])
  extends LazyLogging
    with JsonSerialization {

  def getRecords: List[SourceRecord] = {

    logger.debug(s"Polling for data from eventHub $eventHubName partition $partition")
    val list = ListBuffer.empty[SourceRecord]
    try {
      val messages: Iterable[IotMessage] = this.dataReceiver.receiveData(batchSize)

      if (messages.isEmpty) {
        logger.debug(s"Finished processing all messages from eventHub $eventHubName " +
          s"partition ${this.partition}")
      } else {
        logger.debug(s"Received ${messages.size} messages from eventHub $eventHubName " +
          s"partition ${this.partition} (requested $batchSize batch)")

        for (msg: IotMessage <- messages) {

          val kafkaMessage: Struct = IotMessageConverter.getIotMessageStruct(msg)
          val sourceOffset = Collections.singletonMap("EventHubOffset",
            kafkaMessage.getString(IotMessageConverter.offsetKey))
          val sourceRecord = new SourceRecord(sourcePartition, sourceOffset, this.topic, kafkaMessage.schema(),
            kafkaMessage)
          list += sourceRecord
        }
      }
    } catch {
      case NonFatal(e) =>
        val errorMsg = s"Error while getting SourceRecords for eventHub $eventHubName " +
          s"partition $partition. Exception - ${e.toString} Stack trace - ${e.printStackTrace()}"
        logger.error(errorMsg)
        throw new ConnectException(errorMsg, e)
    }
    logger.debug(s"Obtained ${list.length} SourceRecords from IotHub")
    list.toList
  }
} 
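The sourcePartition and sourceOffset passed to each SourceRecord above are both plain java.util.Maps. A small hedged sketch of the pair; the offset key matches the example, while the partition key name here is illustrative.

import java.util.{Collections, Map => JMap}

object OffsetMapsSketch extends App {
  // Kafka Connect persists the source offset keyed by the source partition,
  // so a restarted task can resume where each partition left off.
  val sourcePartition: JMap[String, String] =
    Collections.singletonMap("EventHubPartition", "0")   // illustrative partition key
  val sourceOffset: JMap[String, String] =
    Collections.singletonMap("EventHubOffset", "12345")  // offset key used above

  println(s"$sourcePartition -> $sourceOffset")
}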
Example 8
Source File: HogEvent.scala    From hogzilla   with GNU General Public License v2.0
package org.hogzilla.event

import java.util.HashMap
import java.util.Map
import org.apache.hadoop.hbase.client.Put
import org.apache.hadoop.hbase.util.Bytes
import org.hogzilla.hbase.HogHBaseRDD
import org.hogzilla.util.HogFlow
import java.net.InetAddress


class HogEvent(flow: HogFlow) {
  var sensorid: Int = 0
  var signature_id: Double = 0
  var priorityid: Int = 0
  var text: String = ""
  var data: Map[String, String] = new HashMap()
  var ports: String = ""
  var title: String = ""
  var username: String = ""
  var coords: String = ""
 
  
  def formatIPtoBytes(ip: String): Array[Byte] = {
    try {
      // Yuck! Snorby doesn't support IPv6 yet. See https://github.com/Snorby/snorby/issues/65
      if (ip.contains(":"))
        InetAddress.getByName("255.255.6.6").getAddress
      else
        InetAddress.getByName(ip).getAddress
    } catch {
      case t: Throwable =>
        // Bogus address!
        InetAddress.getByName("255.255.1.1").getAddress
    }
  }

  
  def alert() {
    val put = new Put(Bytes.toBytes(flow.get("flow:id")))
    put.add(Bytes.toBytes("event"), Bytes.toBytes("note"), Bytes.toBytes(text))
    put.add(Bytes.toBytes("event"), Bytes.toBytes("lower_ip"), formatIPtoBytes(flow.lower_ip))
    put.add(Bytes.toBytes("event"), Bytes.toBytes("upper_ip"), formatIPtoBytes(flow.upper_ip))
    put.add(Bytes.toBytes("event"), Bytes.toBytes("lower_ip_str"), Bytes.toBytes(flow.lower_ip))
    put.add(Bytes.toBytes("event"), Bytes.toBytes("upper_ip_str"), Bytes.toBytes(flow.upper_ip))
    put.add(Bytes.toBytes("event"), Bytes.toBytes("signature_id"), Bytes.toBytes("%.0f".format(signature_id)))
    put.add(Bytes.toBytes("event"), Bytes.toBytes("time"), Bytes.toBytes(System.currentTimeMillis))
    put.add(Bytes.toBytes("event"), Bytes.toBytes("ports"), Bytes.toBytes(ports))
    put.add(Bytes.toBytes("event"), Bytes.toBytes("title"), Bytes.toBytes(title))

    if (!username.equals(""))
      put.add(Bytes.toBytes("event"), Bytes.toBytes("username"), Bytes.toBytes(username))
    if (!coords.equals(""))
      put.add(Bytes.toBytes("event"), Bytes.toBytes("coords"), Bytes.toBytes(coords))

    HogHBaseRDD.hogzilla_events.put(put)

    //println(f"ALERT: $text%100s\n\n@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@")
  }
} 
Example 9
Source File: S3ConfigPage.scala    From teamcity-s3-plugin   with Apache License 2.0
package com.gu.teamcity

import jetbrains.buildServer.controllers.admin.AdminPage
import jetbrains.buildServer.serverSide.auth.Permission
import jetbrains.buildServer.web.openapi.{Groupable, PagePlaces, PluginDescriptor}
import javax.servlet.http.HttpServletRequest
import java.util.Map

class S3ConfigPage(extension: S3ConfigManager, pagePlaces: PagePlaces, descriptor: PluginDescriptor)
  extends AdminPage(pagePlaces, "S3", descriptor.getPluginResourcesPath("input.jsp"), "S3") {

  register()

  override def fillModel(model: Map[String, AnyRef], request: HttpServletRequest) {
    import scala.collection.JavaConverters._

    // Copy the plugin's settings into the JSP model, defaulting missing values to "".
    model.putAll(extension.details.mapValues(_.getOrElse("")).asJava)
  }

  override def isAvailable(request: HttpServletRequest): Boolean = {
    super.isAvailable(request) && checkHasGlobalPermission(request, Permission.CHANGE_SERVER_SETTINGS)
  }

  def getGroup: String = {
    Groupable.SERVER_RELATED_GROUP
  }
}