java.nio.ByteOrder Scala Examples

The following examples show how to use java.nio.ByteOrder, collected from open-source Scala projects. The source file, project, and license are noted above each listing.
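Before the examples, a quick orientation: ByteOrder selects the endianness a ByteBuffer (or a builder such as akka's ByteStringBuilder) uses for multi-byte values. The snippet below is a minimal sketch, not taken from any of the projects that follow.

import java.nio.{ByteBuffer, ByteOrder}

// Buffers default to BIG_ENDIAN; order(...) changes how multi-byte
// values are encoded and decoded.
val buf = ByteBuffer.allocate(4)
buf.order(ByteOrder.LITTLE_ENDIAN).putInt(0x01020304) // stored as 04 03 02 01
buf.rewind()
val asBigEndian = buf.order(ByteOrder.BIG_ENDIAN).getInt() // 0x04030201

// nativeOrder() reports the platform's endianness (little-endian on x86).
println(ByteOrder.nativeOrder())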
Example 1
Source File: VoiceUDPFlow.scala    From AckCord   with MIT License
package ackcord.voice

import java.net.InetSocketAddress
import java.nio.ByteOrder

import scala.concurrent.{Future, Promise}

import ackcord.data.{RawSnowflake, UserId}
import ackcord.util.UdpConnectedFlow
import akka.NotUsed
import akka.actor.typed.ActorSystem
import akka.stream.scaladsl.{BidiFlow, Concat, Flow, GraphDSL, Keep, Source}
import akka.stream.{BidiShape, OverflowStrategy}
import akka.util.ByteString

object VoiceUDPFlow {

  val silence = ByteString(0xF8, 0xFF, 0xFE)

  val SampleRate = 48000
  val FrameSize  = 960
  val FrameTime  = 20

  def flow[Mat](
      remoteAddress: InetSocketAddress,
      ssrc: Int,
      serverId: RawSnowflake,
      userId: UserId,
      secretKeys: Source[Option[ByteString], Mat]
  )(implicit system: ActorSystem[Nothing]): Flow[ByteString, AudioAPIMessage.ReceivedData, (Mat, Future[FoundIP])] =
    NaclBidiFlow
      .bidiFlow(ssrc, serverId, userId, secretKeys)
      .atopMat(voiceBidi(ssrc).reversed)(Keep.both)
      .async
      .join(Flow[ByteString].buffer(32, OverflowStrategy.backpressure).via(UdpConnectedFlow.flow(remoteAddress)))

  def voiceBidi(ssrc: Int): BidiFlow[ByteString, ByteString, ByteString, ByteString, Future[FoundIP]] = {
    implicit val byteOrder: ByteOrder = ByteOrder.BIG_ENDIAN
    val ipDiscoveryPacket = {
      val byteBuilder = ByteString.createBuilder
      byteBuilder.sizeHint(74)
      // Request type 0x1, payload length 70, then our SSRC
      byteBuilder.putShort(0x1).putShort(70).putInt(ssrc)

      // The remaining 66 bytes (address and port fields) are zeroed in the request
      byteBuilder.putBytes(new Array[Byte](66))

      byteBuilder.result()
    }

    val valvePromise = Promise[Unit]
    val valve        = Source.future(valvePromise.future).drop(1).asInstanceOf[Source[ByteString, NotUsed]]

    val ipDiscoveryFlow = Flow[ByteString]
      .viaMat(new IPDiscoveryFlow(() => valvePromise.success(())))(Keep.right)

    BidiFlow
      .fromGraph(GraphDSL.create(ipDiscoveryFlow) { implicit b => ipDiscovery =>
        import GraphDSL.Implicits._

        val voiceIn = b.add(Flow[ByteString])

        val ipDiscoverySource           = b.add(Source.single(ipDiscoveryPacket) ++ valve)
        val ipDiscoveryAndThenVoiceData = b.add(Concat[ByteString]())

        ipDiscoverySource ~> ipDiscoveryAndThenVoiceData
        voiceIn ~> ipDiscoveryAndThenVoiceData

        BidiShape(
          ipDiscovery.in,
          ipDiscovery.out,
          voiceIn.in,
          ipDiscoveryAndThenVoiceData.out
        )
      })
  }

  
  case class FoundIP(address: String, port: Int)
} 
Example 2
Source File: RTPHeader.scala    From AckCord   with MIT License
package ackcord.voice

import java.nio.{ByteBuffer, ByteOrder}

import akka.util.ByteString


// Note: the enclosing declarations were truncated in this listing; the case
// class and companion object below are reconstructed from how the fields are
// read in fromBytes and constructed in apply.
case class RTPHeader(tpe: Byte, version: Byte, sequence: Short, timestamp: Int, ssrc: Int)

object RTPHeader {

  def fromBytes(bytes: ByteString): (RTPHeader, ByteString) = {
    val (header, extra) = bytes.splitAt(12)

    val buffer    = header.asByteBuffer.order(ByteOrder.BIG_ENDIAN)
    val tpe       = buffer.get()
    val version   = buffer.get()
    val sequence  = buffer.getShort()
    val timestamp = buffer.getInt()
    val ssrc      = buffer.getInt()

    //One-byte RTP header extension, see https://tools.ietf.org/html/rfc5285#section-4.2
    //Bytes must be masked to unsigned before comparing against Int literals,
    //otherwise these comparisons can never be true.
    if (tpe == 0x90.toByte && (extra(0) & 0xFF) == 0xBE && (extra(1) & 0xFF) == 0xDE) {
      val hlen = (extra(2) & 0xFF) << 8 | (extra(3) & 0xFF)
      var i    = 4

      while (i < hlen + 4) {
        val b   = extra(i)
        val len = (b & 0x0F) + 1
        i += (len + 1)
      }
      while (extra(i) == 0) i += 1

      val newAudio = extra.drop(i)
      (RTPHeader(tpe, version, sequence, timestamp, ssrc), newAudio)
    } else (RTPHeader(tpe, version, sequence, timestamp, ssrc), extra)
  }

  def apply(sequence: Short, timestamp: Int, ssrc: Int): RTPHeader =
    RTPHeader(0x80.toByte, 0x78, sequence, timestamp, ssrc)
} 
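fromBytes only covers parsing; a write-side counterpart is sketched below (an illustration, assuming the reconstructed field layout above), packing the same five fields big-endian into the fixed 12-byte header:

import java.nio.ByteOrder
import akka.util.ByteString

// Sketch of the inverse of fromBytes: pack the five header fields
// big-endian into the 12-byte fixed RTP header.
def toBytes(h: RTPHeader): ByteString = {
  implicit val byteOrder: ByteOrder = ByteOrder.BIG_ENDIAN
  ByteString.createBuilder
    .putByte(h.tpe)
    .putByte(h.version)
    .putShort(h.sequence)
    .putInt(h.timestamp)
    .putInt(h.ssrc)
    .result()
}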
Example 3
Source File: TFRecordCodec.scala    From scio   with Apache License 2.0
package com.spotify.scio.tensorflow

import java.io.{InputStream, PushbackInputStream}
import java.nio.channels.Channels
import java.nio.{ByteBuffer, ByteOrder}
import java.util.zip.GZIPInputStream

import org.apache.beam.sdk.io.Compression
import org.apache.beam.vendor.guava.v26_0_jre.com.google.common.hash.Hashing
import org.apache.beam.vendor.guava.v26_0_jre.com.google.common.primitives.Ints
import org.apache.commons.compress.compressors.deflate._
import org.apache.commons.compress.compressors.gzip._

private object TFRecordCodec {
  private val headerLength: Int =
    (java.lang.Long.SIZE + java.lang.Integer.SIZE) / java.lang.Byte.SIZE
  private val footerLength: Int = java.lang.Integer.SIZE / java.lang.Byte.SIZE
  private val crc32c = Hashing.crc32c()

  private def mask(crc: Int): Int = ((crc >>> 15) | (crc << 17)) + 0xa282ead8

  def read(input: InputStream): Array[Byte] = {
    val headerBytes = readFully(input, headerLength)
    if (headerBytes != null) {
      val headerBuf =
        ByteBuffer.wrap(headerBytes).order(ByteOrder.LITTLE_ENDIAN)
      val length = headerBuf.getLong
      val maskedCrc32OfLength = headerBuf.getInt
      require(hashLong(length) == maskedCrc32OfLength, "Invalid masked CRC32 of length")

      val data = readFully(input, length.toInt)

      val footerBytes = readFully(input, footerLength)
      val footerBuf =
        ByteBuffer.wrap(footerBytes).order(ByteOrder.LITTLE_ENDIAN)
      val maskedCrc32OfData = footerBuf.getInt
      require(hashBytes(data) == maskedCrc32OfData, "Invalid masked CRC32 of data")
      data
    } else {
      null
    }
  }

  // InflaterInputStream#read may not fill a buffer fully even when there are more data available
  private def readFully(input: InputStream, length: Int): Array[Byte] = {
    val data = Array.ofDim[Byte](length)
    var n = 0
    var off = 0
    do {
      n = input.read(data, off, data.length - off)
      if (n > 0) {
        off += n
      }
    } while (n > 0 && off < data.length)
    if (n <= 0) null else data
  }

  def wrapInputStream(stream: InputStream, compression: Compression): InputStream = {
    val deflateParam = new DeflateParameters()
    deflateParam.setWithZlibHeader(true)

    compression match {
      case Compression.AUTO =>
        val pushback = new PushbackInputStream(stream, 2)
        if (isInflaterInputStream(pushback)) {
          new DeflateCompressorInputStream(pushback, deflateParam)
        } else if (isGzipInputStream(pushback)) {
          new GzipCompressorInputStream(pushback)
        } else {
          pushback
        }
      case Compression.UNCOMPRESSED => stream
      case _ =>
        Channels.newInputStream(compression.readDecompressed(Channels.newChannel(stream)))
    }
  }

  private def hashLong(x: Long): Int = mask(crc32c.hashLong(x).asInt())
  private def hashBytes(x: Array[Byte]): Int = mask(crc32c.hashBytes(x).asInt())

  private def isGzipInputStream(pushback: PushbackInputStream): Boolean = {
    val b1 = pushback.read()
    val b2 = pushback.read()
    if (b2 != -1) pushback.unread(b2)
    if (b1 != -1) pushback.unread(b1)
    val zero: Byte = 0x00
    val header = Ints.fromBytes(zero, zero, b2.toByte, b1.toByte)
    (b1 != -1 && b2 != -1) && header == GZIPInputStream.GZIP_MAGIC
  }

  private def isInflaterInputStream(pushback: PushbackInputStream): Boolean = {
    val b1 = pushback.read()
    val b2 = pushback.read()
    if (b2 != -1) pushback.unread(b2)
    if (b1 != -1) pushback.unread(b1)
    (b1 != -1 && b2 != -1) && (b1 == 0x78 && (b1 * 256 + b2) % 31 == 0)
  }
} 
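A usage sketch for the codec above (hypothetical file name; TFRecordCodec is package-private, so this only compiles alongside it). read(...) returns null at end of stream, so records can be pulled until then:

import java.io.FileInputStream
import org.apache.beam.sdk.io.Compression

// Read every record from a possibly compressed TFRecord file.
val in = TFRecordCodec.wrapInputStream(new FileInputStream("data.tfrecord"), Compression.AUTO)
val records = Iterator.continually(TFRecordCodec.read(in)).takeWhile(_ != null).toList
in.close()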
Example 4
Source File: TrafficMonitorThread.scala    From shadowsocksr-android   with GNU General Public License v3.0
package com.github.shadowsocks.utils

import java.io.{File, IOException}
import java.nio.{ByteBuffer, ByteOrder}
import java.util.concurrent.Executors

import android.content.Context
import android.net.{LocalServerSocket, LocalSocket, LocalSocketAddress}
import android.util.Log

class TrafficMonitorThread(context: Context) extends Thread {

  val TAG = "TrafficMonitorThread"
  lazy val PATH = context.getApplicationInfo.dataDir + "/stat_path"

  @volatile var serverSocket: LocalServerSocket = null
  @volatile var isRunning: Boolean = true

  def closeServerSocket() {
    if (serverSocket != null) {
      try {
        serverSocket.close()
      } catch {
        case _: Exception => // ignore
      }
      serverSocket = null
    }
  }

  def stopThread() {
    isRunning = false
    closeServerSocket()
  }

  override def run() {

    try {
      new File(PATH).delete()
    } catch {
      case _: Exception => // ignore
    }

    try {
      val localSocket = new LocalSocket
      localSocket.bind(new LocalSocketAddress(PATH, LocalSocketAddress.Namespace.FILESYSTEM))
      serverSocket = new LocalServerSocket(localSocket.getFileDescriptor)
    } catch {
      case e: IOException =>
        Log.e(TAG, "unable to bind", e)
        return
    }

    val pool = Executors.newFixedThreadPool(1)

    while (isRunning) {
      try {
        val socket = serverSocket.accept()

        pool.execute(() => {
          try {
            val input = socket.getInputStream
            val output = socket.getOutputStream

            val buffer = new Array[Byte](16)
            if (input.read(buffer) != 16) throw new IOException("Unexpected traffic stat length")
            val stat = ByteBuffer.wrap(buffer).order(ByteOrder.LITTLE_ENDIAN)
            TrafficMonitor.update(stat.getLong(0), stat.getLong(8))

            output.write(0)

            input.close()
            output.close()

          } catch {
            case e: Exception =>
              Log.e(TAG, "Error when recv traffic stat", e)
          }

          // close socket
          try {
            socket.close()
          } catch {
            case _: Exception => // ignore
          }

        })
      } catch {
        case e: IOException =>
          Log.e(TAG, "Error when accept socket", e)
          return
      }
    }
  }
} 
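For reference, the monitor above expects each update as two little-endian longs packed back to back (16 bytes, read via getLong(0) and getLong(8)). A minimal sketch of the sending side under that assumption (the tx/rx naming is a guess):

import java.nio.{ByteBuffer, ByteOrder}

// Pack two counters the way TrafficMonitorThread reads them.
def encodeStat(tx: Long, rx: Long): Array[Byte] =
  ByteBuffer.allocate(16).order(ByteOrder.LITTLE_ENDIAN).putLong(tx).putLong(rx).array()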
Example 5
Source File: ByteUtils.scala    From iotchain   with MIT License
package jbok.common

import java.nio.{ByteBuffer, ByteOrder}

import scodec.bits.ByteVector

object ByteUtils {
  def or(arrays: ByteVector*): ByteVector = {
    require(arrays.map(_.length).distinct.length <= 1, "All the arrays should have the same length")
    require(arrays.nonEmpty, "There should be one or more arrays")

    val zeroes = ByteVector.fill(arrays.headOption.map(_.length).getOrElse(0))(0.toByte)
    arrays.foldLeft[ByteVector](zeroes) {
      case (acc, cur) => acc or cur
    }
  }

  def and(arrays: ByteVector*): ByteVector = {
    require(arrays.map(_.length).distinct.length <= 1, "All the arrays should have the same length")
    require(arrays.nonEmpty, "There should be one or more arrays")

    val ones = ByteVector.fill(arrays.headOption.map(_.length).getOrElse(0))(0xFF.toByte)
    arrays.foldLeft[ByteVector](ones) {
      case (acc, cur) => acc and cur
    }
  }

  def bytesToInts(bytes: Array[Byte]): Array[Int] =
    bytes.grouped(4).map(getIntFromWord).toArray

  def intsToBytes(input: Array[Int]): Array[Byte] =
    input.flatMap { i =>
      Array(
        (i & 0xFF).toByte,
        ((i >> 8) & 0xFF).toByte,
        ((i >> 16) & 0xFF).toByte,
        ((i >> 24) & 0xFF).toByte
      )
    }

  def getIntFromWord(arr: Array[Byte]): Int =
    ByteBuffer.wrap(arr, 0, 4).order(ByteOrder.LITTLE_ENDIAN).getInt
} 
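Note that intsToBytes hand-rolls the little-endian packing that getIntFromWord delegates to ByteBuffer; an equivalent sketch of intsToBytes using an explicit ByteOrder instead of manual shifting:

import java.nio.{ByteBuffer, ByteOrder}

// Same output as ByteUtils.intsToBytes, but the endianness is
// handled by the buffer rather than by bit arithmetic.
def intsToBytesViaBuffer(input: Array[Int]): Array[Byte] = {
  val bb = ByteBuffer.allocate(input.length * 4).order(ByteOrder.LITTLE_ENDIAN)
  input.foreach(i => bb.putInt(i))
  bb.array()
}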
Example 6
Source File: NullableColumnAccessor.scala    From drizzle-spark   with Apache License 2.0
package org.apache.spark.sql.execution.columnar

import java.nio.{ByteBuffer, ByteOrder}

import org.apache.spark.sql.catalyst.InternalRow

private[columnar] trait NullableColumnAccessor extends ColumnAccessor {
  private var nullsBuffer: ByteBuffer = _
  private var nullCount: Int = _
  private var seenNulls: Int = 0

  private var nextNullIndex: Int = _
  private var pos: Int = 0

  abstract override protected def initialize(): Unit = {
    nullsBuffer = underlyingBuffer.duplicate().order(ByteOrder.nativeOrder())
    nullCount = ByteBufferHelper.getInt(nullsBuffer)
    nextNullIndex = if (nullCount > 0) ByteBufferHelper.getInt(nullsBuffer) else -1
    pos = 0

    underlyingBuffer.position(underlyingBuffer.position + 4 + nullCount * 4)
    super.initialize()
  }

  abstract override def extractTo(row: InternalRow, ordinal: Int): Unit = {
    if (pos == nextNullIndex) {
      seenNulls += 1

      if (seenNulls < nullCount) {
        nextNullIndex = ByteBufferHelper.getInt(nullsBuffer)
      }

      row.setNullAt(ordinal)
    } else {
      super.extractTo(row, ordinal)
    }

    pos += 1
  }

  abstract override def hasNext: Boolean = seenNulls < nullCount || super.hasNext
} 
Example 7
Source File: NullableColumnBuilder.scala    From drizzle-spark   with Apache License 2.0
package org.apache.spark.sql.execution.columnar

import java.nio.{ByteBuffer, ByteOrder}

import org.apache.spark.sql.catalyst.InternalRow


private[columnar] trait NullableColumnBuilder extends ColumnBuilder {
  protected var nulls: ByteBuffer = _
  protected var nullCount: Int = _
  private var pos: Int = _

  abstract override def initialize(
      initialSize: Int,
      columnName: String,
      useCompression: Boolean): Unit = {

    nulls = ByteBuffer.allocate(1024)
    nulls.order(ByteOrder.nativeOrder())
    pos = 0
    nullCount = 0
    super.initialize(initialSize, columnName, useCompression)
  }

  abstract override def appendFrom(row: InternalRow, ordinal: Int): Unit = {
    columnStats.gatherStats(row, ordinal)
    if (row.isNullAt(ordinal)) {
      nulls = ColumnBuilder.ensureFreeSpace(nulls, 4)
      nulls.putInt(pos)
      nullCount += 1
    } else {
      super.appendFrom(row, ordinal)
    }
    pos += 1
  }

  abstract override def build(): ByteBuffer = {
    val nonNulls = super.build()
    val nullDataLen = nulls.position()

    nulls.limit(nullDataLen)
    nulls.rewind()

    val buffer = ByteBuffer
      .allocate(4 + nullDataLen + nonNulls.remaining())
      .order(ByteOrder.nativeOrder())
      .putInt(nullCount)
      .put(nulls)
      .put(nonNulls)

    buffer.rewind()
    buffer
  }

  protected def buildNonNulls(): ByteBuffer = {
    nulls.limit(nulls.position()).rewind()
    super.build()
  }
} 
Example 8
Source File: CompressibleColumnBuilder.scala    From drizzle-spark   with Apache License 2.0
package org.apache.spark.sql.execution.columnar.compression

import java.nio.{ByteBuffer, ByteOrder}

import org.apache.spark.internal.Logging
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.execution.columnar.{ColumnBuilder, NativeColumnBuilder}
import org.apache.spark.sql.types.AtomicType
import org.apache.spark.unsafe.Platform


private[columnar] trait CompressibleColumnBuilder[T <: AtomicType]
  extends ColumnBuilder with Logging {

  this: NativeColumnBuilder[T] with WithCompressionSchemes =>

  var compressionEncoders: Seq[Encoder[T]] = _

  abstract override def initialize(
      initialSize: Int,
      columnName: String,
      useCompression: Boolean): Unit = {

    compressionEncoders =
      if (useCompression) {
        schemes.filter(_.supports(columnType)).map(_.encoder[T](columnType))
      } else {
        Seq(PassThrough.encoder(columnType))
      }
    super.initialize(initialSize, columnName, useCompression)
  }

  // The various compression schemes, while saving memory use, cause all of the data within
  // the row to become unaligned, thus causing crashes.  Until a way of fixing the compression
  // is found to also allow aligned accesses this must be disabled for SPARC.

  protected def isWorthCompressing(encoder: Encoder[T]) = {
    CompressibleColumnBuilder.unaligned && encoder.compressionRatio < 0.8
  }

  private def gatherCompressibilityStats(row: InternalRow, ordinal: Int): Unit = {
    compressionEncoders.foreach(_.gatherCompressibilityStats(row, ordinal))
  }

  abstract override def appendFrom(row: InternalRow, ordinal: Int): Unit = {
    super.appendFrom(row, ordinal)
    if (!row.isNullAt(ordinal)) {
      gatherCompressibilityStats(row, ordinal)
    }
  }

  override def build(): ByteBuffer = {
    val nonNullBuffer = buildNonNulls()
    val encoder: Encoder[T] = {
      val candidate = compressionEncoders.minBy(_.compressionRatio)
      if (isWorthCompressing(candidate)) candidate else PassThrough.encoder(columnType)
    }

    // Header = null count + null positions
    val headerSize = 4 + nulls.limit()
    val compressedSize = if (encoder.compressedSize == 0) {
      nonNullBuffer.remaining()
    } else {
      encoder.compressedSize
    }

    val compressedBuffer = ByteBuffer
      // Reserves 4 bytes for compression scheme ID
      .allocate(headerSize + 4 + compressedSize)
      .order(ByteOrder.nativeOrder)
      // Write the header
      .putInt(nullCount)
      .put(nulls)

    logDebug(s"Compressor for [$columnName]: $encoder, ratio: ${encoder.compressionRatio}")
    encoder.compress(nonNullBuffer, compressedBuffer)
  }
}

private[columnar] object CompressibleColumnBuilder {
  val unaligned = Platform.unaligned()
} 
Example 9
Source File: TFRecordWriter.scala    From BigDL   with Apache License 2.0
package com.intel.analytics.bigdl.utils.tf

import java.io.OutputStream
import java.nio.{ByteBuffer, ByteOrder}

import com.intel.analytics.bigdl.utils.Crc32

class TFRecordWriter(out: OutputStream) {

  private def toByteArrayAsLong(data: Long): Array[Byte] = {
    val buff = new Array[Byte](8)
    val bb = ByteBuffer.wrap(buff)
    bb.order(ByteOrder.LITTLE_ENDIAN)
    bb.putLong(data)
    buff
  }

  private def toByteArrayAsInt(data: Int): Array[Byte] = {
    val buff = new Array[Byte](4)
    val bb = ByteBuffer.wrap(buff)
    bb.order(ByteOrder.LITTLE_ENDIAN)
    bb.putInt(data)
    buff
  }

  def write(record: Array[Byte], offset: Int, length: Int): Unit = {
    val len = toByteArrayAsLong(length)
    out.write(len)
    out.write(toByteArrayAsInt(Crc32.maskedCRC32(len).toInt))
    out.write(record, offset, length)
    out.write(toByteArrayAsInt(Crc32.maskedCRC32(record, offset, length).toInt))
  }

  def write(record: Array[Byte]): Unit = {
    write(record, 0, record.length)
  }
} 
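A minimal usage sketch for the writer above (hypothetical file name and payload); each write emits the length, a masked CRC of the length, the payload, and a masked CRC of the payload, matching the TFRecord framing read back in Example 3:

import java.io.{BufferedOutputStream, FileOutputStream}

// Write one length-delimited, CRC-checked record.
val out = new BufferedOutputStream(new FileOutputStream("train.tfrecord"))
val writer = new TFRecordWriter(out)
writer.write("example-payload".getBytes("UTF-8"))
out.close()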
Example 10
Source File: Conv2DBackpropFilter.scala    From BigDL   with Apache License 2.0
package com.intel.analytics.bigdl.utils.tf.loaders

import java.nio.ByteOrder

import com.intel.analytics.bigdl.Module
import com.intel.analytics.bigdl.nn.abstractnn.{AbstractModule, Activity, DataFormat}
import com.intel.analytics.bigdl.nn.tf.Conv2DBackFilter
import com.intel.analytics.bigdl.tensor.TensorNumericMath.TensorNumeric
import com.intel.analytics.bigdl.utils.tf.Context
import org.tensorflow.framework.NodeDef

import scala.reflect.ClassTag

class Conv2DBackpropFilter extends TensorflowOpsLoader {

  import Utils._

  override def build[T: ClassTag](nodeDef: NodeDef, byteOrder: ByteOrder,
    context: Context[T])(implicit ev: TensorNumeric[T]): Module[T] = {
    val attributes = nodeDef.getAttrMap
    val (pW, pH) =
      if (getString(attributes, "padding") == "SAME") {
        (-1, -1)
      } else {
        (0, 0)
      }
    val strideList = getIntList(attributes, "strides")
    require(strideList.head == 1, s"not support strides on batch")

    val format = getString(attributes, "data_format")
    val convBackFilter = format match {
      case "NHWC" =>
        require(strideList(3) == 1, s"not support strides on depth")
        val strideW = strideList(1)
        val strideH = strideList(2)
        Conv2DBackFilter[T](strideW, strideH, pW, pH, DataFormat.NHWC)

      case "NCHW" =>
        require(strideList(1) == 1, s"not support strides on depth")
        val strideW = strideList(2)
        val strideH = strideList(3)
        Conv2DBackFilter[T](strideW, strideH, pW, pH, DataFormat.NCHW)
      case _ =>
        throw new IllegalArgumentException(s"not supported data format: $format")
    }
    convBackFilter.asInstanceOf[AbstractModule[Activity, Activity, T]]
  }
} 
Example 11
Source File: DepthwiseConv2dNativeBackpropFilter.scala    From BigDL   with Apache License 2.0
package com.intel.analytics.bigdl.utils.tf.loaders

import java.nio.ByteOrder

import com.intel.analytics.bigdl.Module
import com.intel.analytics.bigdl.nn.abstractnn.{AbstractModule, Activity, DataFormat}
import com.intel.analytics.bigdl.nn.ops.{DepthwiseConv2DBackpropFilter, DepthwiseConv2DBackpropInput}
import com.intel.analytics.bigdl.tensor.TensorNumericMath.TensorNumeric
import com.intel.analytics.bigdl.utils.tf.Context
import org.tensorflow.framework.NodeDef

import scala.reflect.ClassTag

class DepthwiseConv2dNativeBackpropFilter extends TensorflowOpsLoader {

  import Utils._

  override def build[T: ClassTag](nodeDef: NodeDef, byteOrder: ByteOrder, context: Context[T])
    (implicit ev: TensorNumeric[T]): Module[T] = {

    val attributes = nodeDef.getAttrMap
    val (pW, pH) =
      if (getString(attributes, "padding") == "SAME") {
        (-1, -1)
      } else {
        (0, 0)
      }
    val strideList = getIntList(attributes, "strides")
    require(strideList.head == 1, s"not support strides on batch")

    val format = getString(attributes, "data_format")
    val conv = format match {
      case "NHWC" =>
        require(strideList(3) == 1, s"not support strides on depth")
        val strideW = strideList(1)
        val strideH = strideList(2)
        DepthwiseConv2DBackpropFilter[T](strideW, strideH, pW, pH, DataFormat.NHWC)

      case "NCHW" =>
        require(strideList(1) == 1, s"not support strides on depth")
        val strideW = strideList(2)
        val strideH = strideList(3)
        DepthwiseConv2DBackpropFilter[T](strideW, strideH, pW, pH, DataFormat.NCHW)
      case _ =>
        throw new IllegalArgumentException(s"not supported data format: $format")
    }
    conv.asInstanceOf[AbstractModule[Activity, Activity, T]]
  }
} 
Example 12
Source File: FusedBatchNormV2.scala    From BigDL   with Apache License 2.0
package com.intel.analytics.bigdl.utils.tf.loaders

import java.nio.ByteOrder

import com.intel.analytics.bigdl.Module
import com.intel.analytics.bigdl.nn.abstractnn.DataFormat
import com.intel.analytics.bigdl.nn.tf.{FusedBatchNorm => FusedBatchNormOps}
import com.intel.analytics.bigdl.tensor.TensorNumericMath.TensorNumeric
import com.intel.analytics.bigdl.utils.tf.Context
import org.tensorflow.framework.{DataType, NodeDef}

import scala.reflect.ClassTag

class FusedBatchNormV2 extends TensorflowOpsLoader {

  import Utils._

  override def build[T: ClassTag](nodeDef: NodeDef, byteOrder: ByteOrder, context: Context[T])
    (implicit ev: TensorNumeric[T]): Module[T] = {
    val t = getType(nodeDef.getAttrMap, "T")
    val u = getType(nodeDef.getAttrMap, "U")
    require(t == DataType.DT_FLOAT, "T: Only support float batch normal")
    require(u == DataType.DT_FLOAT, "U: Only support float batch normal")
    val eps = getFloat(nodeDef.getAttrMap, "epsilon")
    val dataFormat = getString(nodeDef.getAttrMap, "data_format")
    val isTrain = getBoolean(nodeDef.getAttrMap, "is_training")
    if (dataFormat == "NHWC") {
      FusedBatchNormOps[T](eps, isTrain, dataFormat = DataFormat.NHWC)
    } else {
      FusedBatchNormOps[T](eps, isTrain, dataFormat = DataFormat.NCHW)
    }
  }
} 
Example 13
Source File: Relu6Grad.scala    From BigDL   with Apache License 2.0
package com.intel.analytics.bigdl.utils.tf.loaders

import java.nio.ByteOrder

import com.intel.analytics.bigdl.Module
import com.intel.analytics.bigdl.nn.tf.{Relu6Grad => Relu6GradOps}
import com.intel.analytics.bigdl.tensor.TensorNumericMath.TensorNumeric
import com.intel.analytics.bigdl.utils.tf.Context
import com.intel.analytics.bigdl.utils.tf.loaders.Utils.getType
import org.tensorflow.framework.{DataType, NodeDef}

import scala.reflect.ClassTag

class Relu6Grad extends TensorflowOpsLoader {


  override def build[T: ClassTag](nodeDef: NodeDef, byteOrder: ByteOrder,
                                  context: Context[T])(implicit ev: TensorNumeric[T]): Module[T] = {
    val t = getType(nodeDef.getAttrMap, "T")
    if (t == DataType.DT_FLOAT) {
      Relu6GradOps[T, Float]()
    } else if (t == DataType.DT_DOUBLE) {
      Relu6GradOps[T, Double]()
    } else {
      throw new UnsupportedOperationException(s"Not support load ReLU6 when type is ${t}")
    }
  }
} 
Example 14
Source File: Erfc.scala    From BigDL   with Apache License 2.0
package com.intel.analytics.bigdl.utils.tf.loaders

import java.nio.ByteOrder

import com.intel.analytics.bigdl.Module
import com.intel.analytics.bigdl.nn.ops.{Erfc => ErfcOps}
import com.intel.analytics.bigdl.tensor.TensorNumericMath.TensorNumeric
import com.intel.analytics.bigdl.utils.tf.Context
import org.tensorflow.framework.{DataType, NodeDef}

import scala.reflect.ClassTag

class Erfc extends TensorflowOpsLoader {

  import Utils._

  override def build[T: ClassTag](nodeDef: NodeDef, byteOrder: ByteOrder, context: Context[T])
                                 (implicit ev: TensorNumeric[T]): Module[T] = {
    val t = getType(nodeDef.getAttrMap, "T")
    if (t == DataType.DT_FLOAT) {
      ErfcOps[T, Float]()
    } else if (t == DataType.DT_DOUBLE) {
      ErfcOps[T, Double]()
    } else {
      throw new UnsupportedOperationException(s"Not support load Erfc when type is ${t}")
    }
  }
} 
Example 15
Source File: AvgPoolGrad.scala    From BigDL   with Apache License 2.0
package com.intel.analytics.bigdl.utils.tf.loaders

import java.nio.ByteOrder

import com.intel.analytics.bigdl.Module
import com.intel.analytics.bigdl.nn.abstractnn.DataFormat
import com.intel.analytics.bigdl.nn.tf.{AvgPoolGrad => AvgPoolGradOps}
import com.intel.analytics.bigdl.tensor.TensorNumericMath.TensorNumeric
import com.intel.analytics.bigdl.utils.tf.Context
import org.tensorflow.framework.NodeDef

import scala.reflect.ClassTag

class AvgPoolGrad extends TensorflowOpsLoader {

  import Utils._

  override def build[T: ClassTag](nodeDef: NodeDef, byteOrder: ByteOrder,
                                  context: Context[T])(implicit ev: TensorNumeric[T]): Module[T] = {
    val attributes = nodeDef.getAttrMap
    val (pW, pH) =
      if (getString(attributes, "padding") == "SAME") {
        (-1, -1)
      } else {
        (0, 0)
      }
    val strideList = getIntList(attributes, "strides")
    require(strideList.head == 1, s"not support strides on batch")
    val kernelSize = getIntList(attributes, "ksize")
    require(kernelSize.head == 1, s"not support kernel on batch")

    val format = getString(attributes, "data_format")
    val poolgrad = format match {
      case "NHWC" =>
        require(strideList(3) == 1, s"not support strides on depth")
        val strideW = strideList(1)
        val strideH = strideList(2)
        val kW = kernelSize(1)
        val kH = kernelSize(2)
        AvgPoolGradOps[T](kW, kH, strideW, strideH, pW, pH, DataFormat.NHWC)

      case "NCHW" =>
        require(strideList(1) == 1, s"not support strides on depth")
        val strideW = strideList(2)
        val strideH = strideList(3)
        val kW = kernelSize(2)
        val kH = kernelSize(3)
        AvgPoolGradOps[T](kW, kH, strideW, strideH, pW, pH, DataFormat.NCHW)
      case _ =>
        throw new IllegalArgumentException(s"not supported data format: $format")
    }
    poolgrad.asInstanceOf[Module[T]]
  }
} 
Example 16
Source File: EluGrad.scala    From BigDL   with Apache License 2.0
package com.intel.analytics.bigdl.utils.tf.loaders

import java.nio.ByteOrder

import com.intel.analytics.bigdl.Module
import com.intel.analytics.bigdl.nn.tf.{EluGrad => EluGradOps}
import com.intel.analytics.bigdl.tensor.TensorNumericMath.TensorNumeric
import com.intel.analytics.bigdl.utils.tf.Context
import com.intel.analytics.bigdl.utils.tf.loaders.Utils.getType
import org.tensorflow.framework.{DataType, NodeDef}

import scala.reflect.ClassTag

class EluGrad extends TensorflowOpsLoader {


  override def build[T: ClassTag](nodeDef: NodeDef, byteOrder: ByteOrder,
                                  context: Context[T])(implicit ev: TensorNumeric[T]): Module[T] = {
    val t = getType(nodeDef.getAttrMap, "T")
    if (t == DataType.DT_FLOAT) {
      EluGradOps[T, Float]()
    } else if (t == DataType.DT_DOUBLE) {
      EluGradOps[T, Double]()
    } else {
      throw new UnsupportedOperationException(s"Not support load ReLU6 when type is ${t}")
    }
  }
} 
Example 17
Source File: TopKV2.scala    From BigDL   with Apache License 2.0
package com.intel.analytics.bigdl.utils.tf.loaders

import java.nio.ByteOrder

import com.intel.analytics.bigdl.Module
import com.intel.analytics.bigdl.nn.abstractnn.{AbstractModule, Activity}
import com.intel.analytics.bigdl.nn.ops.{TopK => TopKOps}
import com.intel.analytics.bigdl.tensor.Tensor
import com.intel.analytics.bigdl.tensor.TensorNumericMath.TensorNumeric
import com.intel.analytics.bigdl.utils.tf.Context
import org.tensorflow.framework.{DataType, NodeDef}

import scala.reflect.ClassTag

class TopKV2 extends TensorflowOpsLoader {

  import Utils._

  override def build[T: ClassTag](nodeDef: NodeDef, byteOrder: ByteOrder, context: Context[T])
    (implicit ev: TensorNumeric[T]): Module[T] = {

    val s = if (nodeDef.getAttrMap.containsKey("sorted")) {
      getBoolean(nodeDef.getAttrMap, "sorted")
    } else {
      true
    }
    val t = getType(nodeDef.getAttrMap, "T")
    val ts = if (t == DataType.DT_FLOAT) {
      "Float"
    } else if (t == DataType.DT_DOUBLE) {
      "Double"
    } else {
      throw new UnsupportedOperationException(s"Not support load Inv when type is ${t}")
    }

    new TopKV2LoadTF[T](s, ts)
  }
}

class TopKV2LoadTF[T: ClassTag](s: Boolean, t: String)(implicit ev: TensorNumeric[T])
  extends Adapter[T](Array(2)) {
  override def build(tensorArrays: Array[Tensor[_]]): AbstractModule[Activity, Activity, T] = {
    val kTensor = tensorArrays(0).asInstanceOf[Tensor[Int]]
    require(kTensor.isScalar, "Invalid input k")
    val k = kTensor.value()

    if (t == "Float") {
      TopKOps[T, Float](k, s, startIndex = 0)
    } else if (t == "Double") {
      TopKOps[T, Double](k, s, startIndex = 0)
    } else {
      throw new UnsupportedOperationException(s"Not support load Inv when type is ${t}")
    }
  }
} 
Example 18
Source File: LRN.scala    From BigDL   with Apache License 2.0
package com.intel.analytics.bigdl.utils.tf.loaders

import java.nio.ByteOrder

import com.intel.analytics.bigdl.Module
import com.intel.analytics.bigdl.nn.SpatialCrossMapLRN
import com.intel.analytics.bigdl.nn.abstractnn.DataFormat
import com.intel.analytics.bigdl.tensor.TensorNumericMath.TensorNumeric
import com.intel.analytics.bigdl.utils.tf.Context
import org.tensorflow.framework.NodeDef

import scala.reflect.ClassTag

class LRN extends TensorflowOpsLoader {

  import Utils._

  override def build[T: ClassTag](nodeDef: NodeDef, byteOrder: ByteOrder, context: Context[T])
    (implicit ev: TensorNumeric[T]): Module[T] = {
    val size = getInt(nodeDef.getAttrMap, "depth_radius")
    val k = getFloat(nodeDef.getAttrMap, "bias")
    val alpha = getFloat(nodeDef.getAttrMap, "alpha")
    val beta = getFloat(nodeDef.getAttrMap, "beta")
    SpatialCrossMapLRN[T](
      size = size * 2 + 1,
      k = k,
      alpha = alpha * (size * 2 + 1),
      beta = beta,
      format = DataFormat.NHWC
    )
  }
} 
Example 19
Source File: Reciprocal.scala    From BigDL   with Apache License 2.0
package com.intel.analytics.bigdl.utils.tf.loaders

import java.nio.ByteOrder

import com.intel.analytics.bigdl.Module
import com.intel.analytics.bigdl.nn.ops.{Inv => InvOps}
import com.intel.analytics.bigdl.tensor.TensorNumericMath.TensorNumeric
import com.intel.analytics.bigdl.utils.tf.Context
import org.tensorflow.framework.{DataType, NodeDef}

import scala.reflect.ClassTag

class Reciprocal extends TensorflowOpsLoader {

  import Utils._

  override def build[T: ClassTag](nodeDef: NodeDef, byteOrder: ByteOrder, context: Context[T])
    (implicit ev: TensorNumeric[T]): Module[T] = {

    val t = getType(nodeDef.getAttrMap, "T")
    if (t == DataType.DT_FLOAT) {
      InvOps[T, Float]()
    } else if (t == DataType.DT_DOUBLE) {
      InvOps[T, Double]()
    } else {
      throw new UnsupportedOperationException(s"Not support load Inv when type is ${t}")
    }
  }
} 
Example 20
Source File: Mean.scala    From BigDL   with Apache License 2.0
package com.intel.analytics.bigdl.utils.tf.loaders

import java.nio.ByteOrder

import com.intel.analytics.bigdl.Module
import com.intel.analytics.bigdl.nn.abstractnn.{AbstractModule, Activity}
import com.intel.analytics.bigdl.nn.Sequential
import com.intel.analytics.bigdl.nn.tf.Mean
import com.intel.analytics.bigdl.tensor.Tensor
import com.intel.analytics.bigdl.tensor.TensorNumericMath.TensorNumeric
import com.intel.analytics.bigdl.utils.tf.Context
import org.tensorflow.framework.{DataType, NodeDef}

import scala.collection.mutable.ArrayBuffer
import scala.reflect.ClassTag

class Mean extends TensorflowOpsLoader {

  import Utils._

  override def build[T: ClassTag](nodeDef: NodeDef, byteOrder: ByteOrder,
    context: Context[T])(implicit ev: TensorNumeric[T]): Module[T] = {
    val attr = nodeDef.getAttrMap
    val dataType = getType(attr, "T")
    val squeeze = !getBoolean(attr, "keep_dims")
    val dt = dataType match {
      case DataType.DT_INT8 =>
        "Int"
      case DataType.DT_INT16 =>
        "Int"
      case DataType.DT_UINT8 =>
        "Int"
      case DataType.DT_UINT16 =>
        "Int"
      case DataType.DT_INT32 =>
        "Int"
      case DataType.DT_INT64 =>
        "Long"
      case DataType.DT_FLOAT =>
        "Float"
      case DataType.DT_DOUBLE =>
        "Double"
      case _ => throw new UnsupportedOperationException("Data Type: " + dataType +
        " is not Unsupported yet.")
    }
    new MeanLoadTF[T](dt, squeeze)
  }
}

class MeanLoadTF[T: ClassTag](val dataType: String,
                              val squeeze: Boolean)(implicit ev: TensorNumeric[T])
  extends Adapter[T](Array(2)) {
  override def build(tensorArrays: Array[Tensor[_]]): AbstractModule[Activity, Activity, T] = {
    val dims = tensorArrays(0).asInstanceOf[Tensor[Int]]
    val dim = ArrayBuffer[Int]()
    val mean = Sequential[T]()
    for (i <- 1 to dims.size(1)) {
      dim += dims.valueAt(i) + 1
    }
    dataType match {
      case "Int" =>
        dim.foreach(i => mean.add(Mean[T, Int](i, squeeze = squeeze)))
      case "Long" =>
        dim.foreach(i => mean.add(Mean[T, Long](i, squeeze = squeeze)))
      case "Float" =>
        dim.foreach(i => mean.add(Mean[T, Float](i, squeeze = squeeze)))
      case "Double" =>
        dim.foreach(i => mean.add(Mean[T, Double](i, squeeze = squeeze)))
      case _ => throw new UnsupportedOperationException("Data Type: " + dataType +
        " is not Unsupported yet.")
    }
    mean
  }
} 
Example 21
Source File: TruncateDiv.scala    From BigDL   with Apache License 2.0
package com.intel.analytics.bigdl.utils.tf.loaders

import java.nio.ByteOrder

import com.intel.analytics.bigdl.Module
import com.intel.analytics.bigdl.nn.ops.{TruncateDiv => TruncateDivOps}
import com.intel.analytics.bigdl.tensor.TensorNumericMath.TensorNumeric
import com.intel.analytics.bigdl.utils.tf.Context
import org.tensorflow.framework.{DataType, NodeDef}

import scala.reflect.ClassTag

class TruncateDiv extends TensorflowOpsLoader {

  import Utils._

  override def build[T: ClassTag](nodeDef: NodeDef, byteOrder: ByteOrder, context: Context[T])
    (implicit ev: TensorNumeric[T]): Module[T] = {
    val t = getType(nodeDef.getAttrMap, "T")
    if (t == DataType.DT_INT32) {
      TruncateDivOps[T, Int]()
    } else {
      throw new UnsupportedOperationException(s"Not support load TruncateDiv when type is ${t}")
    }
  }
} 
Example 22
Source File: DepthwiseConv2dNativeBackpropInput.scala    From BigDL   with Apache License 2.0
package com.intel.analytics.bigdl.utils.tf.loaders

import java.nio.ByteOrder

import com.intel.analytics.bigdl.Module
import com.intel.analytics.bigdl.nn.abstractnn.{AbstractModule, Activity, DataFormat}
import com.intel.analytics.bigdl.nn.ops.{DepthwiseConv2D, DepthwiseConv2DBackpropInput}
import com.intel.analytics.bigdl.tensor.TensorNumericMath.TensorNumeric
import com.intel.analytics.bigdl.utils.tf.Context
import org.tensorflow.framework.NodeDef

import scala.reflect.ClassTag

class DepthwiseConv2dNativeBackpropInput extends TensorflowOpsLoader {

  import Utils._

  override def build[T: ClassTag](nodeDef: NodeDef, byteOrder: ByteOrder, context: Context[T])
    (implicit ev: TensorNumeric[T]): Module[T] = {

    val attributes = nodeDef.getAttrMap
    val (pW, pH) =
      if (getString(attributes, "padding") == "SAME") {
        (-1, -1)
      } else {
        (0, 0)
      }
    val strideList = getIntList(attributes, "strides")
    require(strideList.head == 1, s"not support strides on batch")

    val format = getString(attributes, "data_format")
    val conv = format match {
      case "NHWC" =>
        require(strideList(3) == 1, s"not support strides on depth")
        val strideW = strideList(1)
        val strideH = strideList(2)
        DepthwiseConv2DBackpropInput[T](strideW, strideH, pW, pH, DataFormat.NHWC)

      case "NCHW" =>
        require(strideList(1) == 1, s"not support strides on depth")
        val strideW = strideList(2)
        val strideH = strideList(3)
        DepthwiseConv2DBackpropInput[T](strideW, strideH, pW, pH, DataFormat.NCHW)
      case _ =>
        throw new IllegalArgumentException(s"not supported data format: $format")
    }
    conv.asInstanceOf[AbstractModule[Activity, Activity, T]]
  }
} 
Example 23
Source File: BatchMatMul.scala    From BigDL   with Apache License 2.0
package com.intel.analytics.bigdl.utils.tf.loaders

import java.nio.ByteOrder

import com.intel.analytics.bigdl.Module
import com.intel.analytics.bigdl.nn.ops.{BatchMatMul => BatchMatMulOps}
import com.intel.analytics.bigdl.tensor.TensorNumericMath.TensorNumeric
import com.intel.analytics.bigdl.utils.tf.Context
import org.tensorflow.framework.{DataType, NodeDef}

import scala.reflect.ClassTag

class BatchMatMul extends TensorflowOpsLoader {

  import Utils._

  override def build[T: ClassTag](nodeDef: NodeDef, byteOrder: ByteOrder,
                                  context: Context[T])(implicit ev: TensorNumeric[T]): Module[T] = {
    val t = getType(nodeDef.getAttrMap, "T")
    val adjX = getBoolean(nodeDef.getAttrMap, "adj_x")
    val adjY = getBoolean(nodeDef.getAttrMap, "adj_y")
    if (t == DataType.DT_FLOAT) {
      BatchMatMulOps[T, Float](adjX, adjY)
    } else if (t == DataType.DT_DOUBLE) {
      BatchMatMulOps[T, Double](adjX, adjY)
    } else {
      throw new UnsupportedOperationException(s"Not support load ReLU6 when type is $t")
    }
  }
} 
Example 24
Source File: ConcatV2.scala    From BigDL   with Apache License 2.0
package com.intel.analytics.bigdl.utils.tf.loaders

import java.nio.ByteOrder

import com.intel.analytics.bigdl.Module
import com.intel.analytics.bigdl.nn.JoinTable
import com.intel.analytics.bigdl.nn.abstractnn.{AbstractModule, Activity}
import com.intel.analytics.bigdl.tensor.Tensor
import com.intel.analytics.bigdl.tensor.TensorNumericMath.TensorNumeric
import com.intel.analytics.bigdl.utils.tf.Context
import org.tensorflow.framework.NodeDef

import scala.reflect.ClassTag

class ConcatV2 extends TensorflowOpsLoader {

  import Utils._

  override def build[T: ClassTag](nodeDef: NodeDef, byteOrder: ByteOrder,
    context: Context[T])(implicit ev: TensorNumeric[T]): Module[T] = {
    new ConcatV2LoadTF[T]()
  }
}

class ConcatV2LoadTF[T: ClassTag]()(implicit ev: TensorNumeric[T]) extends Adapter[T](Array(-1)) {
  override def build(tensorArrays: Array[Tensor[_]]): AbstractModule[Activity, Activity, T] = {
    val axis = tensorArrays(0).value().asInstanceOf[Int] + 1
    JoinTable[T](dimension = axis, nInputDims = -1)
  }
} 
Example 25
Source File: LRNGrad.scala    From BigDL   with Apache License 2.0
package com.intel.analytics.bigdl.utils.tf.loaders

import java.nio.ByteOrder

import com.intel.analytics.bigdl.Module
import com.intel.analytics.bigdl.nn.tf.{LRNGrad => LRNGradOps}
import com.intel.analytics.bigdl.tensor.TensorNumericMath.TensorNumeric
import com.intel.analytics.bigdl.utils.tf.Context
import org.tensorflow.framework.NodeDef

import scala.reflect.ClassTag

class LRNGrad extends TensorflowOpsLoader {

  import Utils._

  override def build[T: ClassTag](nodeDef: NodeDef, byteOrder: ByteOrder, context: Context[T])
    (implicit ev: TensorNumeric[T]): Module[T] = {
    val size = getInt(nodeDef.getAttrMap, "depth_radius")
    val k = getFloat(nodeDef.getAttrMap, "bias")
    val alpha = getFloat(nodeDef.getAttrMap, "alpha")
    val beta = getFloat(nodeDef.getAttrMap, "beta")

    LRNGradOps[T](size, k, alpha, beta)
  }
} 
Example 26
Source File: FloorDiv.scala    From BigDL   with Apache License 2.0
package com.intel.analytics.bigdl.utils.tf.loaders

import java.nio.ByteOrder

import com.intel.analytics.bigdl.Module
import com.intel.analytics.bigdl.nn.ops.{FloorDiv => FloorDivOps}
import com.intel.analytics.bigdl.tensor.TensorNumericMath.TensorNumeric
import com.intel.analytics.bigdl.utils.tf.Context
import org.tensorflow.framework.{DataType, NodeDef}

import scala.reflect.ClassTag

class FloorDiv extends TensorflowOpsLoader {

  import Utils._

  override def build[T: ClassTag](nodeDef: NodeDef, byteOrder: ByteOrder, context: Context[T])
    (implicit ev: TensorNumeric[T]): Module[T] = {
    val t = getType(nodeDef.getAttrMap, "T")
    if (t == DataType.DT_FLOAT) {
      FloorDivOps[T, Float]()
    } else if (t == DataType.DT_DOUBLE) {
      FloorDivOps[T, Double]()
    } else if (t == DataType.DT_INT32) {
      FloorDivOps[T, Int]()
    } else {
      throw new UnsupportedOperationException(s"Not support load FloorDiv when type is ${t}")
    }
  }
} 
Example 27
Source File: Conv3DBackpropInputV2.scala    From BigDL   with Apache License 2.0
package com.intel.analytics.bigdl.utils.tf.loaders

import java.nio.ByteOrder

import com.intel.analytics.bigdl.Module
import com.intel.analytics.bigdl.nn.abstractnn.{AbstractModule, Activity, DataFormat}
import com.intel.analytics.bigdl.nn.tf.{Conv3DBackpropInputV2 => Conv3DBackpropInputV2Ops}
import com.intel.analytics.bigdl.tensor.TensorNumericMath.TensorNumeric
import com.intel.analytics.bigdl.utils.tf.Context
import org.tensorflow.framework.NodeDef

import scala.reflect.ClassTag

class Conv3DBackpropInputV2 extends TensorflowOpsLoader {

  import Utils._

  override def build[T: ClassTag](nodeDef: NodeDef, byteOrder: ByteOrder,
                                  context: Context[T])(implicit ev: TensorNumeric[T]): Module[T] = {
    val attributes = nodeDef.getAttrMap
    val (pT, pW, pH) =
      if (getString(attributes, "padding") == "SAME") {
        (-1, -1, -1)
      } else {
        (0, 0, 0)
      }
    val strideList = getIntList(attributes, "strides")
    require(strideList.head == 1, s"not support strides on batch")

    val format = getString(attributes, "data_format")
    val conv = format match {
      case "NDHWC" =>
        require(strideList(4) == 1, s"not support strides on depth")
        val dT = strideList(1)
        val dW = strideList(2)
        val dH = strideList(3)
        Conv3DBackpropInputV2Ops[T](dT, dW, dH, pT, pW, pH, DataFormat.NHWC)
      case "NCDHW" =>
        require(strideList(1) == 1, s"not support strides on depth")
        val dT = strideList(2)
        val dW = strideList(3)
        val dH = strideList(4)
        Conv3DBackpropInputV2Ops[T](dT, dW, dH, pT, pW, pH, DataFormat.NCHW)
      case _ =>
        throw new IllegalArgumentException(s"not supported data format: $format")
    }
    conv.asInstanceOf[AbstractModule[Activity, Activity, T]]
  }
} 
Example 28
Source File: IsNan.scala    From BigDL   with Apache License 2.0
package com.intel.analytics.bigdl.utils.tf.loaders

import java.nio.ByteOrder

import com.intel.analytics.bigdl.Module
import com.intel.analytics.bigdl.nn.ops.{IsNan => IsNanOps}
import com.intel.analytics.bigdl.tensor.TensorNumericMath.TensorNumeric
import com.intel.analytics.bigdl.utils.tf.Context
import org.tensorflow.framework.{DataType, NodeDef}

import scala.reflect.ClassTag

class IsNan extends TensorflowOpsLoader {

  import Utils._

  override def build[T: ClassTag](nodeDef: NodeDef, byteOrder: ByteOrder, context: Context[T])
    (implicit ev: TensorNumeric[T]): Module[T] = {
    val t = getType(nodeDef.getAttrMap, "T")
    if (t == DataType.DT_FLOAT) {
      IsNanOps[T, Float]()
    } else if (t == DataType.DT_DOUBLE) {
      IsNanOps[T, Double]()
    } else {
      throw new UnsupportedOperationException(s"Not support load Inv when type is ${t}")
    }
  }
} 
Example 29
Source File: RandomUniform.scala    From BigDL   with Apache License 2.0
package com.intel.analytics.bigdl.utils.tf.loaders

import java.nio.ByteOrder

import com.intel.analytics.bigdl.Module
import com.intel.analytics.bigdl.nn.ops.{RandomUniform => RandomUniformOps}
import com.intel.analytics.bigdl.tensor.TensorNumericMath.TensorNumeric
import com.intel.analytics.bigdl.utils.tf.Context
import org.tensorflow.framework.{DataType, NodeDef}

import scala.reflect.ClassTag

class RandomUniform extends TensorflowOpsLoader {
  override def build[T: ClassTag](nodeDef: NodeDef, byteOrder: ByteOrder,
    context: Context[T])(implicit ev: TensorNumeric[T]): Module[T] = {
    val seed = if (nodeDef.getAttrMap.containsKey("seed")) {
      Some(nodeDef.getAttrMap.get("seed").getI().toInt)
    } else {
      None
    }

    nodeDef.getAttrMap.get("dtype").getType match {
      case DataType.DT_FLOAT =>
        val min = 0
        val max = 1
        RandomUniformOps[T, Float](min, max, seed)
      case DataType.DT_DOUBLE =>
        val min = 0
        val max = 1
        RandomUniformOps[T, Double](min, max, seed)
      case _ =>
        throw new IllegalArgumentException("Not support data type")
    }
  }
} 
Example 30
Source File: Softsign.scala    From BigDL   with Apache License 2.0
package com.intel.analytics.bigdl.utils.tf.loaders

import java.nio.ByteOrder

import com.intel.analytics.bigdl.Module
import com.intel.analytics.bigdl.nn.tf.SoftSign
import com.intel.analytics.bigdl.tensor.TensorNumericMath.TensorNumeric
import com.intel.analytics.bigdl.utils.tf.Context
import com.intel.analytics.bigdl.utils.tf.loaders.Utils.getType
import org.tensorflow.framework.{DataType, NodeDef}

import scala.reflect.ClassTag

class Softsign extends TensorflowOpsLoader {
  override def build[T: ClassTag](nodeDef: NodeDef, byteOrder: ByteOrder,
                                  context: Context[T])(implicit ev: TensorNumeric[T]): Module[T] = {
    val t = getType(nodeDef.getAttrMap, "T")
    if (t == DataType.DT_FLOAT) {
      SoftSign[T, Float]()
    } else if (t == DataType.DT_DOUBLE) {
      SoftSign[T, Double]()
    } else {
      throw new UnsupportedOperationException(s"Not support load SoftsignGrad when type is ${t}")
    }
  }
} 
Example 31
Source File: SqrtGrad.scala    From BigDL   with Apache License 2.0
package com.intel.analytics.bigdl.utils.tf.loaders

import java.nio.ByteOrder

import com.intel.analytics.bigdl.Module
import com.intel.analytics.bigdl.nn.tf.{SqrtGrad => SqrtGradOps}
import com.intel.analytics.bigdl.tensor.TensorNumericMath.TensorNumeric
import com.intel.analytics.bigdl.utils.tf.Context
import com.intel.analytics.bigdl.utils.tf.loaders.Utils.getType
import org.tensorflow.framework.{DataType, NodeDef}

import scala.reflect.ClassTag

class SqrtGrad extends TensorflowOpsLoader {
  override def build[T: ClassTag](nodeDef: NodeDef, byteOrder: ByteOrder,
                                  context: Context[T])(implicit ev: TensorNumeric[T]): Module[T] = {
    val t = getType(nodeDef.getAttrMap, "T")
    if (t == DataType.DT_FLOAT) {
      SqrtGradOps[T, Float]()
    } else if (t == DataType.DT_DOUBLE) {
      SqrtGradOps[T, Double]()
    } else {
      throw new UnsupportedOperationException(s"Not support load SqrtGrad when type is $t")
    }
  }
} 
Example 32
Source File: Erf.scala    From BigDL   with Apache License 2.0
package com.intel.analytics.bigdl.utils.tf.loaders

import java.nio.ByteOrder

import com.intel.analytics.bigdl.Module
import com.intel.analytics.bigdl.nn.ops.{Erf => ErfOps}
import com.intel.analytics.bigdl.tensor.TensorNumericMath.TensorNumeric
import com.intel.analytics.bigdl.utils.tf.Context
import org.tensorflow.framework.{DataType, NodeDef}

import scala.reflect.ClassTag

class Erf extends TensorflowOpsLoader {

  import Utils._

  override def build[T: ClassTag](nodeDef: NodeDef, byteOrder: ByteOrder, context: Context[T])
                                 (implicit ev: TensorNumeric[T]): Module[T] = {
    val t = getType(nodeDef.getAttrMap, "T")
    if (t == DataType.DT_FLOAT) {
      ErfOps[T, Float]()
    } else if (t == DataType.DT_DOUBLE) {
      ErfOps[T, Double]()
    } else {
      throw new UnsupportedOperationException(s"Not support load Erf when type is ${t}")
    }
  }
} 
Example 33
Source File: Conv3DBackpropFilterV2.scala    From BigDL   with Apache License 2.0
package com.intel.analytics.bigdl.utils.tf.loaders

import java.nio.ByteOrder

import com.intel.analytics.bigdl.Module
import com.intel.analytics.bigdl.nn.abstractnn.{AbstractModule, Activity, DataFormat}
import com.intel.analytics.bigdl.nn.tf.{Conv3DBackpropFilterV2 => Conv3DBackpropFilterV2Ops}
import com.intel.analytics.bigdl.tensor.TensorNumericMath.TensorNumeric
import com.intel.analytics.bigdl.utils.tf.Context
import org.tensorflow.framework.NodeDef

import scala.reflect.ClassTag

class Conv3DBackpropFilterV2 extends TensorflowOpsLoader {

  import Utils._

  override def build[T: ClassTag](nodeDef: NodeDef, byteOrder: ByteOrder,
                                  context: Context[T])(implicit ev: TensorNumeric[T]): Module[T] = {
    val attributes = nodeDef.getAttrMap
    val (pT, pW, pH) =
      if (getString(attributes, "padding") == "SAME") {
        (-1, -1, -1)
      } else {
        (0, 0, 0)
      }
    val strideList = getIntList(attributes, "strides")
    require(strideList.head == 1, s"not support strides on batch")

    val format = getString(attributes, "data_format")
    val conv = format match {
      case "NDHWC" =>
        require(strideList(4) == 1, s"not support strides on depth")
        val dT = strideList(1)
        val dW = strideList(2)
        val dH = strideList(3)
        Conv3DBackpropFilterV2Ops[T](dT, dW, dH, pT, pW, pH, DataFormat.NHWC)
      case "NCDHW" =>
        require(strideList(1) == 1, s"not support strides on depth")
        val dT = strideList(2)
        val dW = strideList(3)
        val dH = strideList(4)
        Conv3DBackpropFilterV2Ops[T](dT, dW, dH, pT, pW, pH, DataFormat.NCHW)
      case _ =>
        throw new IllegalArgumentException(s"not supported data format: $format")
    }
    conv.asInstanceOf[AbstractModule[Activity, Activity, T]]
  }
} 
Example 34
Source File: TanhGrad.scala    From BigDL   with Apache License 2.0
package com.intel.analytics.bigdl.utils.tf.loaders

import java.nio.ByteOrder

import com.intel.analytics.bigdl.Module
import com.intel.analytics.bigdl.nn.tf.{TanhGrad => TanhGradOps}
import com.intel.analytics.bigdl.tensor.TensorNumericMath.TensorNumeric
import com.intel.analytics.bigdl.utils.tf.Context
import com.intel.analytics.bigdl.utils.tf.loaders.Utils.getType
import org.tensorflow.framework.{DataType, NodeDef}

import scala.reflect.ClassTag

class TanhGrad extends TensorflowOpsLoader {
  override def build[T: ClassTag](nodeDef: NodeDef, byteOrder: ByteOrder,
                                  context: Context[T])(implicit ev: TensorNumeric[T]): Module[T] = {
    val t = getType(nodeDef.getAttrMap, "T")
    if (t == DataType.DT_FLOAT) {
      TanhGradOps[T, Float]()
    } else if (t == DataType.DT_DOUBLE) {
      TanhGradOps[T, Double]()
    } else {
      throw new UnsupportedOperationException(s"Not support load TanhGrad when type is ${t}")
    }
  }
} 
Example 35
Source File: IsInf.scala    From BigDL   with Apache License 2.0
package com.intel.analytics.bigdl.utils.tf.loaders

import java.nio.ByteOrder

import com.intel.analytics.bigdl.Module
import com.intel.analytics.bigdl.nn.ops.{IsInf => IsInfOps}
import com.intel.analytics.bigdl.tensor.TensorNumericMath.TensorNumeric
import com.intel.analytics.bigdl.utils.tf.Context
import org.tensorflow.framework.{DataType, NodeDef}

import scala.reflect.ClassTag

class IsInf extends TensorflowOpsLoader {

  import Utils._

  override def build[T: ClassTag](nodeDef: NodeDef, byteOrder: ByteOrder, context: Context[T])
    (implicit ev: TensorNumeric[T]): Module[T] = {
    val t = getType(nodeDef.getAttrMap, "T")
    if (t == DataType.DT_FLOAT) {
      IsInfOps[T, Float]()
    } else if (t == DataType.DT_DOUBLE) {
      IsInfOps[T, Double]()
    } else {
      throw new UnsupportedOperationException(s"Not support load Inv when type is ${t}")
    }
  }
} 
Example 36
Source File: Log1p.scala    From BigDL   with Apache License 2.0
package com.intel.analytics.bigdl.utils.tf.loaders

import java.nio.ByteOrder

import com.intel.analytics.bigdl.Module
import com.intel.analytics.bigdl.nn.tf.{Log1p => Log1pOps}
import com.intel.analytics.bigdl.tensor.TensorNumericMath.TensorNumeric
import org.tensorflow.framework.{DataType, NodeDef}
import com.intel.analytics.bigdl.utils.tf.Context

import scala.reflect.ClassTag

class Log1p extends TensorflowOpsLoader {
  import Utils._
  override def build[T: ClassTag](nodeDef: NodeDef, byteOrder: ByteOrder,
                                  context: Context[T])(implicit ev: TensorNumeric[T]): Module[T] = {
    val t = getType(nodeDef.getAttrMap, "T")
    if (t == DataType.DT_FLOAT) {
      Log1pOps[T, Float]()
    } else if (t == DataType.DT_DOUBLE) {
      Log1pOps[T, Double]()
    } else {
     throw new UnsupportedOperationException(s"Not support load Log1p when type is ${t}")
    }
  }
} 
Example 37
Source File: SigmoidGrad.scala    From BigDL   with Apache License 2.0
package com.intel.analytics.bigdl.utils.tf.loaders

import java.nio.ByteOrder

import com.intel.analytics.bigdl.Module
import com.intel.analytics.bigdl.nn.tf.{SigmoidGrad => SigmoidGradOps}
import com.intel.analytics.bigdl.tensor.TensorNumericMath.TensorNumeric
import com.intel.analytics.bigdl.utils.tf.Context
import com.intel.analytics.bigdl.utils.tf.loaders.Utils.getType
import org.tensorflow.framework.{DataType, NodeDef}

import scala.reflect.ClassTag

class SigmoidGrad extends TensorflowOpsLoader {
  override def build[T: ClassTag](nodeDef: NodeDef, byteOrder: ByteOrder,
                                  context: Context[T])(implicit ev: TensorNumeric[T]): Module[T] = {
    val t = getType(nodeDef.getAttrMap, "T")
    if (t == DataType.DT_FLOAT) {
      SigmoidGradOps[T, Float]()
    } else if (t == DataType.DT_DOUBLE) {
      SigmoidGradOps[T, Double]()
    } else {
      throw new UnsupportedOperationException(s"Not support load SigmoidGrad when type is ${t}")
    }
  }
} 
Example 38
Source File: MaxPool.scala    From BigDL   with Apache License 2.0
package com.intel.analytics.bigdl.utils.tf.loaders

import java.nio.ByteOrder

import com.intel.analytics.bigdl.Module
import com.intel.analytics.bigdl.nn.SpatialMaxPooling
import com.intel.analytics.bigdl.nn.abstractnn.DataFormat
import com.intel.analytics.bigdl.tensor.Tensor
import com.intel.analytics.bigdl.tensor.TensorNumericMath.TensorNumeric
import com.intel.analytics.bigdl.utils.tf.Context
import org.tensorflow.framework.NodeDef

import scala.reflect.ClassTag

class MaxPool extends TensorflowOpsLoader {
  import Utils._

  override def build[T: ClassTag](nodeDef: NodeDef, byteOrder: ByteOrder,
    context: Context[T])(implicit ev: TensorNumeric[T]): Module[T] = {

    val attributes = nodeDef.getAttrMap
    val format = getString(attributes, "data_format")
    val strideList = getIntList(attributes, "strides")
    val kernelList = getIntList(attributes, "ksize")
    val (strideH, strideW, ksizeH, ksizeW) = format match {
      case "NHWC" =>
        require(strideList(3) == 1, s"not support strides on depth")
        (strideList(1), strideList(2), kernelList(1), kernelList(2))
      case "NCHW" =>
        require(strideList(1) == 1, s"not support strides on depth")
        (strideList(2), strideList(3), kernelList(2), kernelList(3))
      case _ =>
        throw new IllegalArgumentException(s"not supported data format: $format")
    }

    val (pW, pH) =
      if (getString(attributes, "padding") == "SAME") {
        (-1, -1)
      } else {
        (0, 0)
      }

    SpatialMaxPooling[T](ksizeW, ksizeH, strideW, strideH, pW, pH,
      format = DataFormat(format))
  }
} 
Example 39
Source File: TruncateMod.scala    From BigDL   with Apache License 2.0
package com.intel.analytics.bigdl.utils.tf.loaders

import java.nio.ByteOrder

import com.intel.analytics.bigdl.Module
import com.intel.analytics.bigdl.nn.ops.{Mod => ModOps}
import com.intel.analytics.bigdl.tensor.TensorNumericMath.TensorNumeric
import com.intel.analytics.bigdl.utils.tf.Context
import org.tensorflow.framework.{DataType, NodeDef}

import scala.reflect.ClassTag

class TruncateMod extends TensorflowOpsLoader {

  import Utils._

  override def build[T: ClassTag](nodeDef: NodeDef, byteOrder: ByteOrder, context: Context[T])
    (implicit ev: TensorNumeric[T]): Module[T] = {
    val t = getType(nodeDef.getAttrMap, "T")
    if (t == DataType.DT_FLOAT) {
      ModOps[T, Float]()
    } else if (t == DataType.DT_DOUBLE) {
      ModOps[T, Double]()
    } else if (t == DataType.DT_INT32) {
      ModOps[T, Int]()
    } else {
      throw new UnsupportedOperationException(s"Not support load Mod when type is ${t}")
    }
  }
} 
Example 40
Source File: StridedSlice.scala    From BigDL   with Apache License 2.0
package com.intel.analytics.bigdl.utils.tf.loaders

import java.nio.ByteOrder

import com.intel.analytics.bigdl.Module
import com.intel.analytics.bigdl.nn.abstractnn.{AbstractModule, Activity}
import com.intel.analytics.bigdl.nn.tf.{StridedSlice => StridedSliceOps}
import com.intel.analytics.bigdl.tensor.Tensor
import com.intel.analytics.bigdl.tensor.TensorNumericMath.TensorNumeric
import com.intel.analytics.bigdl.utils.tf.Context
import org.tensorflow.framework.{DataType, NodeDef}

import scala.reflect.ClassTag

class StridedSlice extends TensorflowOpsLoader {

  import Utils._

  override def build[T: ClassTag](nodeDef: NodeDef, byteOrder: ByteOrder,
    context: Context[T])(implicit ev: TensorNumeric[T]): Module[T] = {

    val t = getType(nodeDef, "T")
    val beginMask = getInt(nodeDef.getAttrMap, "begin_mask")
    val ellipsisMask = getInt(nodeDef.getAttrMap, "ellipsis_mask")
    val endMask = getInt(nodeDef.getAttrMap, "end_mask")
    val newAxisMask = getInt(nodeDef.getAttrMap, "new_axis_mask")
    val shrinkAxisMask = getInt(nodeDef.getAttrMap, "shrink_axis_mask")

    if (t == DataType.DT_INT32) {
      StridedSliceOps[T, Int](beginMask, endMask, ellipsisMask,
        newAxisMask, shrinkAxisMask, true)
    } else if (t == DataType.DT_FLOAT) {
      StridedSliceOps[T, Float](beginMask, endMask, ellipsisMask,
        newAxisMask, shrinkAxisMask, true)
    } else if (t == DataType.DT_DOUBLE) {
      StridedSliceOps[T, Double](beginMask, endMask, ellipsisMask,
        newAxisMask, shrinkAxisMask, true)
    } else {
      throw new UnsupportedOperationException(s"Not support load StridedSlice with type ${t}")
    }
  }
}

object StridedSlice {
  def oneDTensorToArray(tensor: Tensor[Int]): Array[Int] = {
    require(tensor.nDimension() == 1, "1D tensor required")
    val result = new Array[Int](tensor.nElement())
    var i = 0
    while(i < tensor.nElement()) {
      result(i) = tensor.valueAt(i + 1)
      i += 1
    }
    result
  }
} 
Example 41
Source File: Digamma.scala    From BigDL   with Apache License 2.0
package com.intel.analytics.bigdl.utils.tf.loaders

import java.nio.ByteOrder

import com.intel.analytics.bigdl.Module
import com.intel.analytics.bigdl.nn.ops.{Digamma => DigammaOps}
import com.intel.analytics.bigdl.tensor.TensorNumericMath.TensorNumeric
import com.intel.analytics.bigdl.utils.tf.Context
import org.tensorflow.framework.{DataType, NodeDef}

import scala.reflect.ClassTag

class Digamma extends TensorflowOpsLoader {

  import Utils._

  override def build[T: ClassTag](nodeDef: NodeDef, byteOrder: ByteOrder, context: Context[T])
                                 (implicit ev: TensorNumeric[T]): Module[T] = {
    val t = getType(nodeDef.getAttrMap, "T")
    if (t == DataType.DT_FLOAT) {
      DigammaOps[T, Float]()
    } else if (t == DataType.DT_DOUBLE) {
      DigammaOps[T, Double]()
    } else {
      throw new UnsupportedOperationException(s"Not support load Digamma when type is ${t}")
    }
  }
} 
Example 42
Source File: SoftplusGrad.scala    From BigDL   with Apache License 2.0
package com.intel.analytics.bigdl.utils.tf.loaders

import java.nio.ByteOrder

import com.intel.analytics.bigdl.Module
import com.intel.analytics.bigdl.nn.tf.{SoftplusGrad => SoftplusGradOps}
import com.intel.analytics.bigdl.tensor.TensorNumericMath.TensorNumeric
import com.intel.analytics.bigdl.utils.tf.Context
import com.intel.analytics.bigdl.utils.tf.loaders.Utils.getType
import org.tensorflow.framework.{DataType, NodeDef}

import scala.reflect.ClassTag

class SoftplusGrad extends TensorflowOpsLoader {

  override def build[T: ClassTag](nodeDef: NodeDef, byteOrder: ByteOrder,
                                  context: Context[T])(implicit ev: TensorNumeric[T]): Module[T] = {
    val t = getType(nodeDef.getAttrMap, "T")
    if (t == DataType.DT_FLOAT) {
      SoftplusGradOps[T, Float]()
    } else if (t == DataType.DT_DOUBLE) {
      SoftplusGradOps[T, Double]()
    } else {
      throw new UnsupportedOperationException(s"Not support load SoftplusGrad when type is ${t}")
    }
  }
} 
Example 43
Source File: Dilation2DBackpropFilter.scala    From BigDL   with Apache License 2.0
package com.intel.analytics.bigdl.utils.tf.loaders

import java.nio.ByteOrder

import com.intel.analytics.bigdl.Module
import com.intel.analytics.bigdl.nn.ops.{Dilation2DBackpropFilter => Dilation2DBackpropFilterOps}
import com.intel.analytics.bigdl.tensor.TensorNumericMath.TensorNumeric
import com.intel.analytics.bigdl.utils.tf.Context
import org.tensorflow.framework.{DataType, NodeDef}

import scala.reflect.ClassTag

class Dilation2DBackpropFilter extends TensorflowOpsLoader {

  import Utils._

  override def build[T: ClassTag](nodeDef: NodeDef, byteOrder: ByteOrder,
                                  context: Context[T])(implicit ev: TensorNumeric[T]): Module[T] = {
    val attributes = nodeDef.getAttrMap
    val padding = getString(attributes, "padding")
    val strides = getIntList(attributes, "strides").toArray
    val rates = getIntList(attributes, "rates").toArray
    val t = getType(nodeDef.getAttrMap, "T")

    if (t == DataType.DT_FLOAT) {
      Dilation2DBackpropFilterOps[T, Float](strides, rates, padding)
    } else if (t == DataType.DT_DOUBLE) {
      Dilation2DBackpropFilterOps[T, Double](strides, rates, padding)
    } else {
      throw new UnsupportedOperationException(
        s"Not support load Dilation2DBackpropFilter when type is ${t}")
    }
  }
} 
Example 44
Source File: Rsqrt.scala    From BigDL   with Apache License 2.0
package com.intel.analytics.bigdl.utils.tf.loaders

import java.nio.ByteOrder

import com.intel.analytics.bigdl.Module
import com.intel.analytics.bigdl.nn.tf.Power
import com.intel.analytics.bigdl.tensor.TensorNumericMath.TensorNumeric
import com.intel.analytics.bigdl.utils.tf.Context
import org.tensorflow.framework.{DataType, NodeDef}

import scala.reflect.ClassTag

class Rsqrt extends TensorflowOpsLoader {

  import Utils._

  override def build[T: ClassTag](nodeDef: NodeDef, byteOrder: ByteOrder,
    context: Context[T])(implicit ev: TensorNumeric[T]): Module[T] = {
    val t = getType(nodeDef.getAttrMap, "T")
    if (t == DataType.DT_FLOAT) {
      Power[T, Float](-0.5, 1, 0)
    } else if (t == DataType.DT_DOUBLE) {
      Power[T, Double](-0.5, 1, 0)
    } else {
      throw new UnsupportedOperationException(s"Not support load Rsqrt when type is $t")
    }
  }
} 
Example 45
Source File: InvGrad.scala    From BigDL   with Apache License 2.0
package com.intel.analytics.bigdl.utils.tf.loaders

import java.nio.ByteOrder

import com.intel.analytics.bigdl.Module
import com.intel.analytics.bigdl.nn.ops.{InvGrad => InvGradOps}
import com.intel.analytics.bigdl.tensor.TensorNumericMath.TensorNumeric
import com.intel.analytics.bigdl.utils.tf.Context
import org.tensorflow.framework.{DataType, NodeDef}

import scala.reflect.ClassTag

class InvGrad extends TensorflowOpsLoader {

  import Utils._

  override def build[T: ClassTag](nodeDef: NodeDef, byteOrder: ByteOrder, context: Context[T])
    (implicit ev: TensorNumeric[T]): Module[T] = {
    val t = getType(nodeDef.getAttrMap, "T")
    if (t == DataType.DT_FLOAT) {
      InvGradOps[T, Float]()
    } else if (t == DataType.DT_DOUBLE) {
      InvGradOps[T, Double]()
    } else {
      throw new UnsupportedOperationException(s"Not support load Inv when type is ${t}")
    }
  }
} 
Example 46
Source File: MaxPoolGrad.scala    From BigDL   with Apache License 2.0
package com.intel.analytics.bigdl.utils.tf.loaders

import java.nio.ByteOrder

import com.intel.analytics.bigdl.Module
import com.intel.analytics.bigdl.nn.abstractnn.DataFormat
import com.intel.analytics.bigdl.nn.tf.{MaxPoolGrad => MaxPoolGradOps}
import com.intel.analytics.bigdl.tensor.TensorNumericMath.TensorNumeric
import com.intel.analytics.bigdl.utils.tf.Context
import org.tensorflow.framework.NodeDef

import scala.reflect.ClassTag

class MaxPoolGrad extends TensorflowOpsLoader {

  import Utils._

  override def build[T: ClassTag](nodeDef: NodeDef, byteOrder: ByteOrder,
    context: Context[T])(implicit ev: TensorNumeric[T]): Module[T] = {
    val attributes = nodeDef.getAttrMap
    val (pW, pH) =
      if (getString(attributes, "padding") == "SAME") {
        (-1, -1)
      } else {
        (0, 0)
      }
    val strideList = getIntList(attributes, "strides")
    require(strideList.head == 1, s"not support strides on batch")
    val kernelSize = getIntList(attributes, "ksize")
    require(kernelSize.head == 1, s"not support kernel on batch")

    val format = getString(attributes, "data_format")
    val poolgrad = format match {
      case "NHWC" =>
        require(strideList(3) == 1, s"not support strides on depth")
        val strideW = strideList(1)
        val strideH = strideList(2)
        val kW = kernelSize(1)
        val kH = kernelSize(2)
        MaxPoolGradOps[T](kW, kH, strideW, strideH, pW, pH, DataFormat.NHWC)

      case "NCHW" =>
        require(strideList(1) == 1, s"not support strides on depth")
        val strideW = strideList(2)
        val strideH = strideList(3)
        val kW = kernelSize(2)
        val kH = kernelSize(3)
        MaxPoolGradOps[T](kW, kH, strideW, strideH, pW, pH, DataFormat.NCHW)
      case _ =>
        throw new IllegalArgumentException(s"not supported data format: $format")
    }
    poolgrad.asInstanceOf[Module[T]]
  }
} 
Example 47
Source File: FusedBatchNormGrad.scala    From BigDL   with Apache License 2.0
package com.intel.analytics.bigdl.utils.tf.loaders

import java.nio.ByteOrder

import com.intel.analytics.bigdl.Module
import com.intel.analytics.bigdl.nn.abstractnn.DataFormat
import com.intel.analytics.bigdl.nn.tf.{FusedBatchNormGrad => FusedBatchNormGradOps}
import com.intel.analytics.bigdl.tensor.TensorNumericMath.TensorNumeric
import com.intel.analytics.bigdl.utils.tf.Context
import org.tensorflow.framework.NodeDef

import scala.reflect.ClassTag

class FusedBatchNormGrad extends TensorflowOpsLoader {

  import Utils._

  override def build[T: ClassTag](nodeDef: NodeDef, byteOrder: ByteOrder, context: Context[T])
    (implicit ev: TensorNumeric[T]): Module[T] = {
    val eps = getFloat(nodeDef.getAttrMap, "epsilon")
    val dataFormat = getString(nodeDef.getAttrMap, "data_format")
    val isTrain = getBoolean(nodeDef.getAttrMap, "is_training")
    FusedBatchNormGradOps[T](eps,
      if (dataFormat == "NHWC") DataFormat.NHWC else DataFormat.NCHW,
      isTrain)
  }
} 
Example 48
Source File: Conv3DBackpropFilter.scala    From BigDL   with Apache License 2.0
package com.intel.analytics.bigdl.utils.tf.loaders

import java.nio.ByteOrder

import com.intel.analytics.bigdl.Module
import com.intel.analytics.bigdl.nn.abstractnn.DataFormat
import com.intel.analytics.bigdl.nn.tf.{Conv3DBackpropFilter => Conv3DBackpropFilterOps}
import com.intel.analytics.bigdl.tensor.TensorNumericMath.TensorNumeric
import com.intel.analytics.bigdl.utils.tf.Context
import org.tensorflow.framework.NodeDef

import scala.reflect.ClassTag

class Conv3DBackpropFilter extends TensorflowOpsLoader {

  import Utils._

  override def build[T: ClassTag](nodeDef: NodeDef, byteOrder: ByteOrder,
                                  context: Context[T])(implicit ev: TensorNumeric[T]): Module[T] = {
    val attributes = nodeDef.getAttrMap
    val (pT, pW, pH) =
      if (getString(attributes, "padding") == "SAME") {
        (-1, -1, -1)
      } else {
        (0, 0, 0)
      }
    val strideList = getIntList(attributes, "strides")
    require(strideList.head == 1, s"not support strides on batch")

    require(strideList(4) == 1, s"not support strides on depth")
    val dT = strideList(1)
    val dW = strideList(2)
    val dH = strideList(3)
    Conv3DBackpropFilterOps[T](dT, dW, dH, pT, pW, pH, DataFormat.NHWC)
  }
} 
Example 49
Source File: Maximum.scala    From BigDL   with Apache License 2.0
package com.intel.analytics.bigdl.utils.tf.loaders

import java.nio.ByteOrder

import com.intel.analytics.bigdl.Module
import com.intel.analytics.bigdl.nn.ops.{Maximum => MaximumOps}
import com.intel.analytics.bigdl.tensor.TensorNumericMath.TensorNumeric
import org.tensorflow.framework.{DataType, NodeDef}
import Utils._
import com.intel.analytics.bigdl.utils.tf.Context

import scala.reflect.ClassTag

class Maximum extends TensorflowOpsLoader {
  override def build[T: ClassTag](nodeDef: NodeDef, byteOrder: ByteOrder,
                                  context: Context[T])(implicit ev: TensorNumeric[T]): Module[T] = {
    val t = getType(nodeDef.getAttrMap, "T")
    if (t == DataType.DT_FLOAT) {
      MaximumOps[T, Float]()
    } else if (t == DataType.DT_DOUBLE) {
      MaximumOps[T, Double]()
    } else {
      throw new UnsupportedOperationException(s"Not support load Maximum when type is $t")
    }
  }
} 
Example 50
Source File: Gather.scala    From BigDL   with Apache License 2.0
package com.intel.analytics.bigdl.utils.tf.loaders

import java.nio.ByteOrder

import com.intel.analytics.bigdl.Module
import com.intel.analytics.bigdl.nn.ops.{Gather => GatherOps}
import com.intel.analytics.bigdl.tensor.TensorNumericMath.TensorNumeric
import com.intel.analytics.bigdl.utils.tf.Context
import org.tensorflow.framework.{DataType, NodeDef}

import scala.reflect.ClassTag

class Gather extends TensorflowOpsLoader {

  import Utils._

  override def build[T: ClassTag](nodeDef: NodeDef, byteOrder: ByteOrder, context: Context[T])
    (implicit ev: TensorNumeric[T]): Module[T] = {
    val t = getType(nodeDef.getAttrMap, "Tparams")
    if (t == DataType.DT_FLOAT) {
      GatherOps[T, Float]()
    } else if (t == DataType.DT_DOUBLE) {
      GatherOps[T, Double]()
    } else if (t == DataType.DT_INT32) {
      GatherOps[T, Int]()
    } else {
      throw new UnsupportedOperationException(s"Not support load Gather when type is ${t}")
    }
  }
} 
Example 51
Source File: Ceil.scala    From BigDL   with Apache License 2.0
package com.intel.analytics.bigdl.utils.tf.loaders

import java.nio.ByteOrder

import com.intel.analytics.bigdl.Module
import com.intel.analytics.bigdl.nn.ops.{Ceil => CeilOps}
import com.intel.analytics.bigdl.tensor.TensorNumericMath.TensorNumeric
import com.intel.analytics.bigdl.utils.tf.Context
import org.tensorflow.framework.{DataType, NodeDef}

import scala.reflect.ClassTag

class Ceil extends TensorflowOpsLoader {

  import Utils._

  override def build[T: ClassTag](nodeDef: NodeDef, byteOrder: ByteOrder, context: Context[T])
    (implicit ev: TensorNumeric[T]): Module[T] = {
    val t = getType(nodeDef.getAttrMap, "T")
    if (t == DataType.DT_FLOAT) {
      CeilOps[T, Float]()
    } else if (t == DataType.DT_DOUBLE) {
      CeilOps[T, Double]()
    } else {
      throw new UnsupportedOperationException(s"not support load Cell operation for type $t")
    }
  }
} 
Example 52
Source File: Expm1.scala    From BigDL   with Apache License 2.0
package com.intel.analytics.bigdl.utils.tf.loaders

import java.nio.ByteOrder

import com.intel.analytics.bigdl.Module
import com.intel.analytics.bigdl.nn.ops.{Expm1 => Expm1Ops}
import com.intel.analytics.bigdl.tensor.TensorNumericMath.TensorNumeric
import com.intel.analytics.bigdl.utils.tf.Context
import org.tensorflow.framework.{DataType, NodeDef}

import scala.reflect.ClassTag

class Expm1 extends TensorflowOpsLoader {

  import Utils._

  override def build[T: ClassTag](nodeDef: NodeDef, byteOrder: ByteOrder, context: Context[T])
    (implicit ev: TensorNumeric[T]): Module[T] = {
    val t = getType(nodeDef.getAttrMap, "T")
    if (t == DataType.DT_FLOAT) {
      Expm1Ops[T, Float]()
    } else if (t == DataType.DT_DOUBLE) {
      Expm1Ops[T, Double]()
    } else {
      throw new UnsupportedOperationException(s"Not support load Expm1 when type is ${t}")
    }
  }
} 
Example 53
Source File: TopK.scala    From BigDL   with Apache License 2.0
package com.intel.analytics.bigdl.utils.tf.loaders

import java.nio.ByteOrder

import com.intel.analytics.bigdl.Module
import com.intel.analytics.bigdl.nn.ops.{TopK => TopKOps}
import com.intel.analytics.bigdl.tensor.TensorNumericMath.TensorNumeric
import com.intel.analytics.bigdl.utils.tf.Context
import org.tensorflow.framework.{DataType, NodeDef}

import scala.reflect.ClassTag

class TopK extends TensorflowOpsLoader {

  import Utils._

  override def build[T: ClassTag](nodeDef: NodeDef, byteOrder: ByteOrder, context: Context[T])
    (implicit ev: TensorNumeric[T]): Module[T] = {
    val k = getInt(nodeDef.getAttrMap, "k")
    val s = if (nodeDef.getAttrMap.containsKey("sorted")) {
      getBoolean(nodeDef.getAttrMap, "sorted")
    } else {
      true
    }
    val t = getType(nodeDef.getAttrMap, "T")
    if (t == DataType.DT_FLOAT) {
      TopKOps[T, Float](k, s, startIndex = 0)
    } else if (t == DataType.DT_DOUBLE) {
      TopKOps[T, Double](k, s, startIndex = 0)
    } else {
      throw new UnsupportedOperationException(s"Not support load Inv when type is ${t}")
    }
  }
} 
Example 54
Source File: Add.scala    From BigDL   with Apache License 2.0
package com.intel.analytics.bigdl.utils.tf.loaders

import java.nio.ByteOrder

import com.intel.analytics.bigdl.Module
import com.intel.analytics.bigdl.nn.CAddTable
import com.intel.analytics.bigdl.tensor.TensorNumericMath.TensorNumeric
import com.intel.analytics.bigdl.utils.tf.Context
import com.intel.analytics.bigdl.utils.tf.loaders.Utils.getType
import org.tensorflow.framework.{DataType, NodeDef}

import scala.reflect.ClassTag

class Add extends TensorflowOpsLoader {
  override def build[T: ClassTag](nodeDef: NodeDef, byteOrder: ByteOrder
    , context: Context[T])(implicit ev: TensorNumeric[T]): Module[T] = {
    val t = getType(nodeDef.getAttrMap, "T")
    if (t == DataType.DT_FLOAT) {
      new CAddTable[T, Float]()
    } else if (t == DataType.DT_INT32) {
      new CAddTable[T, Int]()
    } else if (t == DataType.DT_DOUBLE) {
      new CAddTable[T, Double]()
    } else {
      throw new UnsupportedOperationException(s"Not support numeric type $t")
    }
  }
} 
Example 55
Source File: Conv2D.scala    From BigDL   with Apache License 2.0
package com.intel.analytics.bigdl.utils.tf.loaders

import java.nio.ByteOrder

import com.intel.analytics.bigdl.Module
import com.intel.analytics.bigdl.nn.abstractnn.{AbstractModule, Activity, DataFormat}
import com.intel.analytics.bigdl.nn.tf.{Conv2D => Conv2DOps}
import com.intel.analytics.bigdl.tensor.TensorNumericMath.TensorNumeric
import com.intel.analytics.bigdl.utils.tf.Context
import org.tensorflow.framework.NodeDef

import scala.reflect.ClassTag

class Conv2D extends TensorflowOpsLoader {

  import Utils._

  override def build[T: ClassTag](nodeDef: NodeDef, byteOrder: ByteOrder,
    context: Context[T])(implicit ev: TensorNumeric[T]): Module[T] = {
    val attributes = nodeDef.getAttrMap
    val (pW, pH) =
      if (getString(attributes, "padding") == "SAME") {
        (-1, -1)
      } else {
        (0, 0)
      }
    val strideList = getIntList(attributes, "strides")
    require(strideList.head == 1, s"not support strides on batch")

    val format = getString(attributes, "data_format")
    val conv = format match {
      case "NHWC" =>
        require(strideList(3) == 1, s"not support strides on depth")
        val strideW = strideList(1)
        val strideH = strideList(2)
        Conv2DOps[T](strideW, strideH, pW, pH, DataFormat.NHWC)

      case "NCHW" =>
        require(strideList(1) == 1, s"not support strides on depth")
        val strideW = strideList(2)
        val strideH = strideList(3)
        Conv2DOps[T](strideW, strideH, pW, pH, DataFormat.NCHW)
      case _ =>
        throw new IllegalArgumentException(s"not supported data format: $format")
    }
    conv.asInstanceOf[AbstractModule[Activity, Activity, T]]
  }
} 
Example 56
Source File: ParseSingleExample.scala    From BigDL   with Apache License 2.0
package com.intel.analytics.bigdl.utils.tf.loaders

import java.nio.ByteOrder

import com.google.protobuf.ByteString
import com.intel.analytics.bigdl.Module
import com.intel.analytics.bigdl.nn.tf.{ParseSingleExample => ParseSingleExampleOperation}
import com.intel.analytics.bigdl.tensor._
import com.intel.analytics.bigdl.tensor.TensorNumericMath.TensorNumeric
import com.intel.analytics.bigdl.utils.tf.Context
import org.tensorflow.framework.{DataType, NodeDef}

import collection.JavaConverters._
import scala.reflect.ClassTag

class ParseSingleExample extends TensorflowOpsLoader {

  import Utils._

  override def build[T: ClassTag](nodeDef: NodeDef, byteOrder: ByteOrder,
    context: Context[T])(implicit ev: TensorNumeric[T]): Module[T] = {
    val Tdense = nodeDef.getAttrMap.get("Tdense")
      .getList.getTypeList.asScala
      .map {
        case DataType.DT_INT64 => LongType
        case DataType.DT_INT32 => IntType
        case DataType.DT_FLOAT => FloatType
        case DataType.DT_DOUBLE => DoubleType
        case DataType.DT_STRING => StringType
        case _ => throw new IllegalArgumentException()
      }
    val denseKeysByteArray = nodeDef.getAttrMap.get("dense_keys").getList.
      getSList.asScala.map(_.toByteArray)
    val denseKeys = denseKeysByteArray.map(ByteString.copyFrom(_))
    val denseShapes = nodeDef.getAttrMap.get("dense_shapes")
      .getList.getShapeList.asScala
      .map { shapeProto =>
        shapeProto.getDimList.asScala.map(_.getSize.toInt).toArray
      }

    new ParseSingleExampleOperation[T](Tdense, denseKeys, denseShapes)
  }
} 
Example 57
Source File: Abs.scala    From BigDL   with Apache License 2.0
package com.intel.analytics.bigdl.utils.tf.loaders

import java.nio.ByteOrder

import com.intel.analytics.bigdl.Module
import com.intel.analytics.bigdl.nn.tf.Abs
import com.intel.analytics.bigdl.tensor.TensorNumericMath.TensorNumeric
import com.intel.analytics.bigdl.utils.tf.Context
import com.intel.analytics.bigdl.utils.tf.loaders.Utils.getType
import org.tensorflow.framework.{DataType, NodeDef}

import scala.reflect.ClassTag

class Abs extends TensorflowOpsLoader {
  override def build[T: ClassTag](nodeDef: NodeDef, byteOrder: ByteOrder,
                                  context: Context[T])(implicit ev: TensorNumeric[T]): Module[T] = {
    val t = getType(nodeDef.getAttrMap, "T")
    if (t == DataType.DT_FLOAT) {
      Abs[T, Float]()
    } else if (t == DataType.DT_DOUBLE) {
      Abs[T, Double]()
    } else {
      throw new UnsupportedOperationException(s"Not support load Abs when type is ${t}")
    }
  }
} 
Example 58
Source File: Conv2DBackpropInput.scala    From BigDL   with Apache License 2.0
package com.intel.analytics.bigdl.utils.tf.loaders

import java.nio.ByteOrder

import com.intel.analytics.bigdl.Module
import com.intel.analytics.bigdl.nn.abstractnn.{AbstractModule, Activity, DataFormat}
import com.intel.analytics.bigdl.nn.tf.Conv2DTranspose
import com.intel.analytics.bigdl.tensor.TensorNumericMath.TensorNumeric
import org.tensorflow.framework.NodeDef

import scala.reflect.ClassTag
import Utils._
import com.intel.analytics.bigdl.tensor.Tensor
import com.intel.analytics.bigdl.utils.tf.Context

class Conv2DBackpropInput extends TensorflowOpsLoader {
  override def build[T: ClassTag](nodeDef: NodeDef, byteOrder: ByteOrder,
    context: Context[T])(implicit ev: TensorNumeric[T]): Module[T] = {

    val attributes = nodeDef.getAttrMap
    val (pW, pH) =
      if (getString(attributes, "padding") == "SAME") {
        (-1, -1)
      } else {
        (0, 0)
      }
    val strideList = getIntList(attributes, "strides")
    require(strideList.head == 1, s"not support strides on batch")

    val format = getString(attributes, "data_format")
    val deconv = format match {
      case "NHWC" =>
        require(strideList(3) == 1, s"not support strides on depth")
        val strideW = strideList(1)
        val strideH = strideList(2)
        Conv2DTranspose[T](strideW, strideH, pW, pH, DataFormat.NHWC)

      case "NCHW" =>
        require(strideList(1) == 1, s"not support strides on depth")
        val strideW = strideList(2)
        val strideH = strideList(3)
        Conv2DTranspose[T](strideW, strideH, pW, pH, DataFormat.NCHW)
      case _ =>
        throw new IllegalArgumentException(s"not supported data format: $format")
    }
    deconv.asInstanceOf[AbstractModule[Activity, Activity, T]]
  }
} 
Example 59
Source File: BiasAddGrad.scala    From BigDL   with Apache License 2.0
package com.intel.analytics.bigdl.utils.tf.loaders

import java.nio.ByteOrder

import com.intel.analytics.bigdl.Module
import com.intel.analytics.bigdl.nn.abstractnn.DataFormat
import com.intel.analytics.bigdl.nn.tf.{BiasAddGrad => BiasAddGradOps}
import com.intel.analytics.bigdl.tensor.TensorNumericMath.TensorNumeric
import com.intel.analytics.bigdl.utils.tf.Context
import org.tensorflow.framework.NodeDef

import scala.reflect.ClassTag

class BiasAddGrad extends TensorflowOpsLoader {

  import Utils._

  override def build[T: ClassTag](nodeDef: NodeDef, byteOrder: ByteOrder,
    context: Context[T])(implicit ev: TensorNumeric[T]): Module[T] = {

    val format = if (getString(nodeDef.getAttrMap, "data_format") == "NHWC") {
      DataFormat.NHWC
    } else {
      DataFormat.NCHW
    }
    BiasAddGradOps[T](format)
  }
} 
Example 60
Source File: ExpandDims.scala    From BigDL   with Apache License 2.0
package com.intel.analytics.bigdl.utils.tf.loaders

import java.nio.ByteOrder

import com.intel.analytics.bigdl.Module
import com.intel.analytics.bigdl.nn.Unsqueeze
import com.intel.analytics.bigdl.nn.abstractnn.{AbstractModule, Activity}
import com.intel.analytics.bigdl.tensor.Tensor
import com.intel.analytics.bigdl.tensor.TensorNumericMath.TensorNumeric
import com.intel.analytics.bigdl.utils.tf.Context
import org.tensorflow.framework.NodeDef

import scala.reflect.ClassTag

class ExpandDims extends TensorflowOpsLoader {

  import Utils._

  override def build[T: ClassTag](nodeDef: NodeDef, byteOrder: ByteOrder,
    context: Context[T])(implicit ev: TensorNumeric[T]): Module[T] = {
    new ExpandDimsLoadTF[T]()
  }
}

class ExpandDimsLoadTF[T: ClassTag]()(implicit ev: TensorNumeric[T]) extends Adapter[T](Array(2)) {
  override def build(tensorArrays: Array[Tensor[_]]): AbstractModule[Activity, Activity, T] = {
    val axis = tensorArrays(0).asInstanceOf[Tensor[Int]].value()
    Unsqueeze[T](axis + 1)
  }
} 
Example 61
Source File: DepthwiseConv2dNative.scala    From BigDL   with Apache License 2.0
package com.intel.analytics.bigdl.utils.tf.loaders

import java.nio.ByteOrder

import com.intel.analytics.bigdl.Module
import com.intel.analytics.bigdl.nn.abstractnn.{AbstractModule, Activity, DataFormat}
import com.intel.analytics.bigdl.nn.ops.DepthwiseConv2D
import com.intel.analytics.bigdl.tensor.TensorNumericMath.TensorNumeric
import com.intel.analytics.bigdl.utils.tf.Context
import org.tensorflow.framework.NodeDef

import scala.reflect.ClassTag

class DepthwiseConv2dNative extends TensorflowOpsLoader {

  import Utils._

  override def build[T: ClassTag](nodeDef: NodeDef, byteOrder: ByteOrder, context: Context[T])
    (implicit ev: TensorNumeric[T]): Module[T] = {
    val attributes = nodeDef.getAttrMap
    val (pW, pH) =
      if (getString(attributes, "padding") == "SAME") {
        (-1, -1)
      } else {
        (0, 0)
      }
    val strideList = getIntList(attributes, "strides")
    require(strideList.head == 1, s"not support strides on batch")

    val format = if (attributes.containsKey("data_format")) {
      getString(attributes, "data_format")
    } else {
      "NHWC"
    }

    val conv = format match {
      case "NHWC" =>
        require(strideList(3) == 1, s"not support strides on depth")
        val strideW = strideList(1)
        val strideH = strideList(2)
        DepthwiseConv2D[T](strideW, strideH, pW, pH, DataFormat.NHWC)

      case "NCHW" =>
        require(strideList(1) == 1, s"not support strides on depth")
        val strideW = strideList(2)
        val strideH = strideList(3)
        DepthwiseConv2D[T](strideW, strideH, pW, pH, DataFormat.NCHW)
      case _ =>
        throw new IllegalArgumentException(s"not supported data format: $format")
    }
    conv.asInstanceOf[AbstractModule[Activity, Activity, T]]
  }
} 
Example 62
Source File: Transpose.scala    From BigDL   with Apache License 2.0
package com.intel.analytics.bigdl.utils.tf.loaders

import java.nio.ByteOrder

import com.intel.analytics.bigdl.Module
import com.intel.analytics.bigdl.nn.abstractnn.{AbstractModule, Activity}
import com.intel.analytics.bigdl.nn.{Contiguous, Sequential, Transpose => TransposeLayer}
import com.intel.analytics.bigdl.tensor.Tensor
import com.intel.analytics.bigdl.tensor.TensorNumericMath.TensorNumeric
import com.intel.analytics.bigdl.utils.tf.Context
import org.tensorflow.framework.NodeDef

import scala.collection.mutable.ArrayBuffer
import scala.reflect.ClassTag

class Transpose extends TensorflowOpsLoader {

  import Utils._

  override def build[T: ClassTag](nodeDef: NodeDef, byteOrder: ByteOrder
  , context: Context[T])(implicit ev: TensorNumeric[T]): Module[T] = {
    new TransposeLoadTF[T]()
  }
}

object TransposeLoadTF {

  def permToPair(perm: Array[Int]): Array[(Int, Int)] = {
    val numToRank = perm.zipWithIndex.toMap
    val arr = perm.indices.toArray
    val pairs = ArrayBuffer[(Int, Int)]()

    def sort(arr: Array[Int], low: Int, high: Int): Unit = {
      var i = low
      var j = high
      val pivot = arr(low + (high - low)/2)

      while (i <= j) {
        while (arr(i) < pivot) i += 1
        while (arr(j) > pivot) j -= 1

        if (i <= j) {
          exchangeNumbers(arr, i, j)
          i += 1
          j -= 1
        }
      }

      if (low < j) sort(arr, low, j)
      if (i < high) sort(arr, i, high)
    }

    def exchangeNumbers(arr: Array[Int], i: Int, j: Int): Unit = {
      val temp = arr(i)
      arr(i) = arr(j)
      arr(j) = temp
      pairs += ((i, j))
    }

    sort(arr.map(numToRank), 0, arr.length-1)

    pairs.filter(pair => pair._1 != pair._2).toArray
  }
}

class TransposeLoadTF[T: ClassTag]()(implicit ev: TensorNumeric[T]) extends Adapter[T](Array(2)) {
  import TransposeLoadTF._

  override def build(tensorArrays: Array[Tensor[_]]): AbstractModule[Activity, Activity, T] = {
    val perm = tensorArrays(0).asInstanceOf[Tensor[Int]].storage().array()
    val pairs = permToPair(perm)
    val layer = Sequential[T]()
    layer.add(TransposeLayer[T](pairs.map(x => (x._1 + 1, x._2 + 1))))
    layer.add(Contiguous())
    layer
  }
} 
Example 63
Source File: Sign.scala    From BigDL   with Apache License 2.0
package com.intel.analytics.bigdl.utils.tf.loaders

import java.nio.ByteOrder

import com.intel.analytics.bigdl.Module
import com.intel.analytics.bigdl.nn.ops.{Sign => SignOps}
import com.intel.analytics.bigdl.tensor.TensorNumericMath.TensorNumeric
import com.intel.analytics.bigdl.utils.tf.Context
import org.tensorflow.framework.{DataType, NodeDef}

import scala.reflect.ClassTag

class Sign extends TensorflowOpsLoader {

  import Utils._

  override def build[T: ClassTag](nodeDef: NodeDef, byteOrder: ByteOrder, context: Context[T])
    (implicit ev: TensorNumeric[T]): Module[T] = {
    val t = getType(nodeDef.getAttrMap, "T")
    if (t == DataType.DT_FLOAT) {
      SignOps[T, Float]()
    } else if (t == DataType.DT_DOUBLE) {
      SignOps[T, Double]()
    } else {
      throw new UnsupportedOperationException(s"Not support load Inv when type is ${t}")
    }
  }
} 
Example 64
Source File: RsqrtGrad.scala    From BigDL   with Apache License 2.0
package com.intel.analytics.bigdl.utils.tf.loaders

import java.nio.ByteOrder

import com.intel.analytics.bigdl.Module
import com.intel.analytics.bigdl.nn.tf.{RsqrtGrad => RsqrtGradOps}
import com.intel.analytics.bigdl.tensor.TensorNumericMath.TensorNumeric
import com.intel.analytics.bigdl.utils.tf.Context
import com.intel.analytics.bigdl.utils.tf.loaders.Utils.getType
import org.tensorflow.framework.{DataType, NodeDef}

import scala.reflect.ClassTag

class RsqrtGrad extends TensorflowOpsLoader {
  override def build[T: ClassTag](nodeDef: NodeDef, byteOrder: ByteOrder,
                                  context: Context[T])(implicit ev: TensorNumeric[T]): Module[T] = {
    val t = getType(nodeDef.getAttrMap, "T")
    if (t == DataType.DT_FLOAT) {
      RsqrtGradOps[T, Float]()
    } else if (t == DataType.DT_DOUBLE) {
      RsqrtGradOps[T, Double]()
    } else {
      throw new UnsupportedOperationException(s"Not support load RsqrtGrad when type is $t")
    }
  }
} 
Example 65
Source File: Exp.scala    From BigDL   with Apache License 2.0
package com.intel.analytics.bigdl.utils.tf.loaders

import java.nio.ByteOrder

import com.intel.analytics.bigdl.Module
import com.intel.analytics.bigdl.nn.ops.{Exp => ExpOps}
import com.intel.analytics.bigdl.tensor.TensorNumericMath.TensorNumeric
import com.intel.analytics.bigdl.utils.tf.Context
import org.tensorflow.framework.{DataType, NodeDef}

import scala.reflect.ClassTag

class Exp extends TensorflowOpsLoader {

  import Utils._

  override def build[T: ClassTag](nodeDef: NodeDef, byteOrder: ByteOrder, context: Context[T])
    (implicit ev: TensorNumeric[T]): Module[T] = {
    val t = getType(nodeDef.getAttrMap, "T")
    if (t == DataType.DT_FLOAT) {
      ExpOps[T, Float]()
    } else if (t == DataType.DT_DOUBLE) {
      ExpOps[T, Double]()
    } else {
      throw new UnsupportedOperationException(s"Not support load Exp when type is ${t}")
    }
  }
} 
Example 66
Source File: Lgamma.scala    From BigDL   with Apache License 2.0
package com.intel.analytics.bigdl.utils.tf.loaders

import java.nio.ByteOrder

import com.intel.analytics.bigdl.Module
import com.intel.analytics.bigdl.nn.ops.{Lgamma => LgammaOps}
import com.intel.analytics.bigdl.tensor.TensorNumericMath.TensorNumeric
import com.intel.analytics.bigdl.utils.tf.Context
import org.tensorflow.framework.{DataType, NodeDef}

import scala.reflect.ClassTag

class Lgamma extends TensorflowOpsLoader {

  import Utils._

  override def build[T: ClassTag](nodeDef: NodeDef, byteOrder: ByteOrder, context: Context[T])
                                 (implicit ev: TensorNumeric[T]): Module[T] = {
    val t = getType(nodeDef.getAttrMap, "T")
    if (t == DataType.DT_FLOAT) {
      LgammaOps[T, Float]()
    } else if (t == DataType.DT_DOUBLE) {
      LgammaOps[T, Double]()
    } else {
      throw new UnsupportedOperationException(s"Not support load Lgamma when type is ${t}")
    }
  }
} 
Example 67
Source File: FusedBatchNorm.scala    From BigDL   with Apache License 2.0
package com.intel.analytics.bigdl.utils.tf.loaders

import java.nio.ByteOrder

import com.intel.analytics.bigdl.Module
import com.intel.analytics.bigdl.nn.abstractnn.DataFormat
import com.intel.analytics.bigdl.nn.tf.{FusedBatchNorm => FusedBatchNormOps}
import com.intel.analytics.bigdl.tensor.TensorNumericMath.TensorNumeric
import com.intel.analytics.bigdl.utils.tf.Context
import org.tensorflow.framework.{DataType, NodeDef}

import scala.reflect.ClassTag

class FusedBatchNorm extends TensorflowOpsLoader {

  import Utils._

  override def build[T: ClassTag](nodeDef: NodeDef, byteOrder: ByteOrder, context: Context[T])
    (implicit ev: TensorNumeric[T]): Module[T] = {
    val t = getType(nodeDef.getAttrMap, "T")
    require(t == DataType.DT_FLOAT, "Only support float batch normal")
    val eps = getFloat(nodeDef.getAttrMap, "epsilon")
    val dataFormat = getString(nodeDef.getAttrMap, "data_format")
    val isTrain = getBoolean(nodeDef.getAttrMap, "is_training")
    if (dataFormat == "NHWC") {
      FusedBatchNormOps[T](eps, isTrain, dataFormat = DataFormat.NHWC)
    } else {
      FusedBatchNormOps[T](eps, isTrain, dataFormat = DataFormat.NCHW)
    }
  }
} 
Example 68
Source File: Max.scala    From BigDL   with Apache License 2.0
package com.intel.analytics.bigdl.utils.tf.loaders

import java.nio.ByteOrder

import com.intel.analytics.bigdl.Module
import com.intel.analytics.bigdl.nn.ops.{Max => MaxOps}
import com.intel.analytics.bigdl.tensor.TensorNumericMath.TensorNumeric
import com.intel.analytics.bigdl.utils.tf.Context
import org.tensorflow.framework.{DataType, NodeDef}

import scala.reflect.ClassTag

class Max extends TensorflowOpsLoader {

  import Utils._

  override def build[T: ClassTag](nodeDef: NodeDef, byteOrder: ByteOrder, context: Context[T])
    (implicit ev: TensorNumeric[T]): Module[T] = {
    val t = getType(nodeDef.getAttrMap, "T")
    val keepDims = getBoolean(nodeDef.getAttrMap, "keep_dims")
    if (t == DataType.DT_FLOAT) {
      MaxOps[T, Float](keepDims, true)
    } else if (t == DataType.DT_DOUBLE) {
      MaxOps[T, Double](keepDims, true)
    } else if (t == DataType.DT_INT32) {
      MaxOps[T, Int](keepDims, true)
    } else {
      throw new UnsupportedOperationException(s"Not support load Gather when type is ${t}")
    }
  }
} 
Example 69
Source File: Elu.scala    From BigDL   with Apache License 2.0
package com.intel.analytics.bigdl.utils.tf.loaders

import java.nio.ByteOrder

import com.intel.analytics.bigdl.Module
import com.intel.analytics.bigdl.nn.tf.ELU
import com.intel.analytics.bigdl.tensor.TensorNumericMath.TensorNumeric
import com.intel.analytics.bigdl.utils.tf.Context
import com.intel.analytics.bigdl.utils.tf.loaders.Utils.getType
import org.tensorflow.framework.{DataType, NodeDef}

import scala.reflect.ClassTag

class Elu extends TensorflowOpsLoader {
  override def build[T: ClassTag](nodeDef: NodeDef, byteOrder: ByteOrder,
     context: Context[T])(implicit ev: TensorNumeric[T]): Module[T] = {
    val t = getType(nodeDef.getAttrMap, "T")
    if (t == DataType.DT_FLOAT) {
      ELU[T, Float]()
    } else if (t == DataType.DT_DOUBLE) {
      ELU[T, Double]()
    } else {
      throw new UnsupportedOperationException(s"Not support load ELU when type is ${t}")
    }
  }
} 
Example 70
Source File: FusedBatchNormGradV2.scala    From BigDL   with Apache License 2.0
package com.intel.analytics.bigdl.utils.tf.loaders

import java.nio.ByteOrder

import com.intel.analytics.bigdl.Module
import com.intel.analytics.bigdl.nn.abstractnn.DataFormat
import com.intel.analytics.bigdl.nn.tf.{FusedBatchNormGrad => FusedBatchNormGradOps}
import com.intel.analytics.bigdl.tensor.TensorNumericMath.TensorNumeric
import com.intel.analytics.bigdl.utils.tf.Context
import org.tensorflow.framework.NodeDef

import scala.reflect.ClassTag

class FusedBatchNormGradV2 extends TensorflowOpsLoader {

  import Utils._

  override def build[T: ClassTag](nodeDef: NodeDef, byteOrder: ByteOrder, context: Context[T])
    (implicit ev: TensorNumeric[T]): Module[T] = {
    val eps = getFloat(nodeDef.getAttrMap, "epsilon")
    val dataFormat = getString(nodeDef.getAttrMap, "data_format")
    val isTrain = getBoolean(nodeDef.getAttrMap, "is_training")
    FusedBatchNormGradOps[T](eps,
      if (dataFormat == "NHWC") DataFormat.NHWC else DataFormat.NCHW,
      isTrain)
  }
} 
Example 71
Source File: Cast.scala    From BigDL   with Apache License 2.0
package com.intel.analytics.bigdl.utils.tf.loaders

import java.nio.ByteOrder

import com.intel.analytics.bigdl.Module
import com.intel.analytics.bigdl.nn.ops.{Cast => CastOps}
import com.intel.analytics.bigdl.tensor.TensorNumericMath.TensorNumeric
import com.intel.analytics.bigdl.utils.tf.Context
import org.tensorflow.framework.{DataType, NodeDef}

import scala.reflect.ClassTag

class Cast extends TensorflowOpsLoader {

  import Utils._

  override def build[T: ClassTag](nodeDef: NodeDef, byteOrder: ByteOrder
    , context: Context[T])(implicit ev: TensorNumeric[T]): Module[T] = {
    val attr = nodeDef.getAttrMap
    val dataType = getType(attr, "DstT")

    val layer = dataType match {
      case DataType.DT_INT8 => CastOps[T, Int]()
      case DataType.DT_INT16 => CastOps[T, Int]()
      case DataType.DT_UINT8 => CastOps[T, Int]()
      case DataType.DT_UINT16 => CastOps[T, Int]()
      case DataType.DT_INT32 => CastOps[T, Int]()
      case DataType.DT_INT64 => CastOps[T, Int]()
      case DataType.DT_BOOL => CastOps[T, Boolean]()
      case DataType.DT_STRING => CastOps[T, String]()
      case DataType.DT_FLOAT => CastOps[T, Float]()
      case DataType.DT_DOUBLE => CastOps[T, Double]()
      case _ => throw new UnsupportedOperationException("Unsupported data type: "
        + dataType.toString)
    }
    layer
  }
} 
Example 72
Source File: Dilation2D.scala    From BigDL   with Apache License 2.0
package com.intel.analytics.bigdl.utils.tf.loaders

import java.nio.ByteOrder

import com.intel.analytics.bigdl.Module
import com.intel.analytics.bigdl.nn.ops.{Dilation2D => Dilation2DOps}
import com.intel.analytics.bigdl.tensor.TensorNumericMath.TensorNumeric
import com.intel.analytics.bigdl.utils.tf.Context
import org.tensorflow.framework.{DataType, NodeDef}

import scala.reflect.ClassTag

class Dilation2D extends TensorflowOpsLoader {

  import Utils._

  override def build[T: ClassTag](nodeDef: NodeDef, byteOrder: ByteOrder,
                                  context: Context[T])(implicit ev: TensorNumeric[T]): Module[T] = {
    val attributes = nodeDef.getAttrMap
    val padding = getString(attributes, "padding")
    val strides = getIntList(attributes, "strides").toArray
    val rates = getIntList(attributes, "rates").toArray
    val t = getType(nodeDef.getAttrMap, "T")

    if (t == DataType.DT_FLOAT) {
      Dilation2DOps[T, Float](strides, rates, padding)
    } else if (t == DataType.DT_DOUBLE) {
      Dilation2DOps[T, Double](strides, rates, padding)
    } else {
      throw new UnsupportedOperationException(s"Not support load Dilation2D when type is ${t}")
    }
  }
} 
Example 73
Source File: Dilation2DBackpropInput.scala    From BigDL   with Apache License 2.0
package com.intel.analytics.bigdl.utils.tf.loaders

import java.nio.ByteOrder

import com.intel.analytics.bigdl.Module
import com.intel.analytics.bigdl.nn.ops.{Dilation2DBackpropInput => Dilation2DBackpropInputOps}
import com.intel.analytics.bigdl.tensor.TensorNumericMath.TensorNumeric
import com.intel.analytics.bigdl.utils.tf.Context
import org.tensorflow.framework.{DataType, NodeDef}

import scala.reflect.ClassTag

class Dilation2DBackpropInput extends TensorflowOpsLoader {

  import Utils._

  override def build[T: ClassTag](nodeDef: NodeDef, byteOrder: ByteOrder,
                                  context: Context[T])(implicit ev: TensorNumeric[T]): Module[T] = {
    val attributes = nodeDef.getAttrMap
    val padding = getString(attributes, "padding")
    val strides = getIntList(attributes, "strides").toArray
    val rates = getIntList(attributes, "rates").toArray
    val t = getType(nodeDef.getAttrMap, "T")

    if (t == DataType.DT_FLOAT) {
      Dilation2DBackpropInputOps[T, Float](strides, rates, padding)
    } else if (t == DataType.DT_DOUBLE) {
      Dilation2DBackpropInputOps[T, Double](strides, rates, padding)
    } else {
      throw new UnsupportedOperationException(
        s"Not support load Dilation2DBackpropInput when type is ${t}")
    }
  }
} 
Example 74
Source File: Square.scala    From BigDL   with Apache License 2.0
package com.intel.analytics.bigdl.utils.tf.loaders

import java.nio.ByteOrder

import com.intel.analytics.bigdl.Module
import com.intel.analytics.bigdl.nn.tf.Power
import com.intel.analytics.bigdl.tensor.TensorNumericMath.TensorNumeric
import com.intel.analytics.bigdl.utils.tf.Context
import com.intel.analytics.bigdl.utils.tf.loaders.Utils.getType
import org.tensorflow.framework.{DataType, NodeDef}

import scala.reflect.ClassTag

class Square extends TensorflowOpsLoader {
  override def build[T: ClassTag](nodeDef: NodeDef, byteOrder: ByteOrder,
                                  context: Context[T])(implicit ev: TensorNumeric[T]): Module[T] = {
    val t = getType(nodeDef.getAttrMap, "T")
    if (t == DataType.DT_FLOAT) {
      Power[T, Float](2.0)
    } else if (t == DataType.DT_DOUBLE) {
      Power[T, Double](2.0)
    } else {
      throw new UnsupportedOperationException(s"Not support load Square when type is $t")
    }
  }
} 
Example 75
Source File: SoftsignGrad.scala    From BigDL   with Apache License 2.0
package com.intel.analytics.bigdl.utils.tf.loaders

import java.nio.ByteOrder

import com.intel.analytics.bigdl.Module
import com.intel.analytics.bigdl.nn.tf.{SoftsignGrad => SoftsignGradOps}
import com.intel.analytics.bigdl.tensor.TensorNumericMath.TensorNumeric
import com.intel.analytics.bigdl.utils.tf.Context
import com.intel.analytics.bigdl.utils.tf.loaders.Utils.getType
import org.tensorflow.framework.{DataType, NodeDef}

import scala.reflect.ClassTag

class SoftsignGrad extends TensorflowOpsLoader {

  override def build[T: ClassTag](nodeDef: NodeDef, byteOrder: ByteOrder,
                                  context: Context[T])(implicit ev: TensorNumeric[T]): Module[T] = {
    val t = getType(nodeDef.getAttrMap, "T")
    if (t == DataType.DT_FLOAT) {
      SoftsignGradOps[T, Float]()
    } else if (t == DataType.DT_DOUBLE) {
      SoftsignGradOps[T, Double]()
    } else {
      throw new UnsupportedOperationException(s"Not support load SoftsignGrad when type is ${t}")
    }
  }
} 
Example 76
Source File: Conv3DBackpropInput.scala    From BigDL   with Apache License 2.0
package com.intel.analytics.bigdl.utils.tf.loaders

import java.nio.ByteOrder

import com.intel.analytics.bigdl.Module
import com.intel.analytics.bigdl.nn.abstractnn.DataFormat
import com.intel.analytics.bigdl.nn.tf.{Conv3DBackpropInput => Conv3DBackpropInputOps}
import com.intel.analytics.bigdl.tensor.TensorNumericMath.TensorNumeric
import com.intel.analytics.bigdl.utils.tf.Context
import org.tensorflow.framework.NodeDef

import scala.reflect.ClassTag

class Conv3DBackpropInput extends TensorflowOpsLoader {

  import Utils._

  override def build[T: ClassTag](nodeDef: NodeDef, byteOrder: ByteOrder,
                                  context: Context[T])(implicit ev: TensorNumeric[T]): Module[T] = {
    val attributes = nodeDef.getAttrMap
    val (pT, pW, pH) =
      if (getString(attributes, "padding") == "SAME") {
        (-1, -1, -1)
      } else {
        (0, 0, 0)
      }
    val strideList = getIntList(attributes, "strides")
    require(strideList.head == 1, s"not support strides on batch")

    require(strideList(4) == 1, s"not support strides on depth")
    val dT = strideList(1)
    val dW = strideList(2)
    val dH = strideList(3)
    Conv3DBackpropInputOps[T](dT, dW, dH, pT, pW, pH, DataFormat.NHWC)
  }
} 
Example 77
Source File: Range.scala    From BigDL   with Apache License 2.0
package com.intel.analytics.bigdl.utils.tf.loaders

import java.nio.ByteOrder

import com.intel.analytics.bigdl.Module
import com.intel.analytics.bigdl.tensor.TensorNumericMath.TensorNumeric
import com.intel.analytics.bigdl.utils.tf.Context
import org.tensorflow.framework.{DataType, NodeDef}
import com.intel.analytics.bigdl.nn.ops.RangeOps

import scala.reflect.ClassTag

class Range extends TensorflowOpsLoader {

  import Utils._

  override def build[T: ClassTag](nodeDef: NodeDef, byteOrder: ByteOrder,
    context: Context[T])(implicit ev: TensorNumeric[T]): Module[T] = {
    val t = getType(nodeDef.getAttrMap, "Tidx")
    if (t == DataType.DT_FLOAT) {
      RangeOps[T, Float]()
    } else if (t == DataType.DT_DOUBLE) {
      RangeOps[T, Double]()
    } else if (t == DataType.DT_INT32) {
      RangeOps[T, Int]()
    } else {
      throw new UnsupportedOperationException(s"Not support load Log when type is ${t}")
    }
  }
} 
Example 78
Source File: ReciprocalGrad.scala    From BigDL   with Apache License 2.0
package com.intel.analytics.bigdl.utils.tf.loaders

import java.nio.ByteOrder

import com.intel.analytics.bigdl.Module
import com.intel.analytics.bigdl.nn.ops.{InvGrad => InvGradOps}
import com.intel.analytics.bigdl.tensor.TensorNumericMath.TensorNumeric
import com.intel.analytics.bigdl.utils.tf.Context
import org.tensorflow.framework.{DataType, NodeDef}

import scala.reflect.ClassTag

class ReciprocalGrad extends TensorflowOpsLoader {

  import Utils._

  override def build[T: ClassTag](nodeDef: NodeDef, byteOrder: ByteOrder, context: Context[T])
    (implicit ev: TensorNumeric[T]): Module[T] = {

    val t = getType(nodeDef.getAttrMap, "T")
    if (t == DataType.DT_FLOAT) {
      InvGradOps[T, Float]()
    } else if (t == DataType.DT_DOUBLE) {
      InvGradOps[T, Double]()
    } else {
      throw new UnsupportedOperationException(s"Not support load Inv when type is ${t}")
    }
  }
} 
Example 79
Source File: FloorMod.scala    From BigDL   with Apache License 2.0
package com.intel.analytics.bigdl.utils.tf.loaders

import java.nio.ByteOrder

import com.intel.analytics.bigdl.Module
import com.intel.analytics.bigdl.nn.ops.{FloorMod => FloorModOps}
import com.intel.analytics.bigdl.tensor.TensorNumericMath.TensorNumeric
import com.intel.analytics.bigdl.utils.tf.Context
import org.tensorflow.framework.{DataType, NodeDef}

import scala.reflect.ClassTag

class FloorMod extends TensorflowOpsLoader {

  import Utils._

  override def build[T: ClassTag](nodeDef: NodeDef, byteOrder: ByteOrder, context: Context[T])
    (implicit ev: TensorNumeric[T]): Module[T] = {
    val t = getType(nodeDef.getAttrMap, "T")
    if (t == DataType.DT_FLOAT) {
      FloorModOps[T, Float]()
    } else if (t == DataType.DT_DOUBLE) {
      FloorModOps[T, Double]()
    } else if (t == DataType.DT_INT32) {
      FloorModOps[T, Int]()
    } else {
      throw new UnsupportedOperationException(s"Not support load Mod when type is ${t}")
    }
  }
} 
Example 80
Source File: Pad.scala    From BigDL   with Apache License 2.0
package com.intel.analytics.bigdl.utils.tf.loaders

import java.nio.ByteOrder

import com.intel.analytics.bigdl.Module
import com.intel.analytics.bigdl.nn.abstractnn.{AbstractModule, Activity}
import com.intel.analytics.bigdl.nn.{Padding, Sequential}
import com.intel.analytics.bigdl.tensor.Tensor
import com.intel.analytics.bigdl.tensor.TensorNumericMath.TensorNumeric
import com.intel.analytics.bigdl.utils.tf.{Context, TFUtils}
import org.tensorflow.framework.NodeDef

import scala.collection.mutable.ArrayBuffer
import scala.reflect.ClassTag

class Pad extends TensorflowOpsLoader {

  import Utils._

  override def build[T: ClassTag](nodeDef: NodeDef, byteOrder: ByteOrder,
    context: Context[T])(implicit ev: TensorNumeric[T]): Module[T] = {
    new PadLoadTF[T]()
  }
}

class PadLoadTF[T: ClassTag]()(implicit ev: TensorNumeric[T]) extends Adapter[T](Array(2)) {
  override def build(tensorArrays: Array[Tensor[_]]): AbstractModule[Activity, Activity, T] = {
    val paddings = tensorArrays(0).asInstanceOf[Tensor[Int]]
    val pad = ArrayBuffer[Int]()
    val padding = Sequential[T]()

    for(dim <- 1 to paddings.size(1)) {
      if (paddings.valueAt(dim, 1) != 0 || paddings.valueAt(dim, 2) != 0) {
        if (paddings(Array(dim, 1)) != 0) {
          padding.add(Padding[T](dim, -paddings.valueAt(dim, 1), 4))
        }
        if (paddings(Array(dim, 2)) != 0) {
          padding.add(Padding[T](dim, paddings.valueAt(dim, 2), 4))
        }
      }
    }

    padding
  }
} 
Example 81
Source File: Mod.scala    From BigDL   with Apache License 2.0
package com.intel.analytics.bigdl.utils.tf.loaders

import java.nio.ByteOrder

import com.intel.analytics.bigdl.Module
import com.intel.analytics.bigdl.nn.ops.{Mod => ModOps}
import com.intel.analytics.bigdl.tensor.TensorNumericMath.TensorNumeric
import com.intel.analytics.bigdl.utils.tf.Context
import org.tensorflow.framework.{DataType, NodeDef}

import scala.reflect.ClassTag

class Mod extends TensorflowOpsLoader {

  import Utils._

  override def build[T: ClassTag](nodeDef: NodeDef, byteOrder: ByteOrder, context: Context[T])
    (implicit ev: TensorNumeric[T]): Module[T] = {
    val t = getType(nodeDef.getAttrMap, "T")
    if (t == DataType.DT_FLOAT) {
      ModOps[T, Float]()
    } else if (t == DataType.DT_DOUBLE) {
      ModOps[T, Double]()
    } else if (t == DataType.DT_INT32) {
      ModOps[T, Int]()
    } else {
      throw new UnsupportedOperationException(s"Not support load Mod when type is ${t}")
    }
  }
} 
Example 82
Source File: Reshape.scala    From BigDL   with Apache License 2.0
package com.intel.analytics.bigdl.utils.tf.loaders

import java.nio.ByteOrder

import com.intel.analytics.bigdl.Module
import com.intel.analytics.bigdl.nn.{Reshape => ReshapeOps}
import com.intel.analytics.bigdl.nn.InferReshape
import com.intel.analytics.bigdl.nn.abstractnn.{AbstractModule, Activity}
import com.intel.analytics.bigdl.tensor.Tensor
import com.intel.analytics.bigdl.tensor.TensorNumericMath.TensorNumeric
import com.intel.analytics.bigdl.utils.tf.Context
import org.tensorflow.framework.NodeDef

import scala.reflect.ClassTag

class Reshape extends TensorflowOpsLoader {
  override def build[T: ClassTag](nodeDef: NodeDef, byteOrder: ByteOrder,
    context: Context[T])(implicit ev: TensorNumeric[T]): Module[T] = {
    new ReshapeLoadTF[T]()
  }
}

class ReshapeLoadTF[T: ClassTag]()(implicit ev: TensorNumeric[T]) extends Adapter[T](Array(2)) {
  override def build(tensorArrays: Array[Tensor[_]]): AbstractModule[Activity, Activity, T] = {
    val sizes = tensorArrays(0).asInstanceOf[Tensor[Int]]

    val batchMode = if (sizes.nDimension() >= 1 && sizes.nElement() > 0) {
      sizes.valueAt(1) == -1
    } else {
      false
    }
    val arraySize = new Array[Int](if (batchMode) sizes.nElement() - 1 else sizes.nElement())
    var i = if (batchMode) 2 else 1
    var k = 0
    while(i <= sizes.nElement()) {
      arraySize(k) = sizes.valueAt(i)
      k += 1
      i += 1
    }
    val infer = arraySize.contains(-1)
    if (infer) InferReshape[T](size = arraySize, batchMode)
    else ReshapeOps[T](size = arraySize, Some(batchMode))
  }
} 
Example 83
Source File: Softplus.scala    From BigDL   with Apache License 2.0
package com.intel.analytics.bigdl.utils.tf.loaders

import java.nio.ByteOrder

import com.intel.analytics.bigdl.Module
import com.intel.analytics.bigdl.nn.tf.SoftPlus
import com.intel.analytics.bigdl.tensor.TensorNumericMath.TensorNumeric
import com.intel.analytics.bigdl.utils.tf.Context
import com.intel.analytics.bigdl.utils.tf.loaders.Utils.getType
import org.tensorflow.framework.{DataType, NodeDef}

import scala.reflect.ClassTag

class Softplus extends TensorflowOpsLoader {
  override def build[T: ClassTag](nodeDef: NodeDef, byteOrder: ByteOrder,
                                  context: Context[T])(implicit ev: TensorNumeric[T]): Module[T] = {
    val t = getType(nodeDef.getAttrMap, "T")
    if (t == DataType.DT_FLOAT) {
      SoftPlus[T, Float]()
    } else if (t == DataType.DT_DOUBLE) {
      SoftPlus[T, Double]()
    } else {
      throw new UnsupportedOperationException(s"Not support load SoftPlus when type is ${t}")
    }
  }
} 
Example 84
Source File: ControlFlowOps.scala    From BigDL   with Apache License 2.0
package com.intel.analytics.bigdl.utils.tf.loaders

import java.nio.ByteOrder

import com.intel.analytics.bigdl.Module
import com.intel.analytics.bigdl.nn.tf.{MergeOps, SwitchOps, Enter => EnterOps, Exit => ExitOps,
  LoopCondition => LoopConditionOps, NextIteration => NextIterationOps}
import com.intel.analytics.bigdl.nn.tf.ControlDependency
import com.intel.analytics.bigdl.tensor.TensorNumericMath.TensorNumeric
import com.intel.analytics.bigdl.utils.tf.Context
import com.intel.analytics.bigdl.utils.tf.Tensorflow._
import com.intel.analytics.bigdl.utils.tf.loaders.Utils.getType
import org.tensorflow.framework.{DataType, NodeDef}

import scala.reflect.ClassTag

private[bigdl] class Switch extends TensorflowOpsLoader {
  override def build[T: ClassTag](nodeDef: NodeDef, byteOrder: ByteOrder
    , context: Context[T])(implicit ev: TensorNumeric[T]): Module[T] = {
    new SwitchOps[T]()
  }
}

private[bigdl] class Exit extends TensorflowOpsLoader {
  override def build[T: ClassTag](nodeDef: NodeDef, byteOrder: ByteOrder
    , context: Context[T])(implicit ev: TensorNumeric[T]): Module[T] = {
    new ExitOps[T]()
  }
}

private[bigdl] class NextIteration extends TensorflowOpsLoader {
  override def build[T: ClassTag](nodeDef: NodeDef, byteOrder: ByteOrder
    , context: Context[T])(implicit ev: TensorNumeric[T]): Module[T] = {
    val t = getType(nodeDef.getAttrMap, "T")
    if (t == DataType.DT_FLOAT) {
      new NextIterationOps[T, Float]()
    } else if (t == DataType.DT_INT32) {
      new NextIterationOps[T, Int]()
    } else {
      throw new UnsupportedOperationException(s"Not support numeric type $t")
    }
  }
}

private[bigdl] class Enter extends TensorflowOpsLoader {
  override def build[T: ClassTag](nodeDef: NodeDef, byteOrder: ByteOrder
    , context: Context[T])(implicit ev: TensorNumeric[T]): Module[T] = {
    val frameName = stringAttr(nodeDef, "frame_name")
    new EnterOps[T](frameName)
  }
}

private[bigdl] class RefEnter extends TensorflowOpsLoader {
  override def build[T: ClassTag](nodeDef: NodeDef, byteOrder: ByteOrder
    , context: Context[T])(implicit ev: TensorNumeric[T]): Module[T] = {
    val frameName = stringAttr(nodeDef, "frame_name")
    new EnterOps[T](frameName)
  }
}

private[bigdl] class LoopCond extends TensorflowOpsLoader {
  override def build[T: ClassTag](nodeDef: NodeDef, byteOrder: ByteOrder
    , context: Context[T])(implicit ev: TensorNumeric[T]): Module[T] = {
    new LoopConditionOps[T]()
  }
}

private[bigdl] class Merge extends TensorflowOpsLoader {
  override def build[T: ClassTag](nodeDef: NodeDef, byteOrder: ByteOrder
    , context: Context[T])(implicit ev: TensorNumeric[T]): Module[T] = {
    new MergeOps[T]()
  }
}

private[bigdl] class ControlTrigger extends TensorflowOpsLoader {
  override def build[T: ClassTag](nodeDef: NodeDef, byteOrder: ByteOrder
    , context: Context[T])(implicit ev: TensorNumeric[T]): Module[T] = {
    new ControlDependency[T]()
  }
} 
Example 85
Source File: Log.scala    From BigDL   with Apache License 2.0
package com.intel.analytics.bigdl.utils.tf.loaders

import java.nio.ByteOrder

import com.intel.analytics.bigdl.Module
import com.intel.analytics.bigdl.nn.tf.Log
import com.intel.analytics.bigdl.tensor.TensorNumericMath.TensorNumeric
import org.tensorflow.framework.{DataType, NodeDef}
import com.intel.analytics.bigdl.utils.tf.Context
import com.intel.analytics.bigdl.utils.tf.loaders.Utils.getType

import scala.reflect.ClassTag

class Log extends TensorflowOpsLoader {
  override def build[T: ClassTag](nodeDef: NodeDef, byteOrder: ByteOrder,
                                  context: Context[T])(implicit ev: TensorNumeric[T]): Module[T] = {
    val t = getType(nodeDef.getAttrMap, "T")
    if (t == DataType.DT_FLOAT) {
      Log[T, Float]()
    } else if (t == DataType.DT_DOUBLE) {
      Log[T, Double]()
    } else {
      throw new UnsupportedOperationException(s"Not support load Log when type is ${t}")
    }
  }
} 
Example 86
Source File: Inv.scala    From BigDL   with Apache License 2.0
package com.intel.analytics.bigdl.utils.tf.loaders

import java.nio.ByteOrder

import com.intel.analytics.bigdl.Module
import com.intel.analytics.bigdl.nn.ops.{Inv => InvOps}
import com.intel.analytics.bigdl.tensor.TensorNumericMath.TensorNumeric
import com.intel.analytics.bigdl.utils.tf.Context
import org.tensorflow.framework.{DataType, NodeDef}

import scala.reflect.ClassTag

class Inv extends TensorflowOpsLoader {

  import Utils._

  override def build[T: ClassTag](nodeDef: NodeDef, byteOrder: ByteOrder, context: Context[T])
    (implicit ev: TensorNumeric[T]): Module[T] = {
    val t = getType(nodeDef.getAttrMap, "T")
    if (t == DataType.DT_FLOAT) {
      InvOps[T, Float]()
    } else if (t == DataType.DT_DOUBLE) {
      InvOps[T, Double]()
    } else {
      throw new UnsupportedOperationException(s"Not support load Inv when type is ${t}")
    }
  }
} 
Example 87
Source File: Sum.scala    From BigDL   with Apache License 2.0
package com.intel.analytics.bigdl.utils.tf.loaders

import java.nio.ByteOrder

import com.intel.analytics.bigdl.Module
import com.intel.analytics.bigdl.nn.ops.{Sum => SumOps}
import com.intel.analytics.bigdl.tensor.TensorNumericMath.TensorNumeric
import com.intel.analytics.bigdl.utils.tf.Context
import org.tensorflow.framework.{DataType, NodeDef}

import scala.reflect.ClassTag

class Sum extends TensorflowOpsLoader {

  import Utils._

  override def build[T: ClassTag](nodeDef: NodeDef, byteOrder: ByteOrder
    , context: Context[T])(implicit ev: TensorNumeric[T]): Module[T] = {
    val attr = nodeDef.getAttrMap
    val keepDims = getBoolean(attr, "keep_dims")
    val dataType = getType(attr, "T")
    // Note: every integer width (int8 through int64, signed or unsigned)
    // accumulates as Int here; floating types keep their own precision.
    dataType match {
      case DataType.DT_INT8 =>
        SumOps[T, Int](keepDims, startFromZero = true)
      case DataType.DT_INT16 =>
        SumOps[T, Int](keepDims, startFromZero = true)
      case DataType.DT_UINT8 =>
        SumOps[T, Int](keepDims, startFromZero = true)
      case DataType.DT_UINT16 =>
        SumOps[T, Int](keepDims, startFromZero = true)
      case DataType.DT_INT32 =>
        SumOps[T, Int](keepDims, startFromZero = true)
      case DataType.DT_INT64 =>
        SumOps[T, Int](keepDims, startFromZero = true)
      case DataType.DT_FLOAT =>
        SumOps[T, Float](keepDims, startFromZero = true)
      case DataType.DT_DOUBLE =>
        SumOps[T, Double](keepDims, startFromZero = true)
      case _ => throw new UnsupportedOperationException()
    }
  }
} 
Example 88
Source File: ParseExample.scala    From BigDL   with Apache License 2.0
package com.intel.analytics.bigdl.utils.tf.loaders

import java.nio.ByteOrder

import com.intel.analytics.bigdl.Module
import com.intel.analytics.bigdl.nn.tf.{ParseExample => ParseExampleOperation}
import com.intel.analytics.bigdl.tensor._
import com.intel.analytics.bigdl.tensor.TensorNumericMath.TensorNumeric
import com.intel.analytics.bigdl.utils.tf.Context
import org.tensorflow.framework.{DataType, NodeDef}

import collection.JavaConverters._
import scala.reflect.ClassTag

class ParseExample extends TensorflowOpsLoader {

  import Utils._

  override def build[T: ClassTag](nodeDef: NodeDef, byteOrder: ByteOrder,
    context: Context[T])(implicit ev: TensorNumeric[T]): Module[T] = {
    val Ndense = nodeDef.getAttrMap.get("Ndense").getI.toInt
    val Tdense = nodeDef.getAttrMap.get("Tdense")
      .getList.getTypeList.asScala
      .map {
        case DataType.DT_INT64 => LongType
        case DataType.DT_INT32 => IntType
        case DataType.DT_FLOAT => FloatType
        case DataType.DT_DOUBLE => DoubleType
        case DataType.DT_STRING => StringType
        case _ => throw new UnsupportedOperationException("Unsupported data type")
      }
    val denseShapes = nodeDef.getAttrMap.get("dense_shapes")
      .getList.getShapeList.asScala
      .map { shapeProto =>
        shapeProto.getDimList.asScala.map(_.getSize.toInt).toArray
      }

    new ParseExampleOperation[T](Ndense, Tdense, denseShapes)
  }
} 
Example 89
Source File: Slice.scala    From BigDL   with Apache License 2.0
package com.intel.analytics.bigdl.utils.tf.loaders

import java.nio.ByteOrder

import com.intel.analytics.bigdl.Module
import com.intel.analytics.bigdl.nn.abstractnn.{AbstractModule, Activity}
import com.intel.analytics.bigdl.nn.ops.{Slice => SliceOps}
import com.intel.analytics.bigdl.tensor.Tensor
import com.intel.analytics.bigdl.tensor.TensorNumericMath.TensorNumeric
import com.intel.analytics.bigdl.utils.tf.Context
import org.tensorflow.framework.NodeDef

import scala.reflect.ClassTag

class Slice extends TensorflowOpsLoader {

  override def build[T: ClassTag](nodeDef: NodeDef, byteOrder: ByteOrder,
    context: Context[T])(implicit ev: TensorNumeric[T]): Module[T] = {
    new SliceLoadTF[T]()
  }
}

class SliceLoadTF[T: ClassTag]()(implicit ev: TensorNumeric[T]) extends Adapter[T](Array(2, 3)) {
  import Utils._

  override def build(tensorArrays: Array[Tensor[_]]): AbstractModule[Activity, Activity, T] = {
    val begin = toArray(tensorArrays(0).asInstanceOf[Tensor[Int]])
    val size = toArray(tensorArrays(1).asInstanceOf[Tensor[Int]])
    SliceOps[T](begin, size)
  }
} 
Example 90
Source File: Const.scala    From BigDL   with Apache License 2.0
package com.intel.analytics.bigdl.utils.tf.loaders

import java.nio.ByteOrder

import com.google.protobuf.ByteString
import com.intel.analytics.bigdl.Module
import com.intel.analytics.bigdl.nn.tf.{Const => ConstOps}
import com.intel.analytics.bigdl.tensor.Tensor
import com.intel.analytics.bigdl.tensor.TensorNumericMath.TensorNumeric
import com.intel.analytics.bigdl.tensor.TensorNumericMath.TensorNumeric.{NumericBoolean, NumericChar, NumericDouble, NumericFloat, NumericInt, NumericLong, NumericShort, NumericString}
import com.intel.analytics.bigdl.utils.tf.TFTensorNumeric.NumericByteString
import com.intel.analytics.bigdl.utils.tf.{Context, TFUtils}
import org.tensorflow.framework.NodeDef

import scala.reflect.ClassTag

class Const extends TensorflowOpsLoader {
  override def build[T: ClassTag](nodeDef: NodeDef, byteOrder: ByteOrder,
    context: Context[T])(implicit ev: TensorNumeric[T]): Module[T] = {
    val value = TFUtils.parseTensor(nodeDef.getAttrMap.get("value").getTensor, byteOrder)
    val const = value.getTensorNumeric() match {
      case NumericFloat => ConstOps[T, Float](value.asInstanceOf[Tensor[Float]])
      case NumericDouble => ConstOps[T, Double](value.asInstanceOf[Tensor[Double]])
      case NumericInt => ConstOps[T, Int](value.asInstanceOf[Tensor[Int]])
      case NumericLong => ConstOps[T, Long](value.asInstanceOf[Tensor[Long]])
      case NumericChar => ConstOps[T, Char](value.asInstanceOf[Tensor[Char]])
      case NumericBoolean => ConstOps[T, Boolean](value.asInstanceOf[Tensor[Boolean]])
      case NumericShort => ConstOps[T, Short](value.asInstanceOf[Tensor[Short]])
      case NumericString => ConstOps[T, String](value.asInstanceOf[Tensor[String]])
      case NumericByteString => ConstOps[T, ByteString](value.asInstanceOf[Tensor[ByteString]])
    }
    const.asInstanceOf[Module[T]]
  }
} 
Example 91
Source File: AvgPool.scala    From BigDL   with Apache License 2.0
package com.intel.analytics.bigdl.utils.tf.loaders

import java.nio.ByteOrder

import com.intel.analytics.bigdl.Module
import com.intel.analytics.bigdl.nn.SpatialAveragePooling
import com.intel.analytics.bigdl.nn.abstractnn.DataFormat
import com.intel.analytics.bigdl.tensor.Tensor
import com.intel.analytics.bigdl.tensor.TensorNumericMath.TensorNumeric
import com.intel.analytics.bigdl.utils.tf.Context
import org.tensorflow.framework.NodeDef

import scala.reflect.ClassTag

class AvgPool extends TensorflowOpsLoader {

  import Utils._

  override def build[T: ClassTag](nodeDef: NodeDef, byteOrder: ByteOrder,
    context: Context[T])(implicit ev: TensorNumeric[T]): Module[T] = {

    val attributes = nodeDef.getAttrMap
    val format = getString(attributes, "data_format")
    val strideList = getIntList(attributes, "strides")
    val kernelList = getIntList(attributes, "ksize")

    val (strideH, strideW, ksizeH, ksizeW) = format match {
      case "NHWC" =>
        require(strideList(3) == 1, s"not support strides on depth")
        (strideList(1), strideList(2), kernelList(1), kernelList(2))
      case "NCHW" =>
        require(strideList(1) == 1, s"not support strides on depth")
        (strideList(2), strideList(3), kernelList(2), kernelList(3))
      case _ =>
        throw new IllegalArgumentException(s"not supported data format: $format")
    }

    val (pW, pH) =
      if (getString(attributes, "padding") == "SAME") {
        (-1, -1)
      } else {
        (0, 0)
      }

    SpatialAveragePooling[T](ksizeW, ksizeH, strideW, strideH, pW, pH,
      countIncludePad = false, format = DataFormat(format))
  }
} 
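
The data-format dispatch above just selects different positions out of the 4-element strides and ksize attributes; a sketch with plain Seqs (names are illustrative):

def poolParams(format: String, strides: Seq[Int], ksize: Seq[Int]): (Int, Int, Int, Int) =
  format match {
    case "NHWC" => (strides(1), strides(2), ksize(1), ksize(2)) // (strideH, strideW, kH, kW)
    case "NCHW" => (strides(2), strides(3), ksize(2), ksize(3))
    case other  => throw new IllegalArgumentException(s"not supported data format: $other")
  }

poolParams("NHWC", Seq(1, 2, 2, 1), Seq(1, 3, 3, 1)) // (2, 2, 3, 3)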
Example 92
Source File: ArrayOps.scala    From BigDL   with Apache License 2.0
package com.intel.analytics.bigdl.utils.tf.loaders

import java.nio.ByteOrder

import com.intel.analytics.bigdl.Module
import com.intel.analytics.bigdl.nn.tf.{InvertPermutation => InvertPermutationOps,
  ConcatOffset => ConcatOffsetOps}
import com.intel.analytics.bigdl.tensor.TensorNumericMath.TensorNumeric
import com.intel.analytics.bigdl.utils.tf.Context
import org.tensorflow.framework.NodeDef

import scala.reflect.ClassTag

private[bigdl] class InvertPermutation extends TensorflowOpsLoader {
  override def build[T: ClassTag](nodeDef: NodeDef, byteOrder: ByteOrder, context: Context[T])
    (implicit ev: TensorNumeric[T]): Module[T] = {
    new InvertPermutationOps[T]()
  }
}

private[bigdl] class ConcatOffset extends TensorflowOpsLoader {
  override def build[T: ClassTag](nodeDef: NodeDef, byteOrder: ByteOrder, context: Context[T])
    (implicit ev: TensorNumeric[T]): Module[T] = {
    new ConcatOffsetOps[T]()
  }
} 
Example 93
Source File: Prod.scala    From BigDL   with Apache License 2.0
package com.intel.analytics.bigdl.utils.tf.loaders

import java.nio.ByteOrder

import com.intel.analytics.bigdl.Module
import com.intel.analytics.bigdl.nn.abstractnn.{AbstractModule, Activity}
import com.intel.analytics.bigdl.nn.ops.{Prod => ProdOps}
import com.intel.analytics.bigdl.tensor.Tensor
import com.intel.analytics.bigdl.tensor.TensorNumericMath.TensorNumeric
import com.intel.analytics.bigdl.utils.tf.Context
import org.tensorflow.framework.NodeDef

import scala.reflect.ClassTag

class Prod extends TensorflowOpsLoader {
  override def build[T: ClassTag](nodeDef: NodeDef, byteOrder: ByteOrder,
    context: Context[T])(implicit ev: TensorNumeric[T]): Module[T] = {
    new ProdLoadTF[T]()
  }
}

class ProdLoadTF[T: ClassTag]()(implicit ev: TensorNumeric[T]) extends Adapter[T](Array(2)) {
  override def build(tensorArrays: Array[Tensor[_]]): AbstractModule[Activity, Activity, T] = {
    val axis = tensorArrays(0).asInstanceOf[Tensor[Int]].value() + 1
    ProdOps[T](axis)
  }
} 
Example 94
Source File: Utils.scala    From BigDL   with Apache License 2.0
package com.intel.analytics.bigdl.utils.tf.loaders

import java.nio.ByteOrder
import java.nio.charset.Charset
import java.util

import com.intel.analytics.bigdl.tensor.Tensor
import com.intel.analytics.bigdl.tensor.TensorNumericMath.TensorNumeric
import com.intel.analytics.bigdl.utils.tf.Context
import com.intel.analytics.bigdl.utils.tf.TensorflowToBigDL.toTensor
import org.tensorflow.framework.{AttrValue, DataType, NodeDef}

import scala.reflect.ClassTag
import collection.JavaConverters._

object Utils {
  private[loaders] def getOrSetTensor[T: ClassTag](
    node: NodeDef, context: Context[T], byteOrder: ByteOrder,
    trans: Option[Seq[(Int, Int)]] = None)(
    implicit ev: TensorNumeric[T]): (Tensor[T], Tensor[T]) = {

    if (context.containsTensor(node.getName)) {
      val result = context(node.getName)
      (result._1, result._2)
    } else {
      var weight = toTensor(node.getAttrMap.get("value").getTensor, byteOrder)
        .asInstanceOf[Tensor[T]]
      trans match {
        case Some(transposes) =>
          for ((first, second) <- transposes) {
            weight = weight.transpose(first, second)
          }
          weight = weight.contiguous()
        case _ =>
      }
      val gradient = Tensor[T](weight.size())
      context.putTensor(node.getName, (weight, gradient, trans))
      (weight, gradient)
    }
  }

  private[loaders] def getString(attrMap: util.Map[String, AttrValue], key: String): String = {
    require(attrMap.containsKey(key), s"Operation doesn't contain attribute $key")
    attrMap.get(key).getS.toString(Charset.defaultCharset())
  }

  private[loaders] def getString(nodeDef: NodeDef, key: String): String = {
    getString(nodeDef.getAttrMap, key)
  }

  private[loaders] def getInt(attrMap: util.Map[String, AttrValue], key: String): Int = {
    require(attrMap.containsKey(key), s"Operation doesn't contain attribute $key")
    attrMap.get(key).getI.toInt
  }

  private[loaders] def getFloat(attrMap: util.Map[String, AttrValue], key: String): Float = {
    require(attrMap.containsKey(key), s"Operation doesn't contain attribute $key")
    attrMap.get(key).getF
  }

  private[loaders] def getBoolean(attrMap: util.Map[String, AttrValue], key: String): Boolean = {
    require(attrMap.containsKey(key), s"Operation doesn't contain attribute $key")
    attrMap.get(key).getB
  }

  private[loaders] def getBoolean(nodeDef: NodeDef, key: String): Boolean = {
    getBoolean(nodeDef.getAttrMap, key)
  }

  private[loaders] def getIntList(attrMap: util.Map[String, AttrValue], key: String): Seq[Int] = {
    require(attrMap.containsKey(key), s"Operation doesn't contain attribute $key")
    attrMap.get(key).getList.getIList.asScala.map(_.toInt)
  }

  private[loaders] def getType(attrMap: util.Map[String, AttrValue], key: String): DataType = {
    require(attrMap.containsKey(key), s"Operation doesn't contain attribute $key")
    attrMap.get(key).getType
  }

  private[loaders] def getType(nodeDef: NodeDef, key: String): DataType = {
    getType(nodeDef.getAttrMap, key)
  }

  private[loaders] def toArray[T: ClassTag](tensor: Tensor[T]): Array[T] = {
    require(tensor.nDimension() == 1, "require 1D tensor")
    val array = new Array[T](tensor.nElement())
    var i = 0
    while(i < array.length) {
      array(i) = tensor.valueAt(i + 1)
      i += 1
    }
    array
  }
} 
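
As a usage sketch (from within the loaders package, since the helpers are private[loaders]): toArray flattens a 1-D BigDL tensor into a plain Scala array, using the 1-based valueAt indexing shown above:

import com.intel.analytics.bigdl.tensor.Tensor
import com.intel.analytics.bigdl.utils.T

val t = Tensor[Int](T(1, 2, 3, 4))
Utils.toArray(t) // Array(1, 2, 3, 4)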
Example 95
Source File: IsFinite.scala    From BigDL   with Apache License 2.0
package com.intel.analytics.bigdl.utils.tf.loaders

import java.nio.ByteOrder

import com.intel.analytics.bigdl.Module
import com.intel.analytics.bigdl.nn.ops.{IsFinite => IsFiniteOps}
import com.intel.analytics.bigdl.tensor.TensorNumericMath.TensorNumeric
import com.intel.analytics.bigdl.utils.tf.Context
import org.tensorflow.framework.{DataType, NodeDef}

import scala.reflect.ClassTag

class IsFinite extends TensorflowOpsLoader {

  import Utils._

  override def build[T: ClassTag](nodeDef: NodeDef, byteOrder: ByteOrder, context: Context[T])
    (implicit ev: TensorNumeric[T]): Module[T] = {
    val t = getType(nodeDef.getAttrMap, "T")
    if (t == DataType.DT_FLOAT) {
      IsFiniteOps[T, Float]()
    } else if (t == DataType.DT_DOUBLE) {
      IsFiniteOps[T, Double]()
    } else {
      throw new UnsupportedOperationException(s"Not support load Inv when type is ${t}")
    }
  }
} 
Example 96
Source File: Conv3D.scala    From BigDL   with Apache License 2.0
package com.intel.analytics.bigdl.utils.tf.loaders

import java.nio.ByteOrder

import com.intel.analytics.bigdl.Module
import com.intel.analytics.bigdl.nn.abstractnn.{AbstractModule, Activity, DataFormat}
import com.intel.analytics.bigdl.nn.tf.{Conv3D => Conv3DOps}
import com.intel.analytics.bigdl.tensor.TensorNumericMath.TensorNumeric
import com.intel.analytics.bigdl.utils.tf.Context
import org.tensorflow.framework.NodeDef

import scala.reflect.ClassTag

class Conv3D extends TensorflowOpsLoader {

  import Utils._

  override def build[T: ClassTag](nodeDef: NodeDef, byteOrder: ByteOrder,
                                  context: Context[T])(implicit ev: TensorNumeric[T]): Module[T] = {
    val attributes = nodeDef.getAttrMap
    val (pT, pW, pH) =
      if (getString(attributes, "padding") == "SAME") {
        (-1, -1, -1)
      } else {
        (0, 0, 0)
      }
    val strideList = getIntList(attributes, "strides")
    require(strideList.head == 1, s"not support strides on batch")

    val format = getString(attributes, "data_format")
    val conv = format match {
      case "NDHWC" =>
        require(strideList(4) == 1, s"not support strides on depth")
        val dT = strideList(1)
        val dW = strideList(2)
        val dH = strideList(3)

        Conv3DOps[T](dT, dW, dH, pT, pW, pH, DataFormat.NHWC)
      case "NCDHW" =>
        require(strideList(1) == 1, s"not support strides on depth")
        val dT = strideList(2)
        val dW = strideList(3)
        val dH = strideList(4)
        Conv3DOps[T](dT, dW, dH, pT, pW, pH, DataFormat.NCHW)
      case _ =>
        throw new IllegalArgumentException(s"not supported data format: $format")
    }
    conv.asInstanceOf[AbstractModule[Activity, Activity, T]]
  }
} 
Example 97
Source File: Relu6.scala    From BigDL   with Apache License 2.0
package com.intel.analytics.bigdl.utils.tf.loaders

import java.nio.ByteOrder

import com.intel.analytics.bigdl.Module
import com.intel.analytics.bigdl.nn.tf.ReLU6
import com.intel.analytics.bigdl.tensor.TensorNumericMath.TensorNumeric
import com.intel.analytics.bigdl.utils.tf.Context
import com.intel.analytics.bigdl.utils.tf.loaders.Utils.getType
import org.tensorflow.framework.{DataType, NodeDef}

import scala.reflect.ClassTag

class Relu6 extends TensorflowOpsLoader {
  override def build[T: ClassTag](nodeDef: NodeDef, byteOrder: ByteOrder,
                                  context: Context[T])(implicit ev: TensorNumeric[T]): Module[T] = {
    val t = getType(nodeDef.getAttrMap, "T")
    if (t == DataType.DT_FLOAT) {
      ReLU6[T, Float]()
    } else if (t == DataType.DT_DOUBLE) {
      ReLU6[T, Double]()
    } else {
      throw new UnsupportedOperationException(s"Not support load ReLU6 when type is ${t}")
    }
  }
} 
Example 98
Source File: Split.scala    From BigDL   with Apache License 2.0
package com.intel.analytics.bigdl.utils.tf.loaders

import java.nio.ByteOrder

import com.intel.analytics.bigdl.Module
import com.intel.analytics.bigdl.nn.ConcatTable
import com.intel.analytics.bigdl.nn.abstractnn.{AbstractModule, Activity}
import com.intel.analytics.bigdl.nn.tf.SplitAndSelect
import com.intel.analytics.bigdl.tensor.Tensor
import com.intel.analytics.bigdl.tensor.TensorNumericMath.TensorNumeric
import com.intel.analytics.bigdl.utils.tf.Context
import org.tensorflow.framework.NodeDef

import scala.reflect.ClassTag

class Split extends TensorflowOpsLoader {

  import Utils._

  override def build[T: ClassTag](nodeDef: NodeDef, byteOrder: ByteOrder,
    context: Context[T])(implicit ev: TensorNumeric[T]): Module[T] = {
    val numSplit = nodeDef.getAttrMap.get("num_split").getI.toInt
    new SplitLoadTF[T](numSplit)
  }
}

class SplitLoadTF[T: ClassTag](val numSplit: Int)(implicit ev: TensorNumeric[T])
  extends Adapter[T](Array(1)) {
  override def build(tensorArrays: Array[Tensor[_]]): AbstractModule[Activity, Activity, T] = {
    val dim = tensorArrays(0).asInstanceOf[Tensor[Int]].value() + 1
    val model = new ConcatTable[T]()
    for (index <- Range(1, numSplit + 1)) {
      model.add(SplitAndSelect[T](dim, index, numSplit))
    }
    model
  }
} 
Example 99
Source File: Round.scala    From BigDL   with Apache License 2.0
package com.intel.analytics.bigdl.utils.tf.loaders

import java.nio.ByteOrder

import com.intel.analytics.bigdl.Module
import com.intel.analytics.bigdl.nn.ops.{Round => RoundOps}
import com.intel.analytics.bigdl.tensor.TensorNumericMath.TensorNumeric
import com.intel.analytics.bigdl.utils.tf.Context
import org.tensorflow.framework.{DataType, NodeDef}

import scala.reflect.ClassTag

class Round extends TensorflowOpsLoader {

  import Utils._

  override def build[T: ClassTag](nodeDef: NodeDef, byteOrder: ByteOrder, context: Context[T])
    (implicit ev: TensorNumeric[T]): Module[T] = {
    val t = getType(nodeDef.getAttrMap, "T")
    if (t == DataType.DT_FLOAT) {
      RoundOps[T, Float]()
    } else if (t == DataType.DT_DOUBLE) {
      RoundOps[T, Double]()
    } else if (t == DataType.DT_INT32) {
      RoundOps[T, Int]()
    } else {
      throw new UnsupportedOperationException(s"Not support load Round when type is ${t}")
    }
  }
} 
Example 100
Source File: Sqrt.scala    From BigDL   with Apache License 2.0
package com.intel.analytics.bigdl.utils.tf.loaders

import java.nio.ByteOrder

import com.intel.analytics.bigdl.Module
import com.intel.analytics.bigdl.nn.tf.Power
import com.intel.analytics.bigdl.tensor.TensorNumericMath.TensorNumeric
import com.intel.analytics.bigdl.utils.tf.Context
import com.intel.analytics.bigdl.utils.tf.loaders.Utils.getType
import org.tensorflow.framework.{DataType, NodeDef}

import scala.reflect.ClassTag

class Sqrt extends TensorflowOpsLoader {
  override def build[T: ClassTag](nodeDef: NodeDef, byteOrder: ByteOrder,
                                  context: Context[T])(implicit ev: TensorNumeric[T]): Module[T] = {
    val t = getType(nodeDef.getAttrMap, "T")
    if (t == DataType.DT_FLOAT) {
      Power[T, Float](0.5)
    } else if (t == DataType.DT_DOUBLE) {
      Power[T, Double](0.5)
    } else {
      throw new UnsupportedOperationException(s"Not support load Sqrt when type is $t")
    }
  }
} 
Example 101
Source File: Minimum.scala    From BigDL   with Apache License 2.0
package com.intel.analytics.bigdl.utils.tf.loaders

import java.nio.ByteOrder

import com.intel.analytics.bigdl.Module
import com.intel.analytics.bigdl.nn.ops.{Minimum => MinimumOps}
import com.intel.analytics.bigdl.tensor.TensorNumericMath.TensorNumeric
import org.tensorflow.framework.{DataType, NodeDef}
import Utils._
import com.intel.analytics.bigdl.utils.tf.Context

import scala.reflect.ClassTag

class Minimum extends TensorflowOpsLoader {
  override def build[T: ClassTag](nodeDef: NodeDef, byteOrder: ByteOrder,
                                  context: Context[T])(implicit ev: TensorNumeric[T]): Module[T] = {
    val t = getType(nodeDef.getAttrMap, "T")
    if (t == DataType.DT_FLOAT) {
      MinimumOps[T, Float]()
    } else if (t == DataType.DT_DOUBLE) {
      MinimumOps[T, Double]()
    } else {
      throw new UnsupportedOperationException(s"Not support load Maximum when type is $t")
    }
  }
} 
Example 102
Source File: TFRecordIterator.scala    From BigDL   with Apache License 2.0
package com.intel.analytics.bigdl.utils.tf

import java.io.{BufferedInputStream, File, FileInputStream, InputStream}
import java.nio.{ByteBuffer, ByteOrder}

class TFRecordIterator(inputStream: InputStream) extends Iterator[Array[Byte]] {

  private var dataBuffer: Array[Byte] = null

  private val lengthBuffer: Array[Byte] = new Array[Byte](8)

  override def hasNext: Boolean = {
    if (dataBuffer != null) {
      true
    } else {
      val numOfBytes = inputStream.read(lengthBuffer)
      if (numOfBytes == 8) {
        val lengthWrapper = ByteBuffer.wrap(lengthBuffer)
        lengthWrapper.order(ByteOrder.LITTLE_ENDIAN)
        val length = lengthWrapper.getLong().toInt
        // todo, do crc check, simply skip now
        inputStream.skip(4)

        dataBuffer = new Array[Byte](length)
        // assumes the full record arrives in a single read; a partial read
        // would leave the tail of dataBuffer zeroed
        inputStream.read(dataBuffer)
        // todo, do crc check, simply skip now
        inputStream.skip(4)
        true
      } else {
        inputStream.close()
        false
      }
    }
  }

  override def next(): Array[Byte] = {
    if (hasNext) {
      val data = this.dataBuffer
      this.dataBuffer = null
      data
    } else {
      throw new NoSuchElementException("next on empty iterator")
    }
  }
}

object TFRecordIterator {
  def apply(file: File): TFRecordIterator = {
    val inputStream = new FileInputStream(file)
    new TFRecordIterator(inputStream)
  }
} 
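
A minimal usage sketch, assuming a local TFRecord file (the path is hypothetical); the iterator yields each record's raw bytes:

import java.io.File

val records = TFRecordIterator(new File("data.tfrecord"))
records.foreach(bytes => println(s"record of ${bytes.length} bytes"))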
Example 103
Source File: TFUtilsSpec.scala    From BigDL   with Apache License 2.0
package com.intel.analytics.bigdl.utils.tf

import java.io.File
import java.nio.ByteOrder

import com.google.protobuf.ByteString
import com.intel.analytics.bigdl.tensor.Tensor
import com.intel.analytics.bigdl.utils.T
import org.scalatest.{BeforeAndAfter, FlatSpec, Matchers}
import org.tensorflow.framework.TensorProto

import scala.collection.JavaConverters._

class TFUtilsSpec extends FlatSpec with Matchers with BeforeAndAfter {

  private var constTensors: Map[String, TensorProto] = null
  before {
    constTensors = getConstTensorProto()
  }

  private def getConstTensorProto(): Map[String, TensorProto] = {
    val resource = getClass.getClassLoader.getResource("tf")
    val path = resource.getPath + File.separator + "consts.pbtxt"
    val nodes = TensorflowLoader.parseTxt(path)
    nodes.asScala.map(node => node.getName -> node.getAttrMap.get("value").getTensor).toMap
  }

  "parseTensor " should "work with bool TensorProto" in {
    val tensorProto = constTensors("bool_const")
    val bigdlTensor = TFUtils.parseTensor(tensorProto, ByteOrder.LITTLE_ENDIAN)
    bigdlTensor should be (Tensor[Boolean](T(true, false, true, false)))
  }

  "parseTensor " should "work with float TensorProto" in {
    val tensorProto = constTensors("float_const")
    val bigdlTensor = TFUtils.parseTensor(tensorProto, ByteOrder.LITTLE_ENDIAN)
    bigdlTensor should be (Tensor[Float](T(1.0f, 2.0f, 3.0f, 4.0f)))
  }

  "parseTensor " should "work with double TensorProto" in {
    val tensorProto = constTensors("double_const")
    val bigdlTensor = TFUtils.parseTensor(tensorProto, ByteOrder.LITTLE_ENDIAN)
    bigdlTensor should be (Tensor[Double](T(1.0, 2.0, 3.0, 4.0)))
  }

  "parseTensor " should "work with int TensorProto" in {
    val tensorProto = constTensors("int_const")
    val bigdlTensor = TFUtils.parseTensor(tensorProto, ByteOrder.LITTLE_ENDIAN)
    bigdlTensor should be (Tensor[Int](T(1, 2, 3, 4)))
  }

  "parseTensor " should "work with long TensorProto" in {
    val tensorProto = constTensors("long_const")
    val bigdlTensor = TFUtils.parseTensor(tensorProto, ByteOrder.LITTLE_ENDIAN)
    bigdlTensor should be (Tensor[Long](T(1, 2, 3, 4)))
  }

  "parseTensor " should "work with int8 TensorProto" in {
    val tensorProto = constTensors("int8_const")
    val bigdlTensor = TFUtils.parseTensor(tensorProto, ByteOrder.LITTLE_ENDIAN)
    bigdlTensor should be (Tensor[Int](T(1, 2, 3, 4)))
  }

  "parseTensor " should "work with uint8 TensorProto" in {
    val tensorProto = constTensors("uint8_const")
    val bigdlTensor = TFUtils.parseTensor(tensorProto, ByteOrder.LITTLE_ENDIAN)
    bigdlTensor should be (Tensor[Int](T(1, 2, 3, 4)))
  }

  "parseTensor " should "work with int16 TensorProto" in {
    val tensorProto = constTensors("int16_const")
    val bigdlTensor = TFUtils.parseTensor(tensorProto, ByteOrder.LITTLE_ENDIAN)
    bigdlTensor should be (Tensor[Int](T(1, 2, 3, 4)))
  }

  "parseTensor " should "work with uint16 TensorProto" in {
    val tensorProto = constTensors("uint16_const")
    val bigdlTensor = TFUtils.parseTensor(tensorProto, ByteOrder.LITTLE_ENDIAN)
    bigdlTensor should be (Tensor[Int](T(1, 2, 3, 4)))
  }

  "parseTensor " should "work with string TensorProto" in {
    import TFTensorNumeric.NumericByteString
    val tensorProto = constTensors("string_const")
    val bigdlTensor = TFUtils.parseTensor(tensorProto, ByteOrder.LITTLE_ENDIAN)
    val data = Array(
      ByteString.copyFromUtf8("a"),
      ByteString.copyFromUtf8("b"),
      ByteString.copyFromUtf8("c"),
      ByteString.copyFromUtf8("d")
    )
    bigdlTensor should be (Tensor[ByteString](data, Array[Int](4)))
  }
} 
Example 104
Source File: ByteUtils.scala    From mantis   with Apache License 2.0
package io.iohk.ethereum.utils

import java.math.BigInteger
import java.nio.{ByteBuffer, ByteOrder}

import akka.util.ByteString

import scala.util.Random

object ByteUtils {

  def bigIntegerToBytes(b: BigInteger, numBytes: Int): Array[Byte] = {
    val bytes = new Array[Byte](numBytes)
    val biBytes = b.toByteArray
    val start = if (biBytes.length == numBytes + 1) 1 else 0
    val length = Math.min(biBytes.length, numBytes)
    System.arraycopy(biBytes, start, bytes, numBytes - length, length)
    bytes
  }

  def xor(a: Array[Byte], b: Array[Byte]): Array[Byte] = {
    (a zip b) map { case (b1, b2) => (b1 ^ b2).toByte }
  }

  def or(arrays: Array[Byte]*): Array[Byte] = {
    require(arrays.map(_.length).distinct.length <= 1, "All the arrays should have the same length")
    require(arrays.nonEmpty, "There should be one or more arrays")

    val zeroes = Array.fill(arrays.head.length)(0.toByte)
    arrays.foldLeft[Array[Byte]](zeroes){
      case (prevOr, array) => prevOr.zip(array).map{ case (b1, b2) => (b1 | b2).toByte }
    }
  }

  def and(arrays: Array[Byte]*): Array[Byte] = {
    require(arrays.map(_.length).distinct.length <= 1, "All the arrays should have the same length")
    require(arrays.nonEmpty, "There should be one or more arrays")

    val ones = Array.fill(arrays.head.length)(0xFF.toByte)
    arrays.foldLeft[Array[Byte]](ones){
      case (prevOr, array) => prevOr.zip(array).map{ case (b1, b2) => (b1 & b2).toByte }
    }
  }

  def randomBytes(len: Int): Array[Byte] = {
    val arr = new Array[Byte](len)
    new Random().nextBytes(arr)
    arr
  }

  def bigEndianToShort(bs: Array[Byte]): Short = {
    val n = bs(0) << 8
    (n | bs(1) & 0xFF).toShort
  }

  def padLeft(bytes: ByteString, length: Int, byte: Byte = 0): ByteString = {
    val l = math.max(0, length - bytes.length)
    val fill = Seq.fill[Byte](l)(byte)
    fill ++: bytes
  }

  def compactPickledBytes(buffer: ByteBuffer): ByteString = {
    val data = Array.ofDim[Byte](buffer.limit)
    buffer.rewind()
    buffer.get(data)
    ByteString(data)
  }

  def bytesToInts(bytes: Array[Byte]): Array[Int] =
    bytes.grouped(4).map(getIntFromWord).toArray

  def intsToBytes(input: Array[Int]): Array[Byte] = {
    input.flatMap { i =>
      Array(
        (i & 0xFF).toByte,
        ((i >> 8) & 0xFF).toByte,
        ((i >> 16) & 0xFF).toByte,
        ((i >> 24) & 0xFF).toByte)
    }
  }

  def getIntFromWord(arr: Array[Byte]): Int = {
    ByteBuffer.wrap(arr, 0, 4).order(ByteOrder.LITTLE_ENDIAN).getInt
  }

} 
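
A quick sanity sketch of the word helpers above: intsToBytes and bytesToInts are little-endian inverses for word-aligned input, and bigEndianToShort reads the first two bytes big-endian:

val ints  = Array(1, 256, -1)
val bytes = ByteUtils.intsToBytes(ints)
ByteUtils.bytesToInts(bytes).sameElements(ints)             // true
ByteUtils.bigEndianToShort(Array(0x01.toByte, 0x02.toByte)) // 258 (0x0102)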
Example 105
Source File: PLYReadWriteTests.scala    From scalismo-faces   with Apache License 2.0
package scalismo.faces.io

import java.io.{ByteArrayInputStream, ByteArrayOutputStream, OutputStreamWriter}
import java.nio.ByteOrder
import java.util.Scanner

import scalismo.faces.FacesTestSuite
import scalismo.faces.io.ply._

class PLYReadWriteTests extends FacesTestSuite {

  describe("Write-read cycles to string, big- and little endian") {

    def testRWEndianCycle[A:StringWriter:StringReader:EndianWriter:EndianReader](toWrite: IndexedSeq[A], bo: ByteOrder): Unit = {
      val N = toWrite.size
      val os = new ByteArrayOutputStream()
      val writer = new SequenceWriter[A]
      writer.write(toWrite, os, bo)

      val ba = os.toByteArray

      val is = new ByteArrayInputStream(ba)
      val reader = new FixedLengthSequenceReader[A]
      val read = reader.read(N, is, bo)

      read.zip(toWrite).foreach { p =>
        p._1 shouldBe p._2
      }
    }

    def testRWStringCycle[A:StringWriter:StringReader:EndianWriter:EndianReader](toWrite: IndexedSeq[A]): Unit = {
      val N = toWrite.size
      val os = new ByteArrayOutputStream()
      val osw = new OutputStreamWriter(os)
      val writer = new SequenceWriter[A]
      writer.write(toWrite, osw)
      osw.flush()

      val is = new ByteArrayInputStream(os.toByteArray)
      val isr = new Scanner(is)
      val reader = new FixedLengthSequenceReader[A]
      val read = reader.read(N, isr)

      read.zip(toWrite).foreach { p =>
        p._1 shouldBe p._2
      }
    }

    def testAllThreeCycles[A:StringWriter:StringReader:EndianWriter:EndianReader](toWrite: IndexedSeq[A]): Unit = {
      testRWStringCycle(toWrite)
      testRWEndianCycle(toWrite, ByteOrder.BIG_ENDIAN)
      testRWEndianCycle(toWrite, ByteOrder.LITTLE_ENDIAN)
    }

    it("should result in the same sequence of bytes") {
      val toWrite = for (i <- 0 until 20) yield (randomDouble * 255).toByte
      testAllThreeCycles(toWrite)
    }
    it("should result in the same sequence of char") {
      val toWrite = for (i <- 0 until 20) yield (randomDouble * 255).toChar
      testAllThreeCycles(toWrite)
    }
    it("should result in the same sequence of short") {
      val toWrite = for (i <- 0 until 20) yield (randomDouble * 255).toShort
      testAllThreeCycles(toWrite)
    }
    it("should result in the same sequence of int") {
      val toWrite = for (i <- 0 until 20) yield (randomDouble * 255).toInt
      testAllThreeCycles(toWrite)
    }
    it("should result in the same sequence of long") {
      val toWrite = for (i <- 0 until 20) yield (randomDouble * 255).toLong
      testAllThreeCycles(toWrite)
    }
    it("should result in the same sequence of float") {
      val toWrite = for (i <- 0 until 20) yield (randomDouble * 255).toFloat
      testAllThreeCycles(toWrite)
    }
    it("should result in the same sequence of double") {
      val toWrite = for (i <- 0 until 20) yield (randomDouble * 255)
      testAllThreeCycles(toWrite)
    }

  }

} 
Example 106
Source File: RocksHelper.scala    From incubator-s2graph   with Apache License 2.0
package org.apache.s2graph.core.storage.rocks

import java.nio.{ByteBuffer, ByteOrder}

import org.apache.s2graph.core.QueryParam

object RocksHelper {

  def intToBytes(value: Int): Array[Byte] = {
    val intBuffer = ByteBuffer.allocate(4).order(ByteOrder.nativeOrder())
    intBuffer.clear()
    intBuffer.putInt(value)
    intBuffer.array()
  }

  def longToBytes(value: Long): Array[Byte] = {
    val longBuffer = ByteBuffer.allocate(8).order(ByteOrder.nativeOrder())
    longBuffer.clear()
    longBuffer.putLong(value)
    longBuffer.array()
  }

  def bytesToInt(data: Array[Byte], offset: Int): Int = {
    if (data != null) {
      val intBuffer = ByteBuffer.allocate(4).order(ByteOrder.nativeOrder())
      intBuffer.put(data, offset, 4)
      intBuffer.flip()
      intBuffer.getInt()
    } else 0
  }

  def bytesToLong(data: Array[Byte], offset: Int): Long = {
    if (data != null) {
      val longBuffer = ByteBuffer.allocate(8).order(ByteOrder.nativeOrder())
      longBuffer.put(data, offset, 8)
      longBuffer.flip()
      longBuffer.getLong()
    } else 0L
  }

  case class ScanWithRange(cf: Array[Byte], startKey: Array[Byte], stopKey: Array[Byte], offset: Int, limit: Int)
  case class GetRequest(cf: Array[Byte], key: Array[Byte])

  type RocksRPC = Either[GetRequest, ScanWithRange]
} 
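
Round-trip sketch for the native-order helpers above:

val encoded = RocksHelper.longToBytes(42L)
RocksHelper.bytesToLong(encoded, 0)                  // 42L
RocksHelper.bytesToInt(RocksHelper.intToBytes(7), 0) // 7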
Example 107
Source File: ManagerUtilsSuite.scala    From darwin   with Apache License 2.0
package it.agilelab.darwin.app.mock

import java.nio.{ByteBuffer, ByteOrder}

import com.typesafe.config.ConfigFactory
import it.agilelab.darwin.manager.AvroSchemaManagerFactory
import it.agilelab.darwin.manager.util.{AvroSingleObjectEncodingUtils, ConfigurationKeys}
import it.agilelab.darwin.manager.util.ByteArrayUtils._

import scala.util.Random
import org.scalatest.flatspec.AnyFlatSpec
import org.scalatest.matchers.should.Matchers

class BigEndianManagerUtilsSuite extends ManagerUtilsSuite(ByteOrder.BIG_ENDIAN)

class LittleEndianManagerUtilsSuite extends ManagerUtilsSuite(ByteOrder.LITTLE_ENDIAN)

abstract class ManagerUtilsSuite(endianness: ByteOrder) extends AnyFlatSpec with Matchers {

  "AvroSchemaManager utilities" should "create a Single-Object encoded byte array" in {
    val ORIGINAL_LENGTH: Int = 10
    val originalSchema = SchemaReader.readFromResources("OneField.avsc")
    val config =
      ConfigFactory
        .parseMap(new java.util.HashMap[String, String]() {
          {
            put(ConfigurationKeys.MANAGER_TYPE, ConfigurationKeys.CACHED_EAGER)
            put(ConfigurationKeys.ENDIANNESS, endianness.toString)
          }
        })
        .withFallback(ConfigFactory.load())
        .resolve()
    val manager = AvroSchemaManagerFactory.initialize(config)
    manager.registerAll(Seq(originalSchema))
    val originalPayload = new Array[Byte](ORIGINAL_LENGTH)
    Random.nextBytes(originalPayload)
    val data: Array[Byte] = manager.generateAvroSingleObjectEncoded(originalPayload, originalSchema)
    assert(AvroSingleObjectEncodingUtils.isAvroSingleObjectEncoded(data))
    val (schema, payload) = manager.retrieveSchemaAndAvroPayload(data)
    assert(schema == originalSchema)
    assert(originalPayload sameElements payload)
  }

  it should "convert a long to byte array and back" in {
    val longs = (1 to 10).map(_ => Random.nextLong())

    assert(
      longs == longs.map(
        x =>
          AvroSingleObjectEncodingUtils
            .readLong(ByteBuffer.wrap(x.longToByteArray(endianness)), endianness)
      )
    )
  }

} 
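
For context, the standard Avro single-object encoding prefixes each record with the two-byte marker 0xC3 0x01 followed by an 8-byte schema fingerprint (little-endian in the Avro spec; darwin makes the byte order configurable, which is what these suites parameterize over). A minimal sketch of such a header check, as a hypothetical helper rather than darwin's actual implementation:

def looksSingleObjectEncoded(data: Array[Byte]): Boolean =
  data.length >= 10 && data(0) == 0xC3.toByte && data(1) == 0x01.toByte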
Example 108
Source File: CachedEagerApplicationSuite.scala    From darwin   with Apache License 2.0
package it.agilelab.darwin.app.mock

import java.lang.reflect.Modifier
import java.nio.ByteOrder

import com.typesafe.config.{Config, ConfigFactory}
import it.agilelab.darwin.annotations.AvroSerde
import it.agilelab.darwin.app.mock.classes.{MyClass, MyNestedClass, NewClass, OneField}
import it.agilelab.darwin.common.{Connector, ConnectorFactory}
import it.agilelab.darwin.manager.{AvroSchemaManager, CachedEagerAvroSchemaManager}
import org.apache.avro.{Schema, SchemaNormalization}
import org.apache.avro.reflect.ReflectData
import org.reflections.Reflections

import org.scalatest.flatspec.AnyFlatSpec
import org.scalatest.matchers.should.Matchers
import it.agilelab.darwin.common.compat._

class BigEndianCachedEagerApplicationSuite extends CachedEagerApplicationSuite(ByteOrder.BIG_ENDIAN)

class LittleEndianCachedEagerApplicationSuite extends CachedEagerApplicationSuite(ByteOrder.LITTLE_ENDIAN)

abstract class CachedEagerApplicationSuite(val endianness: ByteOrder) extends AnyFlatSpec with Matchers {

  val config: Config = ConfigFactory.load()
  val connector: Connector = ConnectorFactory.connector(config)
  val manager: AvroSchemaManager = new CachedEagerAvroSchemaManager(connector, endianness)

  "CachedEagerAvroSchemaManager" should "not fail after the initialization" in {
    val schemas: Seq[Schema] = Seq(SchemaReader.readFromResources("MyNestedClass.avsc"))
    assert(manager.registerAll(schemas).size == 1)
  }

  it should "load all existing schemas and register a new one" in {
    val schemas: Seq[Schema] = Seq(SchemaReader.readFromResources("MyNestedClass.avsc"))
    manager.getSchema(0L)

    manager.registerAll(schemas)

    val id = manager.getId(schemas.head)
    assert(manager.getSchema(id).isDefined)
    assert(schemas.head == manager.getSchema(id).get)
  }

  it should "get all previously registered schemas" in {
    val schema: Schema = SchemaReader.readFromResources("MyNestedClass.avsc")
    val schema0 = manager.getSchema(0L)
    val schema1 = manager.getSchema(1L)
    assert(schema0.isDefined)
    assert(schema1.isDefined)
    assert(schema0.get != schema1.get)
    assert(schema != schema0.get)
    assert(schema != schema1.get)
  }

  it should "generate all schemas for all the annotated classes with @AvroSerde" in {
    val reflections = new Reflections("it.agilelab.darwin.app.mock.classes")

    val oneFieldSchema = ReflectData.get().getSchema(classOf[OneField]).toString
    val myNestedSchema = ReflectData.get().getSchema(classOf[MyNestedClass]).toString
    val myClassSchema = ReflectData.get().getSchema(classOf[MyClass]).toString

    val annotationClass: Class[AvroSerde] = classOf[AvroSerde]
    val classes = reflections.getTypesAnnotatedWith(annotationClass).toScala.toSeq
      .filter(c => !c.isInterface && !Modifier.isAbstract(c.getModifiers))
    val schemas = classes.map(c => ReflectData.get().getSchema(Class.forName(c.getName)).toString)
    Seq(oneFieldSchema, myClassSchema, myNestedSchema) should contain theSameElementsAs schemas
  }

  it should "reload all schemas from the connector" in {
    val newSchema = ReflectData.get().getSchema(classOf[NewClass])
    val newId = SchemaNormalization.parsingFingerprint64(newSchema)
    assert(manager.getSchema(newId).isEmpty)

    connector.insert(Seq(newId -> newSchema))
    assert(manager.getSchema(newId).isEmpty)

    manager.reload()
    assert(manager.getSchema(newId).isDefined)
    assert(manager.getSchema(newId).get == newSchema)
  }

} 
Example 109
Source File: CachedLazyApplicationSuite.scala    From darwin   with Apache License 2.0
package it.agilelab.darwin.app.mock

import java.lang.reflect.Modifier
import java.nio.ByteOrder

import com.typesafe.config.{Config, ConfigFactory}
import it.agilelab.darwin.annotations.AvroSerde
import it.agilelab.darwin.app.mock.classes.{MyClass, MyNestedClass, NewClass, OneField}
import it.agilelab.darwin.common.{Connector, ConnectorFactory}
import it.agilelab.darwin.manager.{AvroSchemaManager, CachedLazyAvroSchemaManager}
import org.apache.avro.{Schema, SchemaNormalization}
import org.apache.avro.reflect.ReflectData
import org.reflections.Reflections

import it.agilelab.darwin.common.compat._
import org.scalatest.flatspec.AnyFlatSpec
import org.scalatest.matchers.should.Matchers

class BigEndianCachedLazyApplicationSuite extends CachedLazyApplicationSuite(ByteOrder.BIG_ENDIAN)

class LittleEndianCachedLazyApplicationSuite extends CachedLazyApplicationSuite(ByteOrder.LITTLE_ENDIAN)

abstract class CachedLazyApplicationSuite(val endianness: ByteOrder) extends AnyFlatSpec with Matchers {

  val config: Config = ConfigFactory.load()
  val connector: Connector = ConnectorFactory.connector(config)
  val manager: AvroSchemaManager = new CachedLazyAvroSchemaManager(connector, endianness)

  "CachedLazyAvroSchemaManager" should "not fail after the initialization" in {
    val schemas: Seq[Schema] = Seq(SchemaReader.readFromResources("MyNestedClass.avsc"))
    assert(manager.registerAll(schemas).size == 1)
  }

  it should "load all existing schemas and register a new one" in {
    val schemas: Seq[Schema] = Seq(SchemaReader.readFromResources("MyNestedClass.avsc"))
    manager.getSchema(0L)

    manager.registerAll(schemas)

    val id = manager.getId(schemas.head)
    assert(manager.getSchema(id).isDefined)
    assert(schemas.head == manager.getSchema(id).get)
  }

  it should "get all previously registered schemas" in {
    val schema: Schema = SchemaReader.readFromResources("MyNestedClass.avsc")
    val schema0 = manager.getSchema(0L)
    val schema1 = manager.getSchema(1L)
    assert(schema0.isDefined)
    assert(schema1.isDefined)
    assert(schema0.get != schema1.get)
    assert(schema != schema0.get)
    assert(schema != schema1.get)
  }

  it should "generate all schemas for all the annotated classes with @AvroSerde" in {
    val reflections = new Reflections("it.agilelab.darwin.app.mock.classes")

    val oneFieldSchema = ReflectData.get().getSchema(classOf[OneField]).toString
    val myNestedSchema = ReflectData.get().getSchema(classOf[MyNestedClass]).toString
    val myClassSchema = ReflectData.get().getSchema(classOf[MyClass]).toString

    val annotationClass: Class[AvroSerde] = classOf[AvroSerde]
    val classes = reflections.getTypesAnnotatedWith(annotationClass).toScala.toSeq
      .filter(c => !c.isInterface && !Modifier.isAbstract(c.getModifiers))
    val schemas = classes.map(c => ReflectData.get().getSchema(Class.forName(c.getName)).toString)
    Seq(oneFieldSchema, myClassSchema, myNestedSchema) should contain theSameElementsAs schemas
  }

  it should "reload all schemas from the connector" in {
    val newSchema = ReflectData.get().getSchema(classOf[NewClass])
    val newId = SchemaNormalization.parsingFingerprint64(newSchema)
    assert(manager.getSchema(newId).isEmpty)

    connector.insert(Seq(newId -> newSchema))
    assert(manager.getSchema(newId).isDefined)
    assert(manager.getSchema(newId).get == newSchema)
  }
} 
Example 110
Source File: LazyApplicationSuite.scala    From darwin   with Apache License 2.0
package it.agilelab.darwin.app.mock

import java.lang.reflect.Modifier
import java.nio.ByteOrder

import com.typesafe.config.{Config, ConfigFactory}
import it.agilelab.darwin.annotations.AvroSerde
import it.agilelab.darwin.app.mock.classes.{MyClass, MyNestedClass, NewClass, OneField}
import it.agilelab.darwin.common.{Connector, ConnectorFactory}
import it.agilelab.darwin.manager.{AvroSchemaManager, LazyAvroSchemaManager}
import org.apache.avro.{Schema, SchemaNormalization}
import org.apache.avro.reflect.ReflectData
import org.reflections.Reflections

import it.agilelab.darwin.common.compat._
import org.scalatest.flatspec.AnyFlatSpec
import org.scalatest.matchers.should.Matchers

class BigEndianLazyApplicationSuite extends LazyApplicationSuite(ByteOrder.BIG_ENDIAN)

class LittleEndianLazyApplicationSuite extends LazyApplicationSuite(ByteOrder.LITTLE_ENDIAN)

abstract class LazyApplicationSuite(endianness: ByteOrder) extends AnyFlatSpec with Matchers {

  val config: Config = ConfigFactory.load()
  val connector: Connector = ConnectorFactory.connector(config)
  val manager: AvroSchemaManager = new LazyAvroSchemaManager(connector, endianness)

  "LazyAvroSchemaManager" should "not fail after the initialization" in {
    val schemas: Seq[Schema] = Seq(SchemaReader.readFromResources("MyNestedClass.avsc"))
    assert(manager.registerAll(schemas).size == 1)
  }

  it should "load all existing schemas and register a new one" in {
    val schemas: Seq[Schema] = Seq(SchemaReader.readFromResources("MyNestedClass.avsc"))
    manager.getSchema(0L)

    manager.registerAll(schemas)

    val id = manager.getId(schemas.head)
    assert(manager.getSchema(id).isDefined)
    assert(schemas.head == manager.getSchema(id).get)
  }

  it should "get all previously registered schemas" in {
    val schema: Schema = SchemaReader.readFromResources("MyNestedClass.avsc")
    val schema0 = manager.getSchema(0L)
    val schema1 = manager.getSchema(1L)
    assert(schema0.isDefined)
    assert(schema1.isDefined)
    assert(schema0.get != schema1.get)
    assert(schema != schema0.get)
    assert(schema != schema1.get)
  }

  it should "generate all schemas for all the annotated classes with @AvroSerde" in {
    val reflections = new Reflections("it.agilelab.darwin.app.mock.classes")

    val oneFieldSchema = ReflectData.get().getSchema(classOf[OneField]).toString
    val myNestedSchema = ReflectData.get().getSchema(classOf[MyNestedClass]).toString
    val myClassSchema = ReflectData.get().getSchema(classOf[MyClass]).toString

    val annotationClass: Class[AvroSerde] = classOf[AvroSerde]
    val classes = reflections.getTypesAnnotatedWith(annotationClass).toScala.toSeq
      .filter(c => !c.isInterface && !Modifier.isAbstract(c.getModifiers))
    val schemas = classes.map(c => ReflectData.get().getSchema(Class.forName(c.getName)).toString)
    Seq(oneFieldSchema, myClassSchema, myNestedSchema) should contain theSameElementsAs schemas
  }

  it should "reload all schemas from the connector" in {
    val newSchema = ReflectData.get().getSchema(classOf[NewClass])
    val newId = SchemaNormalization.parsingFingerprint64(newSchema)
    assert(manager.getSchema(newId).isEmpty)

    connector.insert(Seq(newId -> newSchema))
    assert(manager.getSchema(newId).isDefined)
    assert(manager.getSchema(newId).get == newSchema)
  }
} 
Example 111
Source File: ConfigUtil.scala    From darwin   with Apache License 2.0
package it.agilelab.darwin.manager.util

import java.nio.ByteOrder

import com.typesafe.config.{Config, ConfigRenderOptions}

object ConfigUtil {
  def printConfig(conf: Config): String = {
    conf.root().render(ConfigRenderOptions.defaults().setComments(false).setOriginComments(false))
  }

  def printSmallConfig(conf: Config): String = {
    conf.root().render(ConfigRenderOptions.defaults().setComments(false).setOriginComments(false))
  }

  def stringToEndianness(string: String): ByteOrder = {
    string.toUpperCase match {
      case "BIG_ENDIAN" => ByteOrder.BIG_ENDIAN
      case "LITTLE_ENDIAN" => ByteOrder.LITTLE_ENDIAN
      case _ => throw new IllegalArgumentException(s"Unknown endianness: $string")
    }
  }

} 
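
Usage sketch: the endianness string is matched case-insensitively, and anything else fails fast:

ConfigUtil.stringToEndianness("little_endian") // ByteOrder.LITTLE_ENDIAN
ConfigUtil.stringToEndianness("BIG_ENDIAN")    // ByteOrder.BIG_ENDIAN
ConfigUtil.stringToEndianness("middle")        // throws IllegalArgumentException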
Example 112
Source File: ByteArrayUtils.scala    From darwin   with Apache License 2.0
package it.agilelab.darwin.manager.util

import java.io.OutputStream
import java.nio.{ByteBuffer, ByteOrder}

import it.agilelab.darwin.common.LONG_SIZE

private[darwin] object ByteArrayUtils {

  implicit class EnrichedLong(val l: Long) extends AnyVal {
    
    def writeToStream(os: OutputStream, endianness: ByteOrder): Unit = {
      endianness match {
        case ByteOrder.BIG_ENDIAN =>
          os.write((l >>> 56).asInstanceOf[Int])
          os.write((l >>> 48).asInstanceOf[Int])
          os.write((l >>> 40).asInstanceOf[Int])
          os.write((l >>> 32).asInstanceOf[Int])
          os.write((l >>> 24).asInstanceOf[Int])
          os.write((l >>> 16).asInstanceOf[Int])
          os.write((l >>> 8).asInstanceOf[Int])
          os.write((l >>> 0).asInstanceOf[Int])
        case ByteOrder.LITTLE_ENDIAN =>
          os.write((l >>> 0).asInstanceOf[Int])
          os.write((l >>> 8).asInstanceOf[Int])
          os.write((l >>> 16).asInstanceOf[Int])
          os.write((l >>> 24).asInstanceOf[Int])
          os.write((l >>> 32).asInstanceOf[Int])
          os.write((l >>> 40).asInstanceOf[Int])
          os.write((l >>> 48).asInstanceOf[Int])
          os.write((l >>> 56).asInstanceOf[Int])
      }
    }
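
    // Reconstructed sketch: the ManagerUtilsSuite above calls longToByteArray
    // via this implicit class; an assumed ByteBuffer-based implementation.
    def longToByteArray(endianness: ByteOrder): Array[Byte] =
      ByteBuffer.allocate(LONG_SIZE).order(endianness).putLong(l).array()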
  }

  def arrayEquals(b1: Array[Byte], b2: Array[Byte], start1: Int, start2: Int, length: Int): Boolean = {
    require(length > 0, "length must be positive")
    var i = start1
    var j = start2
    var areEqual = true
    while (areEqual && i < start1 + length) {
      if (b1(i) != b2(j)) {
        areEqual = false
      }
      i += 1
      j += 1
    }
    areEqual
  }
} 
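
A sketch of writeToStream from inside the darwin codebase (the object is private[darwin]): writing 0x0102030405060708L big-endian emits bytes 01 through 08, and little-endian the reverse:

import java.io.ByteArrayOutputStream
import java.nio.ByteOrder
import it.agilelab.darwin.manager.util.ByteArrayUtils._

val os = new ByteArrayOutputStream()
0x0102030405060708L.writeToStream(os, ByteOrder.BIG_ENDIAN)
os.toByteArray.map(b => f"${b & 0xff}%02x").mkString(" ") // "01 02 03 04 05 06 07 08"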
Example 113
Source File: SchemaManagerSparkApp.scala    From darwin   with Apache License 2.0
package it.agilelab.darwin.app.spark

import java.nio.ByteOrder

import com.typesafe.config.{Config, ConfigFactory}
import it.agilelab.darwin.app.spark.classes._
import it.agilelab.darwin.manager.AvroSchemaManagerFactory
import org.apache.avro.reflect.ReflectData
import org.apache.hadoop.fs.FileSystem
import org.apache.spark.sql.SparkSession
import org.slf4j.{Logger, LoggerFactory}

object SchemaManagerSparkApp extends GenericMainClass with SparkManager {

  val mainLogger: Logger = LoggerFactory.getLogger("SchemaManagerSparkApp")

  val endianness: ByteOrder = ByteOrder.BIG_ENDIAN

  override protected def runJob(settings: Config)(implicit fs: FileSystem, sparkSession: SparkSession): Int = {
    import sparkSession.implicits._

    val ds = sparkSession.createDataset(sparkSession.sparkContext.parallelize(1 to 1000, 20))
    mainLogger.info("Registering schemas")
    //    val reflections = new Reflections("it.agilelab.darwin.app.spark.classes")
    //    val annotationClass: Class[AvroSerde] = classOf[AvroSerde]
    //    val classes = reflections.getTypesAnnotatedWith(annotationClass).asScala.toSeq
    //      .filter(c => !c.isInterface && !Modifier.isAbstract(c.getModifiers))
    //    val schemas = classes.map(c => ReflectData.get().getSchema(Class.forName(c.getName)))
    val schemas = Seq(ReflectData.get().getSchema(classOf[Menu]), ReflectData.get().getSchema(classOf[MenuItem]),
      ReflectData.get().getSchema(classOf[Food]), ReflectData.get().getSchema(classOf[Order]),
      ReflectData.get().getSchema(classOf[Price]))
    val conf = ConfigFactory.load()
    val manager = AvroSchemaManagerFactory.initialize(conf)
    val registeredIDs: Seq[Long] = manager.registerAll(schemas).map(_._1)
    mainLogger.info("Schemas registered")

    mainLogger.info("Getting ID for a schema")
    manager.getId(ReflectData.get().getSchema(classOf[Menu]))
    mainLogger.info("ID retrieved for the schema")

    mainLogger.info("Get Schema from ID")
    val d2 = ds.map { x =>
      AvroSchemaManagerFactory.initialize(conf).getSchema(registeredIDs(x % registeredIDs.size))
      x
    }
    d2.count()
    mainLogger.info("All schemas obtained")
    10
  }

  override protected def handleException(exception: Throwable, applicationSettings: Config): Unit = {
    mainLogger.error(exception.getMessage)
  }
} 
Example 114
Source File: CachedLazyAvroSchemaManager.scala    From darwin   with Apache License 2.0 5 votes vote down vote up
package it.agilelab.darwin.manager

import java.nio.ByteOrder

import it.agilelab.darwin.common.Connector
import org.apache.avro.Schema


class CachedLazyAvroSchemaManager(connector: Connector, endianness: ByteOrder)
  extends CachedAvroSchemaManager(connector, endianness) {

  override def getSchema(id: Long): Option[Schema] = {
    cache.getSchema(id).orElse {
      val schema: Option[Schema] = connector.findSchema(id)
      schema.foreach(s => _cache.set(Some(cache.insert(Seq(getId(s) -> s)))))
      schema
    }
  }

  override def getAll: Seq[(Long, Schema)] = {
    _cache.set(Some(cache.insert(connector.fullLoad())))
    cache.getAll
  }
} 
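The lookup pattern above — try the cache, fall back to the connector, insert on a successful load — reduces to a small generic memoizer. A sketch with hypothetical types, not darwin code:

import scala.collection.concurrent.TrieMap

class LazyCache[K, V](load: K => Option[V]) {
  private val cache = TrieMap.empty[K, V]

  def get(key: K): Option[V] =
    cache.get(key).orElse {
      val loaded = load(key)             // e.g. connector.findSchema(id)
      loaded.foreach(cache.put(key, _))  // populate the cache on a hit
      loaded
    }
}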
Example 115
Source File: NullableColumnAccessor.scala    From XSQL   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.sql.execution.columnar

import java.nio.{ByteBuffer, ByteOrder}

import org.apache.spark.sql.catalyst.InternalRow

private[columnar] trait NullableColumnAccessor extends ColumnAccessor {
  private var nullsBuffer: ByteBuffer = _
  private var nullCount: Int = _
  private var seenNulls: Int = 0

  private var nextNullIndex: Int = _
  private var pos: Int = 0

  abstract override protected def initialize(): Unit = {
    nullsBuffer = underlyingBuffer.duplicate().order(ByteOrder.nativeOrder())
    nullCount = ByteBufferHelper.getInt(nullsBuffer)
    nextNullIndex = if (nullCount > 0) ByteBufferHelper.getInt(nullsBuffer) else -1
    pos = 0

    underlyingBuffer.position(underlyingBuffer.position() + 4 + nullCount * 4)
    super.initialize()
  }

  abstract override def extractTo(row: InternalRow, ordinal: Int): Unit = {
    if (pos == nextNullIndex) {
      seenNulls += 1

      if (seenNulls < nullCount) {
        nextNullIndex = ByteBufferHelper.getInt(nullsBuffer)
      }

      row.setNullAt(ordinal)
    } else {
      super.extractTo(row, ordinal)
    }

    pos += 1
  }

  abstract override def hasNext: Boolean = seenNulls < nullCount || super.hasNext
} 
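The accessor assumes the layout `[null count: Int][null positions: Int * count][non-null values]` at the front of the buffer, written in native byte order. A standalone sketch of reading that header (hypothetical payload):

import java.nio.{ByteBuffer, ByteOrder}

val buf = ByteBuffer.allocate(16).order(ByteOrder.nativeOrder())
buf.putInt(2).putInt(0).putInt(3) // two nulls, at ordinals 0 and 3
buf.putInt(7)                     // start of the non-null payload
buf.rewind()

val nullCount = buf.getInt()
val firstNullIndex = if (nullCount > 0) buf.getInt() else -1
assert(nullCount == 2 && firstNullIndex == 0)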
Example 116
Source File: NullableColumnBuilder.scala    From XSQL   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.sql.execution.columnar

import java.nio.{ByteBuffer, ByteOrder}

import org.apache.spark.sql.catalyst.InternalRow


private[columnar] trait NullableColumnBuilder extends ColumnBuilder {
  protected var nulls: ByteBuffer = _
  protected var nullCount: Int = _
  private var pos: Int = _

  abstract override def initialize(
      initialSize: Int,
      columnName: String,
      useCompression: Boolean): Unit = {

    nulls = ByteBuffer.allocate(1024)
    nulls.order(ByteOrder.nativeOrder())
    pos = 0
    nullCount = 0
    super.initialize(initialSize, columnName, useCompression)
  }

  abstract override def appendFrom(row: InternalRow, ordinal: Int): Unit = {
    columnStats.gatherStats(row, ordinal)
    if (row.isNullAt(ordinal)) {
      nulls = ColumnBuilder.ensureFreeSpace(nulls, 4)
      nulls.putInt(pos)
      nullCount += 1
    } else {
      super.appendFrom(row, ordinal)
    }
    pos += 1
  }

  abstract override def build(): ByteBuffer = {
    val nonNulls = super.build()
    val nullDataLen = nulls.position()

    nulls.limit(nullDataLen)
    nulls.rewind()

    val buffer = ByteBuffer
      .allocate(4 + nullDataLen + nonNulls.remaining())
      .order(ByteOrder.nativeOrder())
      .putInt(nullCount)
      .put(nulls)
      .put(nonNulls)

    buffer.rewind()
    buffer
  }

  protected def buildNonNulls(): ByteBuffer = {
    nulls.limit(nulls.position()).rewind()
    super.build()
  }
} 
Example 117
Source File: CompressibleColumnBuilder.scala    From XSQL   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.sql.execution.columnar.compression

import java.nio.{ByteBuffer, ByteOrder}

import org.apache.spark.internal.Logging
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.execution.columnar.{ColumnBuilder, NativeColumnBuilder}
import org.apache.spark.sql.types.AtomicType
import org.apache.spark.unsafe.Platform


private[columnar] trait CompressibleColumnBuilder[T <: AtomicType]
  extends ColumnBuilder with Logging {

  this: NativeColumnBuilder[T] with WithCompressionSchemes =>

  var compressionEncoders: Seq[Encoder[T]] = _

  abstract override def initialize(
      initialSize: Int,
      columnName: String,
      useCompression: Boolean): Unit = {

    compressionEncoders =
      if (useCompression) {
        schemes.filter(_.supports(columnType)).map(_.encoder[T](columnType))
      } else {
        Seq(PassThrough.encoder(columnType))
      }
    super.initialize(initialSize, columnName, useCompression)
  }

  // The various compression schemes, while saving memory use, cause all of the data within
  // the row to become unaligned, thus causing crashes.  Until a way of fixing the compression
  // is found to also allow aligned accesses this must be disabled for SPARC.

  protected def isWorthCompressing(encoder: Encoder[T]) = {
    CompressibleColumnBuilder.unaligned && encoder.compressionRatio < 0.8
  }

  private def gatherCompressibilityStats(row: InternalRow, ordinal: Int): Unit = {
    compressionEncoders.foreach(_.gatherCompressibilityStats(row, ordinal))
  }

  abstract override def appendFrom(row: InternalRow, ordinal: Int): Unit = {
    super.appendFrom(row, ordinal)
    if (!row.isNullAt(ordinal)) {
      gatherCompressibilityStats(row, ordinal)
    }
  }

  override def build(): ByteBuffer = {
    val nonNullBuffer = buildNonNulls()
    val encoder: Encoder[T] = {
      val candidate = compressionEncoders.minBy(_.compressionRatio)
      if (isWorthCompressing(candidate)) candidate else PassThrough.encoder(columnType)
    }

    // Header = null count + null positions
    val headerSize = 4 + nulls.limit()
    val compressedSize = if (encoder.compressedSize == 0) {
      nonNullBuffer.remaining()
    } else {
      encoder.compressedSize
    }

    val compressedBuffer = ByteBuffer
      // Reserves 4 bytes for compression scheme ID
      .allocate(headerSize + 4 + compressedSize)
      .order(ByteOrder.nativeOrder)
      // Write the header
      .putInt(nullCount)
      .put(nulls)

    logDebug(s"Compressor for [$columnName]: $encoder, ratio: ${encoder.compressionRatio}")
    encoder.compress(nonNullBuffer, compressedBuffer)
  }
}

private[columnar] object CompressibleColumnBuilder {
  val unaligned = Platform.unaligned()
} 
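`isWorthCompressing` keeps a scheme only when it saves at least 20% and the platform allows unaligned access; with hypothetical sizes:

val uncompressedSize = 100
val compressedSize = 72
val compressionRatio = compressedSize.toDouble / uncompressedSize // 0.72
val worthCompressing = compressionRatio < 0.8                     // true: keep the encoder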
Example 118
Source File: DiffEncoders.scala    From filo   with Apache License 2.0 5 votes vote down vote up
package org.velvia.filo.codecs

import com.google.flatbuffers.FlatBufferBuilder
import java.nio.{ByteBuffer, ByteOrder}
import org.joda.time.DateTime
import scala.collection.mutable.BitSet

import org.velvia.filo._
import org.velvia.filo.vector._


  def toPrimitiveVector[A: PrimitiveDataVectBuilder](data: Seq[A],
                                                     naMask: BitSet,
                                                     min: A,
                                                     max: A): ByteBuffer = {
    import DiffPrimitiveVector._

    val vectBuilder = implicitly[PrimitiveDataVectBuilder[A]]
    count += 1
    val fbb = new FlatBufferBuilder(getBuffer)
    val naOffset = populateNaMask(fbb, naMask, data.length)

    val ((dataOffset, dnbits), dsigned) = vectBuilder.buildDeltas(fbb, data, min, max)
    startDiffPrimitiveVector(fbb)
    addNaMask(fbb, naOffset)
    addLen(fbb, data.length)
    addData(fbb, dataOffset)
    addInfo(fbb, DataInfo.createDataInfo(fbb, dnbits, dsigned))
    addBase(fbb, vectBuilder.toLong(min))
    finishDiffPrimitiveVectorBuffer(fbb, endDiffPrimitiveVector(fbb))
    putHeaderAndGet(fbb, WireFormat.VECTORTYPE_DIFF, WireFormat.SUBTYPE_PRIMITIVE)
  }

  def toDateTimeVector(millis: LongVectorBuilder,
                       tz: IntVectorBuilder,
                       naMask: BitSet): ByteBuffer = {
    import DiffDateTimeVector._

    val intVectBuilder = AutoIntegralDVBuilders.IntDataVectBuilder
    val longVectBuilder = AutoIntegralDVBuilders.LongDataVectBuilder
    count += 1
    val fbb = new FlatBufferBuilder(getBuffer)
    val naOffset = populateNaMask(fbb, naMask, millis.length)

    val ((mOffset, mnbits), msigned) = longVectBuilder.buildDeltas(fbb, millis.data,
                                                               millis.min, millis.max)
    // Only build timezone vector if they are different.  Most DateTime's have same TZ
    val ((tOffset, tnbits), tsigned) = if (tz.min != tz.max) {
      intVectBuilder.buildDeltas(fbb, tz.data, tz.min, tz.max)
    } else {
      ((-1, -1), false)
    }

    startDiffDateTimeVector(fbb)
    addNaMask(fbb, naOffset)
    addVars(fbb, DDTVars.createDDTVars(fbb, millis.length, tz.min.toByte, millis.min))
    addMillisInfo(fbb, DataInfo.createDataInfo(fbb, mnbits, msigned))
    addMillis(fbb, mOffset)
    if (tOffset >= 0) {
      addTzInfo(fbb, DataInfo.createDataInfo(fbb, tnbits, tsigned))
      addTz(fbb, tOffset)
    }
    finishDiffDateTimeVectorBuffer(fbb, endDiffDateTimeVector(fbb))
    putHeaderAndGet(fbb, WireFormat.VECTORTYPE_DIFF, WireFormat.SUBTYPE_DATETIME)
  }
} 
Example 119
Source File: ConstEncoders.scala    From filo   with Apache License 2.0 5 votes vote down vote up
package org.velvia.filo.codecs

import com.google.flatbuffers.FlatBufferBuilder
import java.nio.{ByteBuffer, ByteOrder}
import scala.collection.mutable.BitSet

import org.velvia.filo._
import org.velvia.filo.vector._


  def toPrimitiveVector[A: PrimitiveDataVectBuilder](data: Seq[A],
                                                     naMask: BitSet,
                                                     min: A,
                                                     max: A): ByteBuffer = {
    import SimplePrimitiveVector._
    require(min == max)

    val vectBuilder = implicitly[PrimitiveDataVectBuilder[A]]
    count += 1
    val fbb = new FlatBufferBuilder(getBuffer)
    val naOffset = populateNaMask(fbb, naMask, data.length)
    val ((dataOffset, nbits), signed) = vectBuilder.build(fbb, Seq(min), min, max)
    startSimplePrimitiveVector(fbb)
    addNaMask(fbb, naOffset)
    addLen(fbb, data.length)
    addData(fbb, dataOffset)
    addInfo(fbb, DataInfo.createDataInfo(fbb, nbits, signed))
    finishSimplePrimitiveVectorBuffer(fbb, endSimplePrimitiveVector(fbb))
    putHeaderAndGet(fbb, WireFormat.VECTORTYPE_CONST, WireFormat.SUBTYPE_PRIMITIVE)
  }

  def toStringVector(str: String, len: Int, naMask: BitSet): ByteBuffer = {
    import ConstStringVector._

    count += 1
    val fbb = new FlatBufferBuilder(getBuffer)
    val naOffset = populateNaMask(fbb, naMask, len)
    val strOffset = fbb.createString(str)
    val offset = createConstStringVector(fbb, len, naOffset, strOffset)
    finishConstStringVectorBuffer(fbb, offset)
    putHeaderAndGet(fbb, WireFormat.VECTORTYPE_CONST, WireFormat.SUBTYPE_STRING)
  }
} 
Example 120
Source File: SimpleEncoders.scala    From filo   with Apache License 2.0 5 votes vote down vote up
package org.velvia.filo.codecs

import com.google.flatbuffers.FlatBufferBuilder
import java.nio.{ByteBuffer, ByteOrder}
import scala.collection.mutable.BitSet

import org.velvia.filo._
import org.velvia.filo.vector._


  def toPrimitiveVector[A: PrimitiveDataVectBuilder](data: Seq[A],
                                                     naMask: BitSet,
                                                     min: A,
                                                     max: A): ByteBuffer = {
    import SimplePrimitiveVector._

    val vectBuilder = implicitly[PrimitiveDataVectBuilder[A]]
    count += 1
    val fbb = new FlatBufferBuilder(getBuffer)
    val naOffset = populateNaMask(fbb, naMask, data.length)
    val ((dataOffset, nbits), signed) = vectBuilder.build(fbb, data, min, max)
    startSimplePrimitiveVector(fbb)
    addNaMask(fbb, naOffset)
    addLen(fbb, data.length)
    addData(fbb, dataOffset)
    addInfo(fbb, DataInfo.createDataInfo(fbb, nbits, signed))
    finishSimplePrimitiveVectorBuffer(fbb, endSimplePrimitiveVector(fbb))
    putHeaderAndGet(fbb, WireFormat.VECTORTYPE_SIMPLE, WireFormat.SUBTYPE_PRIMITIVE)
  }

  def toEmptyVector(len: Int): ByteBuffer = {
    val bb = ByteBuffer.allocate(4).order(ByteOrder.LITTLE_ENDIAN)
    bb.putInt(WireFormat.emptyVector(len))
    bb.position(0)
    bb
  }

  def toStringVector(data: Seq[String], naMask: BitSet): ByteBuffer = {
    val fbb = new FlatBufferBuilder(getBuffer)
    val naOffset = populateNaMask(fbb, naMask, data.length)
    val dataOffset = stringVect(fbb, data)
    val ssvOffset = SimpleStringVector.createSimpleStringVector(fbb, naOffset, dataOffset)
    SimpleStringVector.finishSimpleStringVectorBuffer(fbb, ssvOffset)
    putHeaderAndGet(fbb, WireFormat.VECTORTYPE_SIMPLE, WireFormat.SUBTYPE_STRING)
  }
} 
Example 121
Source File: NullableColumnAccessor.scala    From sparkoscope   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.sql.execution.columnar

import java.nio.{ByteBuffer, ByteOrder}

import org.apache.spark.sql.catalyst.InternalRow

private[columnar] trait NullableColumnAccessor extends ColumnAccessor {
  private var nullsBuffer: ByteBuffer = _
  private var nullCount: Int = _
  private var seenNulls: Int = 0

  private var nextNullIndex: Int = _
  private var pos: Int = 0

  abstract override protected def initialize(): Unit = {
    nullsBuffer = underlyingBuffer.duplicate().order(ByteOrder.nativeOrder())
    nullCount = ByteBufferHelper.getInt(nullsBuffer)
    nextNullIndex = if (nullCount > 0) ByteBufferHelper.getInt(nullsBuffer) else -1
    pos = 0

    underlyingBuffer.position(underlyingBuffer.position + 4 + nullCount * 4)
    super.initialize()
  }

  abstract override def extractTo(row: InternalRow, ordinal: Int): Unit = {
    if (pos == nextNullIndex) {
      seenNulls += 1

      if (seenNulls < nullCount) {
        nextNullIndex = ByteBufferHelper.getInt(nullsBuffer)
      }

      row.setNullAt(ordinal)
    } else {
      super.extractTo(row, ordinal)
    }

    pos += 1
  }

  abstract override def hasNext: Boolean = seenNulls < nullCount || super.hasNext
} 
Example 122
Source File: NullableColumnBuilder.scala    From sparkoscope   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.sql.execution.columnar

import java.nio.{ByteBuffer, ByteOrder}

import org.apache.spark.sql.catalyst.InternalRow


private[columnar] trait NullableColumnBuilder extends ColumnBuilder {
  protected var nulls: ByteBuffer = _
  protected var nullCount: Int = _
  private var pos: Int = _

  abstract override def initialize(
      initialSize: Int,
      columnName: String,
      useCompression: Boolean): Unit = {

    nulls = ByteBuffer.allocate(1024)
    nulls.order(ByteOrder.nativeOrder())
    pos = 0
    nullCount = 0
    super.initialize(initialSize, columnName, useCompression)
  }

  abstract override def appendFrom(row: InternalRow, ordinal: Int): Unit = {
    columnStats.gatherStats(row, ordinal)
    if (row.isNullAt(ordinal)) {
      nulls = ColumnBuilder.ensureFreeSpace(nulls, 4)
      nulls.putInt(pos)
      nullCount += 1
    } else {
      super.appendFrom(row, ordinal)
    }
    pos += 1
  }

  abstract override def build(): ByteBuffer = {
    val nonNulls = super.build()
    val nullDataLen = nulls.position()

    nulls.limit(nullDataLen)
    nulls.rewind()

    val buffer = ByteBuffer
      .allocate(4 + nullDataLen + nonNulls.remaining())
      .order(ByteOrder.nativeOrder())
      .putInt(nullCount)
      .put(nulls)
      .put(nonNulls)

    buffer.rewind()
    buffer
  }

  protected def buildNonNulls(): ByteBuffer = {
    nulls.limit(nulls.position()).rewind()
    super.build()
  }
} 
Example 123
Source File: CompressibleColumnBuilder.scala    From sparkoscope   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.sql.execution.columnar.compression

import java.nio.{ByteBuffer, ByteOrder}

import org.apache.spark.internal.Logging
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.execution.columnar.{ColumnBuilder, NativeColumnBuilder}
import org.apache.spark.sql.types.AtomicType
import org.apache.spark.unsafe.Platform


private[columnar] trait CompressibleColumnBuilder[T <: AtomicType]
  extends ColumnBuilder with Logging {

  this: NativeColumnBuilder[T] with WithCompressionSchemes =>

  var compressionEncoders: Seq[Encoder[T]] = _

  abstract override def initialize(
      initialSize: Int,
      columnName: String,
      useCompression: Boolean): Unit = {

    compressionEncoders =
      if (useCompression) {
        schemes.filter(_.supports(columnType)).map(_.encoder[T](columnType))
      } else {
        Seq(PassThrough.encoder(columnType))
      }
    super.initialize(initialSize, columnName, useCompression)
  }

  // The various compression schemes, while saving memory use, cause all of the data within
  // the row to become unaligned, thus causing crashes.  Until a way of fixing the compression
  // is found to also allow aligned accesses this must be disabled for SPARC.

  protected def isWorthCompressing(encoder: Encoder[T]) = {
    CompressibleColumnBuilder.unaligned && encoder.compressionRatio < 0.8
  }

  private def gatherCompressibilityStats(row: InternalRow, ordinal: Int): Unit = {
    compressionEncoders.foreach(_.gatherCompressibilityStats(row, ordinal))
  }

  abstract override def appendFrom(row: InternalRow, ordinal: Int): Unit = {
    super.appendFrom(row, ordinal)
    if (!row.isNullAt(ordinal)) {
      gatherCompressibilityStats(row, ordinal)
    }
  }

  override def build(): ByteBuffer = {
    val nonNullBuffer = buildNonNulls()
    val encoder: Encoder[T] = {
      val candidate = compressionEncoders.minBy(_.compressionRatio)
      if (isWorthCompressing(candidate)) candidate else PassThrough.encoder(columnType)
    }

    // Header = null count + null positions
    val headerSize = 4 + nulls.limit()
    val compressedSize = if (encoder.compressedSize == 0) {
      nonNullBuffer.remaining()
    } else {
      encoder.compressedSize
    }

    val compressedBuffer = ByteBuffer
      // Reserves 4 bytes for compression scheme ID
      .allocate(headerSize + 4 + compressedSize)
      .order(ByteOrder.nativeOrder)
      // Write the header
      .putInt(nullCount)
      .put(nulls)

    logDebug(s"Compressor for [$columnName]: $encoder, ratio: ${encoder.compressionRatio}")
    encoder.compress(nonNullBuffer, compressedBuffer)
  }
}

private[columnar] object CompressibleColumnBuilder {
  val unaligned = Platform.unaligned()
} 
Example 124
Source File: ADITrace.scala    From ofdm   with BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
package ieee80211

import java.io._
import java.nio.{ByteBuffer, ByteOrder}

import breeze.math.Complex

import org.tukaani.xz._

object ADITrace {
  // These are compressed with xz
  // Need to be decompressed
  def binary(stream: InputStream): Seq[Complex] = {
    // Decompress input stream
    val xz  = new XZInputStream(stream)

    // sometimes, java is dumb
    val buf = new ByteArrayOutputStream()

    var nRead: Int = 0
    var keepGoing: Boolean = true
    val data = new Array[Byte](16384)

    while (keepGoing) {
      nRead = xz.read(data, 0, data.length)
      if (nRead != -1) {
        buf.write(data, 0, nRead)
      } else {
        keepGoing = false
      }
    }

    val bytes = buf.toByteArray

    val bb = ByteBuffer.wrap(bytes)
    bb.order(ByteOrder.LITTLE_ENDIAN)
    val shorts = new Array[Short](bytes.length / 2) // two bytes per 16-bit sample
    bb.asShortBuffer().get(shorts)

    shorts.grouped(2).map { case Array(r, i) =>
      Complex(r.toDouble / 32768.0, i.toDouble / 32768.0)
    }.toSeq
  }

  def text(stream: InputStream): Seq[Complex] = {
    scala.io.Source.fromInputStream(stream).getLines().map {
      case "TEXT" => Complex(0, 0)
      case s =>
        val real :: imag :: Nil = s.split("\t").toList
        Complex(real.toDouble / 32768.0, imag.toDouble / 32768.0)
    }.toSeq.tail
  }

  def resourceStream(resource: String): InputStream = {
    val toret = getClass.getResourceAsStream(resource)
    require(toret != null, "Bad resource")
    toret
  }

  def fileStream(name: String): InputStream = {
    val toret = new FileInputStream(name)
    require(toret != null, "Bad resource")
    toret
  }

  def binaryResource(resource: String): Seq[Complex] = {
    binary(resourceStream(resource))
  }
  def binaryFile(name: String): Seq[Complex] = {
    binary(fileStream(name))
  }

  def textResource(resource: String): Seq[Complex] = {
    text(resourceStream(resource))
  }
  def textFile(name: String): Seq[Complex] = {
    text(fileStream(name))
  }
}

object ADITraceMain {
  def main(arg: Array[String]): Unit = {
    val output = ADITrace.binaryResource("/waveforms/wifi-bpsk-loopback-cable.dat.xz")
    val input  = ADITrace.textResource("/waveforms/wifi_bpsk.txt")

    println(s"Input = $input")
    println(s"Output = $output")
    println()
  }
} 
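The `/ 32768.0` factor normalizes signed 16-bit samples into [-1.0, 1.0); for example:

val sample: Short = -16384
val normalized = sample.toDouble / 32768.0 // -0.5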
Example 125
Source File: BsonSpec.scala    From tepkin   with Apache License 2.0 5 votes vote down vote up
package net.fehmicansaglam.bson

import java.nio.ByteOrder

import net.fehmicansaglam.bson.BsonDsl._
import net.fehmicansaglam.bson.element.BsonObjectId
import net.fehmicansaglam.bson.reader.BsonDocumentReader
import org.joda.time.DateTime
import org.scalatest.OptionValues._
import org.scalatest.{Matchers, WordSpec}

class BsonSpec extends WordSpec with Matchers {

  "Bson" must {

    val document = $document(
      "_id" := BsonObjectId.generate,
      "name" := "jack",
      "age" := 18,
      "months" := $array(1, 2, 3),
      "details" := $document(
        "salary" := 455.5,
        "inventory" := $array("a", 3.5, 1L, true),
        "birthday" := new DateTime(1987, 3, 5, 0, 0),
        "personal" := $document(
          "foo" := "bar"
        )
      )
    )

    "encode and decode BsonDocument" in {
      val encoded = document.encode
      val buffer = encoded.asByteBuffer
      buffer.order(ByteOrder.LITTLE_ENDIAN)
      val actual = BsonDocumentReader.read(buffer)
      actual.value shouldBe document
    }

    "get nested values" in {
      document.getAs[Int]("age").value shouldBe 18
      document.getAs[Double]("details.salary").value shouldBe 455.5
      document.getAs[String]("details.personal.foo").value shouldBe "bar"
      document.getAsList[Any]("details.inventory").value shouldBe List("a", 3.5, 1L, true)

      val details = document.getAs[BsonDocument]("details").get
      details.getAs[Double]("salary").value shouldBe 455.5

      val personal = document.getAs[BsonDocument]("details.personal").get
      personal.getAs[String]("foo").value shouldBe "bar"
    }

    "handle (deeply) nested collections" in {
      val expected = $document(
        "name" := "jack",
        "age" := 18,
        "months" := $array(1, 2, 3),
        "details" := $document(
          "salary" := 455.5,
          "inventory" := $array("a", 3.5, 1L, true, $document("nested" := "document")),
          "birthday" := new DateTime(1987, 3, 5, 0, 0),
          "personal" := $document(
            "foo" := "bar",
            $null("null_value")
          )
        )
      )

      val actual = BsonDocument.from(Map(
        "name" -> "jack",
        "age" -> 18,
        "months" -> List(1, 2, 3),
        "details" -> Map(
          "salary" -> 455.5,
          "inventory" -> List("a", 3.5, 1L, true, Map("nested" -> "document")),
          "birthday" -> new DateTime(1987, 3, 5, 0, 0),
          "personal" -> Map(
            "foo" -> "bar",
            "null_value" -> null
          )
        )
      ))

      actual shouldBe expected
    }
  }
} 
Example 126
Source File: RconConnector.scala    From chatoverflow   with Eclipse Public License 2.0 5 votes vote down vote up
package org.codeoverflow.chatoverflow.requirement.service.rcon

import java.io.{DataInputStream, IOException, InputStream, OutputStream}
import java.net.{Socket, SocketException}
import java.nio.{ByteBuffer, ByteOrder}
import java.util.Random

import org.codeoverflow.chatoverflow.WithLogger
import org.codeoverflow.chatoverflow.connector.Connector

class RconConnector(override val sourceIdentifier: String) extends Connector(sourceIdentifier) with WithLogger {
  override protected var requiredCredentialKeys: List[String] = List("password", "address")
  override protected var optionalCredentialKeys: List[String] = List("port")

  private var socket: Socket = _
  private var outputStream: OutputStream = _
  private var inputStream: InputStream = _
  private var requestId: Int = 0

  def sendCommand(command: String): String = {
    logger debug s"Sending $command to RCON"
    requestId += 1
    if (write(2, command.getBytes("ASCII"))) {
      return read()
    }
    null
  }


  
  override def stop(): Boolean = {
    logger info s"Stopped RCON connector to ${credentials.get.getValue("address").get}!"
    socket.close()
    true
  }
} 
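The `write` and `read` helpers referenced by `sendCommand` are elided above. As context, the Source RCON protocol frames every packet as little-endian ints (length, request ID, type) followed by a null-terminated body and a trailing null. The sketch below illustrates that framing under those assumptions; it is not the connector's actual code:

import java.nio.{ByteBuffer, ByteOrder}

def encodePacket(requestId: Int, packetType: Int, body: Array[Byte]): Array[Byte] = {
  val length = 4 + 4 + body.length + 2 // requestId + type + body + two trailing nulls
  val buffer = ByteBuffer.allocate(4 + length).order(ByteOrder.LITTLE_ENDIAN)
  buffer.putInt(length)
  buffer.putInt(requestId)
  buffer.putInt(packetType)
  buffer.put(body)
  buffer.put(0.toByte).put(0.toByte)
  buffer.array()
}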
Example 127
Source File: PredictionLogger.scala    From ForestFlow   with Apache License 2.0 5 votes vote down vote up
package ai.forestflow.event.subscribers

import java.nio.ByteOrder

import ai.forestflow.domain.{PredictionEvent, PredictionEventGP}
import ai.forestflow.serving.config.ApplicationEnvironment
import akka.actor.{Actor, ActorLogging, Props}
import akka.kafka.ProducerSettings
import graphpipe.InferRequest
import org.apache.kafka.clients.producer.ProducerRecord
import org.apache.kafka.common.serialization.{ByteArraySerializer, StringSerializer}
//import scalapb.json4s.JsonFormat

import scala.util.{Success, Try}

object PredictionLogger {
  

  private lazy val binaryProducerSettings =
    ProducerSettings(producerConfig, new StringSerializer, new ByteArraySerializer)
  private lazy val binaryProducer = binaryProducerSettings.createKafkaProducer()

  override def preStart(): Unit = {
    if (basic_topic.isDefined)
      context.system.eventStream.subscribe(self, classOf[PredictionEvent])

    if (gp_topic.isDefined)
      context.system.eventStream.subscribe(self, classOf[PredictionEventGP])
    super.preStart()
  }
  override def receive: Receive = {
    case event@PredictionEvent(prediction, servedRequest, inferenceRequest, loggingSettings) =>

      val key = loggingSettings
        .keyFeatures
        .flatMap(inferenceRequest.configs.get)
        .mkString(loggingSettings.getKeyFeaturesSeparator)

      if (key.length > 0 )
        binaryProducer.send(new ProducerRecord(basic_topic.get, key, event.toByteArray))
      else
        binaryProducer.send(new ProducerRecord(basic_topic.get, event.toByteArray))

    case event@PredictionEventGP(prediction, servedRequest, inferBytes, loggingSettings) =>
      Try {
        val req = graphpipe.Request.getRootAsRequest(inferBytes.asReadOnlyByteBuffer().order(ByteOrder.LITTLE_ENDIAN))
        val inferRequest = req.req(new InferRequest()).asInstanceOf[InferRequest]
        val inferConfigs = inferRequest.config()
          .split(",")
          .map(_.split(":"))
          .flatMap {
            case Array(k, v) => Some((k, v))
            case _ => None
          }.toMap

        loggingSettings
          .keyFeatures
          .flatMap(inferConfigs.get)
          .mkString(loggingSettings.getKeyFeaturesSeparator)

      } match {
        case Success(key) =>
          binaryProducer.send(new ProducerRecord(gp_topic.get, key, event.toByteArray))
        case _ =>
          binaryProducer.send(new ProducerRecord(gp_topic.get, event.toByteArray))
      }

    case _ => // ignore
  }
} 
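Key construction in both branches is a flatMap over the configured feature names followed by mkString; standalone, with hypothetical values:

val inferConfigs = Map("customer" -> "42", "region" -> "eu")
val keyFeatures = Seq("customer", "region", "missing")
val key = keyFeatures.flatMap(inferConfigs.get).mkString(":") // "42:eu" — absent keys are skipped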
Example 128
Source File: WordEmbeddingsWriter.scala    From spark-nlp   with Apache License 2.0 5 votes vote down vote up
package com.johnsnowlabs.nlp.embeddings

import java.nio.{ByteBuffer, ByteOrder}

import com.johnsnowlabs.storage.{RocksDBConnection, StorageBatchWriter}

class WordEmbeddingsWriter(
                                override val connection: RocksDBConnection,
                                caseSensitiveIndex: Boolean,
                                dimension: Int,
                                maxCacheSize: Int,
                                writeBuffer: Int
                          )
  extends StorageBatchWriter[Array[Float]] with ReadsFromBytes {

  override protected def writeBufferSize: Int = writeBuffer

  override def toBytes(content: Array[Float]): Array[Byte] = {
    val buffer = ByteBuffer.allocate(content.length * 4)
    buffer.order(ByteOrder.LITTLE_ENDIAN)
    for (value <- content) {
      buffer.putFloat(value)
    }
    buffer.array()
  }

} 
Example 129
Source File: WordEmbeddingsReader.scala    From spark-nlp   with Apache License 2.0 5 votes vote down vote up
package com.johnsnowlabs.nlp.embeddings

import java.nio.{ByteBuffer, ByteOrder}

import com.johnsnowlabs.storage.{RocksDBConnection, StorageReader}

class WordEmbeddingsReader(
                            override val connection: RocksDBConnection,
                            override val caseSensitiveIndex: Boolean,
                            dimension: Int,
                            maxCacheSize: Int
                          )
  extends StorageReader[Array[Float]] with ReadsFromBytes {

  override def emptyValue: Array[Float] = Array.fill[Float](dimension)(0f)

  override protected def readCacheSize: Int = maxCacheSize
} 
Example 130
Source File: ReadsFromBytes.scala    From spark-nlp   with Apache License 2.0 5 votes vote down vote up
package com.johnsnowlabs.nlp.embeddings

import java.nio.{ByteBuffer, ByteOrder}

trait ReadsFromBytes {

  def fromBytes(source: Array[Byte]): Array[Float] = {
    val wrapper = ByteBuffer.wrap(source)
    wrapper.order(ByteOrder.LITTLE_ENDIAN)
    val result = Array.fill[Float](source.length / 4)(0f)

    for (i <- result.indices) {
      result(i) = wrapper.getFloat(i * 4)
    }
    result
  }

} 
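`toBytes` in Example 128 and `fromBytes` above are inverses; a self-contained round-trip check (no RocksDB involved):

import java.nio.{ByteBuffer, ByteOrder}

def toBytes(content: Array[Float]): Array[Byte] = {
  val buffer = ByteBuffer.allocate(content.length * 4).order(ByteOrder.LITTLE_ENDIAN)
  content.foreach(v => buffer.putFloat(v))
  buffer.array()
}

def fromBytes(source: Array[Byte]): Array[Float] = {
  val wrapper = ByteBuffer.wrap(source).order(ByteOrder.LITTLE_ENDIAN)
  Array.tabulate(source.length / 4)(i => wrapper.getFloat(i * 4))
}

val original = Array(0.5f, -1.25f, 3f)
assert(fromBytes(toBytes(original)).sameElements(original))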
Example 131
Source File: NullableColumnAccessor.scala    From multi-tenancy-spark   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.sql.execution.columnar

import java.nio.{ByteBuffer, ByteOrder}

import org.apache.spark.sql.catalyst.InternalRow

private[columnar] trait NullableColumnAccessor extends ColumnAccessor {
  private var nullsBuffer: ByteBuffer = _
  private var nullCount: Int = _
  private var seenNulls: Int = 0

  private var nextNullIndex: Int = _
  private var pos: Int = 0

  abstract override protected def initialize(): Unit = {
    nullsBuffer = underlyingBuffer.duplicate().order(ByteOrder.nativeOrder())
    nullCount = ByteBufferHelper.getInt(nullsBuffer)
    nextNullIndex = if (nullCount > 0) ByteBufferHelper.getInt(nullsBuffer) else -1
    pos = 0

    underlyingBuffer.position(underlyingBuffer.position + 4 + nullCount * 4)
    super.initialize()
  }

  abstract override def extractTo(row: InternalRow, ordinal: Int): Unit = {
    if (pos == nextNullIndex) {
      seenNulls += 1

      if (seenNulls < nullCount) {
        nextNullIndex = ByteBufferHelper.getInt(nullsBuffer)
      }

      row.setNullAt(ordinal)
    } else {
      super.extractTo(row, ordinal)
    }

    pos += 1
  }

  abstract override def hasNext: Boolean = seenNulls < nullCount || super.hasNext
} 
Example 132
Source File: NullableColumnBuilder.scala    From multi-tenancy-spark   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.sql.execution.columnar

import java.nio.{ByteBuffer, ByteOrder}

import org.apache.spark.sql.catalyst.InternalRow


private[columnar] trait NullableColumnBuilder extends ColumnBuilder {
  protected var nulls: ByteBuffer = _
  protected var nullCount: Int = _
  private var pos: Int = _

  abstract override def initialize(
      initialSize: Int,
      columnName: String,
      useCompression: Boolean): Unit = {

    nulls = ByteBuffer.allocate(1024)
    nulls.order(ByteOrder.nativeOrder())
    pos = 0
    nullCount = 0
    super.initialize(initialSize, columnName, useCompression)
  }

  abstract override def appendFrom(row: InternalRow, ordinal: Int): Unit = {
    columnStats.gatherStats(row, ordinal)
    if (row.isNullAt(ordinal)) {
      nulls = ColumnBuilder.ensureFreeSpace(nulls, 4)
      nulls.putInt(pos)
      nullCount += 1
    } else {
      super.appendFrom(row, ordinal)
    }
    pos += 1
  }

  abstract override def build(): ByteBuffer = {
    val nonNulls = super.build()
    val nullDataLen = nulls.position()

    nulls.limit(nullDataLen)
    nulls.rewind()

    val buffer = ByteBuffer
      .allocate(4 + nullDataLen + nonNulls.remaining())
      .order(ByteOrder.nativeOrder())
      .putInt(nullCount)
      .put(nulls)
      .put(nonNulls)

    buffer.rewind()
    buffer
  }

  protected def buildNonNulls(): ByteBuffer = {
    nulls.limit(nulls.position()).rewind()
    super.build()
  }
} 
Example 133
Source File: CompressibleColumnBuilder.scala    From multi-tenancy-spark   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.sql.execution.columnar.compression

import java.nio.{ByteBuffer, ByteOrder}

import org.apache.spark.internal.Logging
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.execution.columnar.{ColumnBuilder, NativeColumnBuilder}
import org.apache.spark.sql.types.AtomicType
import org.apache.spark.unsafe.Platform


private[columnar] trait CompressibleColumnBuilder[T <: AtomicType]
  extends ColumnBuilder with Logging {

  this: NativeColumnBuilder[T] with WithCompressionSchemes =>

  var compressionEncoders: Seq[Encoder[T]] = _

  abstract override def initialize(
      initialSize: Int,
      columnName: String,
      useCompression: Boolean): Unit = {

    compressionEncoders =
      if (useCompression) {
        schemes.filter(_.supports(columnType)).map(_.encoder[T](columnType))
      } else {
        Seq(PassThrough.encoder(columnType))
      }
    super.initialize(initialSize, columnName, useCompression)
  }

  // The various compression schemes, while saving memory use, cause all of the data within
  // the row to become unaligned, thus causing crashes.  Until a way of fixing the compression
  // is found to also allow aligned accesses this must be disabled for SPARC.

  protected def isWorthCompressing(encoder: Encoder[T]) = {
    CompressibleColumnBuilder.unaligned && encoder.compressionRatio < 0.8
  }

  private def gatherCompressibilityStats(row: InternalRow, ordinal: Int): Unit = {
    compressionEncoders.foreach(_.gatherCompressibilityStats(row, ordinal))
  }

  abstract override def appendFrom(row: InternalRow, ordinal: Int): Unit = {
    super.appendFrom(row, ordinal)
    if (!row.isNullAt(ordinal)) {
      gatherCompressibilityStats(row, ordinal)
    }
  }

  override def build(): ByteBuffer = {
    val nonNullBuffer = buildNonNulls()
    val encoder: Encoder[T] = {
      val candidate = compressionEncoders.minBy(_.compressionRatio)
      if (isWorthCompressing(candidate)) candidate else PassThrough.encoder(columnType)
    }

    // Header = null count + null positions
    val headerSize = 4 + nulls.limit()
    val compressedSize = if (encoder.compressedSize == 0) {
      nonNullBuffer.remaining()
    } else {
      encoder.compressedSize
    }

    val compressedBuffer = ByteBuffer
      // Reserves 4 bytes for compression scheme ID
      .allocate(headerSize + 4 + compressedSize)
      .order(ByteOrder.nativeOrder)
      // Write the header
      .putInt(nullCount)
      .put(nulls)

    logDebug(s"Compressor for [$columnName]: $encoder, ratio: ${encoder.compressionRatio}")
    encoder.compress(nonNullBuffer, compressedBuffer)
  }
}

private[columnar] object CompressibleColumnBuilder {
  val unaligned = Platform.unaligned()
} 
Example 134
Source File: NullableColumnAccessor.scala    From iolap   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.sql.columnar

import java.nio.{ByteOrder, ByteBuffer}

import org.apache.spark.sql.catalyst.expressions.MutableRow

private[sql] trait NullableColumnAccessor extends ColumnAccessor {
  private var nullsBuffer: ByteBuffer = _
  private var nullCount: Int = _
  private var seenNulls: Int = 0

  private var nextNullIndex: Int = _
  private var pos: Int = 0

  abstract override protected def initialize(): Unit = {
    nullsBuffer = underlyingBuffer.duplicate().order(ByteOrder.nativeOrder())
    nullCount = nullsBuffer.getInt()
    nextNullIndex = if (nullCount > 0) nullsBuffer.getInt() else -1
    pos = 0

    underlyingBuffer.position(underlyingBuffer.position + 4 + nullCount * 4)
    super.initialize()
  }

  abstract override def extractTo(row: MutableRow, ordinal: Int): Unit = {
    if (pos == nextNullIndex) {
      seenNulls += 1

      if (seenNulls < nullCount) {
        nextNullIndex = nullsBuffer.getInt()
      }

      row.setNullAt(ordinal)
    } else {
      super.extractTo(row, ordinal)
    }

    pos += 1
  }

  abstract override def hasNext: Boolean = seenNulls < nullCount || super.hasNext
} 
Example 135
Source File: NullableColumnBuilder.scala    From iolap   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.sql.columnar

import java.nio.{ByteBuffer, ByteOrder}

import org.apache.spark.sql.Row


private[sql] trait NullableColumnBuilder extends ColumnBuilder {
  protected var nulls: ByteBuffer = _
  protected var nullCount: Int = _
  private var pos: Int = _

  abstract override def initialize(
      initialSize: Int,
      columnName: String,
      useCompression: Boolean): Unit = {

    nulls = ByteBuffer.allocate(1024)
    nulls.order(ByteOrder.nativeOrder())
    pos = 0
    nullCount = 0
    super.initialize(initialSize, columnName, useCompression)
  }

  abstract override def appendFrom(row: Row, ordinal: Int): Unit = {
    columnStats.gatherStats(row, ordinal)
    if (row.isNullAt(ordinal)) {
      nulls = ColumnBuilder.ensureFreeSpace(nulls, 4)
      nulls.putInt(pos)
      nullCount += 1
    } else {
      super.appendFrom(row, ordinal)
    }
    pos += 1
  }

  abstract override def build(): ByteBuffer = {
    val nonNulls = super.build()
    val typeId = nonNulls.getInt()
    val nullDataLen = nulls.position()

    nulls.limit(nullDataLen)
    nulls.rewind()

    val buffer = ByteBuffer
      .allocate(4 + 4 + nullDataLen + nonNulls.remaining())
      .order(ByteOrder.nativeOrder())
      .putInt(typeId)
      .putInt(nullCount)
      .put(nulls)
      .put(nonNulls)

    buffer.rewind()
    buffer
  }

  protected def buildNonNulls(): ByteBuffer = {
    nulls.limit(nulls.position()).rewind()
    super.build()
  }
} 
Example 136
Source File: CompressibleColumnBuilder.scala    From iolap   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.sql.columnar.compression

import java.nio.{ByteBuffer, ByteOrder}

import org.apache.spark.Logging
import org.apache.spark.sql.Row
import org.apache.spark.sql.columnar.{ColumnBuilder, NativeColumnBuilder}
import org.apache.spark.sql.types.AtomicType


private[sql] trait CompressibleColumnBuilder[T <: AtomicType]
  extends ColumnBuilder with Logging {

  this: NativeColumnBuilder[T] with WithCompressionSchemes =>

  var compressionEncoders: Seq[Encoder[T]] = _

  abstract override def initialize(
      initialSize: Int,
      columnName: String,
      useCompression: Boolean): Unit = {

    compressionEncoders =
      if (useCompression) {
        schemes.filter(_.supports(columnType)).map(_.encoder[T](columnType))
      } else {
        Seq(PassThrough.encoder(columnType))
      }
    super.initialize(initialSize, columnName, useCompression)
  }

  protected def isWorthCompressing(encoder: Encoder[T]) = {
    encoder.compressionRatio < 0.8
  }

  private def gatherCompressibilityStats(row: Row, ordinal: Int): Unit = {
    var i = 0
    while (i < compressionEncoders.length) {
      compressionEncoders(i).gatherCompressibilityStats(row, ordinal)
      i += 1
    }
  }

  abstract override def appendFrom(row: Row, ordinal: Int): Unit = {
    super.appendFrom(row, ordinal)
    if (!row.isNullAt(ordinal)) {
      gatherCompressibilityStats(row, ordinal)
    }
  }

  override def build(): ByteBuffer = {
    val nonNullBuffer = buildNonNulls()
    val typeId = nonNullBuffer.getInt()
    val encoder: Encoder[T] = {
      val candidate = compressionEncoders.minBy(_.compressionRatio)
      if (isWorthCompressing(candidate)) candidate else PassThrough.encoder(columnType)
    }

    // Header = column type ID + null count + null positions
    val headerSize = 4 + 4 + nulls.limit()
    val compressedSize = if (encoder.compressedSize == 0) {
      nonNullBuffer.remaining()
    } else {
      encoder.compressedSize
    }

    val compressedBuffer = ByteBuffer
      // Reserves 4 bytes for compression scheme ID
      .allocate(headerSize + 4 + compressedSize)
      .order(ByteOrder.nativeOrder)
      // Write the header
      .putInt(typeId)
      .putInt(nullCount)
      .put(nulls)

    logDebug(s"Compressor for [$columnName]: $encoder, ratio: ${encoder.compressionRatio}")
    encoder.compress(nonNullBuffer, compressedBuffer)
  }
} 
Example 137
Source File: CompressionScheme.scala    From iolap   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.sql.columnar.compression

import java.nio.{ByteBuffer, ByteOrder}

import org.apache.spark.sql.Row
import org.apache.spark.sql.catalyst.expressions.MutableRow
import org.apache.spark.sql.columnar.{ColumnType, NativeColumnType}
import org.apache.spark.sql.types.AtomicType

private[sql] trait Encoder[T <: AtomicType] {
  def gatherCompressibilityStats(row: Row, ordinal: Int): Unit = {}

  def compressedSize: Int

  def uncompressedSize: Int

  def compressionRatio: Double = {
    if (uncompressedSize > 0) compressedSize.toDouble / uncompressedSize else 1.0
  }

  def compress(from: ByteBuffer, to: ByteBuffer): ByteBuffer
}

private[sql] trait Decoder[T <: AtomicType] {
  def next(row: MutableRow, ordinal: Int): Unit

  def hasNext: Boolean
}

private[sql] trait CompressionScheme {
  def typeId: Int

  def supports(columnType: ColumnType[_, _]): Boolean

  def encoder[T <: AtomicType](columnType: NativeColumnType[T]): Encoder[T]

  def decoder[T <: AtomicType](buffer: ByteBuffer, columnType: NativeColumnType[T]): Decoder[T]
}

private[sql] trait WithCompressionSchemes {
  def schemes: Seq[CompressionScheme]
}

private[sql] trait AllCompressionSchemes extends WithCompressionSchemes {
  override val schemes: Seq[CompressionScheme] = CompressionScheme.all
}

private[sql] object CompressionScheme {
  val all: Seq[CompressionScheme] =
    Seq(PassThrough, RunLengthEncoding, DictionaryEncoding, BooleanBitSet, IntDelta, LongDelta)

  private val typeIdToScheme = all.map(scheme => scheme.typeId -> scheme).toMap

  def apply(typeId: Int): CompressionScheme = {
    typeIdToScheme.getOrElse(typeId, throw new UnsupportedOperationException(
      s"Unrecognized compression scheme type ID: $typeId"))
  }

  def columnHeaderSize(columnBuffer: ByteBuffer): Int = {
    val header = columnBuffer.duplicate().order(ByteOrder.nativeOrder)
    val nullCount = header.getInt(4)
    // Column type ID + null count + null positions
    4 + 4 + 4 * nullCount
  }
} 
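A small worked example of `columnHeaderSize` (hypothetical header contents; callable from within org.apache.spark.sql, since the object is `private[sql]`):

import java.nio.{ByteBuffer, ByteOrder}

val buf = ByteBuffer.allocate(64).order(ByteOrder.nativeOrder)
buf.putInt(0)                           // column type ID
buf.putInt(3)                           // null count
(0 until 3).foreach(i => buf.putInt(i)) // null positions
buf.rewind()
assert(CompressionScheme.columnHeaderSize(buf) == 4 + 4 + 3 * 4) // 20 bytes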
Example 138
Source File: NanoTime.scala    From iolap   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.sql.parquet.timestamp

import java.nio.{ByteBuffer, ByteOrder}

import parquet.Preconditions
import parquet.io.api.{Binary, RecordConsumer}

private[parquet] class NanoTime extends Serializable {
  private var julianDay = 0
  private var timeOfDayNanos = 0L

  def set(julianDay: Int, timeOfDayNanos: Long): this.type = {
    this.julianDay = julianDay
    this.timeOfDayNanos = timeOfDayNanos
    this
  }

  def getJulianDay: Int = julianDay

  def getTimeOfDayNanos: Long = timeOfDayNanos

  def toBinary: Binary = {
    val buf = ByteBuffer.allocate(12)
    buf.order(ByteOrder.LITTLE_ENDIAN)
    buf.putLong(timeOfDayNanos)
    buf.putInt(julianDay)
    buf.flip()
    Binary.fromByteBuffer(buf)
  }

  def writeValue(recordConsumer: RecordConsumer): Unit = {
    recordConsumer.addBinary(toBinary)
  }

  override def toString: String =
    "NanoTime{julianDay=" + julianDay + ", timeOfDayNanos=" + timeOfDayNanos + "}"
}

private[sql] object NanoTime {
  def fromBinary(bytes: Binary): NanoTime = {
    Preconditions.checkArgument(bytes.length() == 12, "Must be 12 bytes")
    val buf = bytes.toByteBuffer
    buf.order(ByteOrder.LITTLE_ENDIAN)
    val timeOfDayNanos = buf.getLong
    val julianDay = buf.getInt
    new NanoTime().set(julianDay, timeOfDayNanos)
  }

  def apply(julianDay: Int, timeOfDayNanos: Long): NanoTime = {
    new NanoTime().set(julianDay, timeOfDayNanos)
  }
} 
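A round-trip sketch for the 12-byte encoding (hypothetical instants, usable from within the enclosing package):

val nt = NanoTime(2451545, 43200000000000L) // Julian day for 2000-01-01, noon in nanos
val decoded = NanoTime.fromBinary(nt.toBinary)
assert(decoded.getJulianDay == 2451545)
assert(decoded.getTimeOfDayNanos == 43200000000000L)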
Example 139
Source File: ByteCodable.scala    From tensorflow_scala   with Apache License 2.0 5 votes vote down vote up
package org.platanios.tensorflow.api.utilities

import java.nio.{ByteBuffer, ByteOrder}

import org.platanios.tensorflow.api.core.Shape
import org.platanios.tensorflow.api.core.types.{DataType, TF}

// TODO: Support more data structures (e.g., using shapeless), and generalize `Seq` to collections.

trait ByteCodable[T] {
  type Scalar

  def byteCount(value: T): Int
  def convertToByteArray(value: T): (Array[Byte], Shape)
}

object ByteCodable {
  def apply[T](implicit ev: ByteCodable[T]): Aux[T, ev.Scalar] = {
    ev.asInstanceOf[Aux[T, ev.Scalar]]
  }

  type Aux[T, S] = ByteCodable[T] {
    type Scalar = S
  }

  implicit def dataTypeByteCodable[T: TF]: Aux[T, T] = new ByteCodable[T] {
    override type Scalar = T

    override def byteCount(value: T): Int = TF[T].dataType.nativeByteSize.get

    override def convertToByteArray(value: T): (Array[Byte], Shape) = {
      val buffer = ByteBuffer.allocate(byteCount(value)).order(ByteOrder.LITTLE_ENDIAN)
      DataType.putElementInBuffer(buffer, 0, value)
      (buffer.array(), Shape())
    }
  }

  implicit def arrayByteCodable[T](implicit ev: ByteCodable[T]): Aux[Array[T], ev.Scalar] = new ByteCodable[Array[T]] {
    override type Scalar = ev.Scalar

    override def byteCount(value: Array[T]): Int = value.map(ByteCodable[T].byteCount).sum

    override def convertToByteArray(value: Array[T]): (Array[Byte], Shape) = {
      val results = value.map(ByteCodable[T].convertToByteArray)
      require(
        results.forall(_._2.asArray.sameElements(results.head._2.asArray)),
        "All nested arrays must have the same size.")
      (results.flatMap(_._1), Shape(value.length) ++ results.head._2)
    }
  }

  implicit def seqByteCodable[T](implicit ev: ByteCodable[T]): Aux[Seq[T], ev.Scalar] = new ByteCodable[Seq[T]] {
    override type Scalar = ev.Scalar

    override def byteCount(value: Seq[T]): Int = value.map(ByteCodable[T].byteCount).sum

    override def convertToByteArray(value: Seq[T]): (Array[Byte], Shape) = {
      val results = value.map(ByteCodable[T].convertToByteArray)
      require(
        results.forall(_._2.asArray.sameElements(results.head._2.asArray)),
        "All nested arrays must have the same size.")
      (results.flatMap(_._1).toArray, Shape(value.length) ++ results.head._2)
    }
  }
} 
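A usage sketch for the array instance (it assumes an implicit `TF[Float]` is in scope, which tensorflow_scala provides for its supported types):

import org.platanios.tensorflow.api.core.Shape

val (bytes, shape) = ByteCodable[Array[Array[Float]]]
  .convertToByteArray(Array(Array(1f, 2f), Array(3f, 4f)))
assert(shape == Shape(2, 2))
assert(bytes.length == 4 * 4) // four little-endian 4-byte floats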
Example 140
Source File: NullableColumnAccessor.scala    From spark1.52   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.sql.columnar

import java.nio.{ByteOrder, ByteBuffer}

import org.apache.spark.sql.catalyst.expressions.MutableRow

private[sql] trait NullableColumnAccessor extends ColumnAccessor {
  private var nullsBuffer: ByteBuffer = _
  private var nullCount: Int = _
  private var seenNulls: Int = 0

  private var nextNullIndex: Int = _
  private var pos: Int = 0

  abstract override protected def initialize(): Unit = {
    nullsBuffer = underlyingBuffer.duplicate().order(ByteOrder.nativeOrder())
    nullCount = nullsBuffer.getInt()
    nextNullIndex = if (nullCount > 0) nullsBuffer.getInt() else -1
    pos = 0

    underlyingBuffer.position(underlyingBuffer.position + 4 + nullCount * 4)
    super.initialize()
  }

  abstract override def extractTo(row: MutableRow, ordinal: Int): Unit = {
    if (pos == nextNullIndex) {
      seenNulls += 1

      if (seenNulls < nullCount) {
        nextNullIndex = nullsBuffer.getInt()
      }

      row.setNullAt(ordinal)
    } else {
      super.extractTo(row, ordinal)
    }

    pos += 1
  }

  abstract override def hasNext: Boolean = seenNulls < nullCount || super.hasNext
} 
Example 141
Source File: NullableColumnBuilder.scala    From spark1.52   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.sql.columnar

import java.nio.{ByteBuffer, ByteOrder}

import org.apache.spark.sql.catalyst.InternalRow


private[sql] trait NullableColumnBuilder extends ColumnBuilder {
  protected var nulls: ByteBuffer = _
  protected var nullCount: Int = _
  private var pos: Int = _

  abstract override def initialize(
      initialSize: Int,
      columnName: String,
      useCompression: Boolean): Unit = {
    // ByteBuffer.allocate: a buffer must be allocated before it can be read or
    // written; the static allocate() method creates one.
    nulls = ByteBuffer.allocate(1024)
    nulls.order(ByteOrder.nativeOrder())
    pos = 0
    nullCount = 0
    super.initialize(initialSize, columnName, useCompression)
  }

  abstract override def appendFrom(row: InternalRow, ordinal: Int): Unit = {
    columnStats.gatherStats(row, ordinal)
    if (row.isNullAt(ordinal)) {
      nulls = ColumnBuilder.ensureFreeSpace(nulls, 4)
      nulls.putInt(pos)
      nullCount += 1
    } else {
      super.appendFrom(row, ordinal)
    }
    pos += 1
  }

  abstract override def build(): ByteBuffer = {
    val nonNulls = super.build()
    val typeId = nonNulls.getInt()
    val nullDataLen = nulls.position()

    nulls.limit(nullDataLen)
    nulls.rewind()

    val buffer = ByteBuffer
      .allocate(4 + 4 + nullDataLen + nonNulls.remaining())
      .order(ByteOrder.nativeOrder())
      .putInt(typeId)
      .putInt(nullCount)
      .put(nulls)
      .put(nonNulls)

    buffer.rewind()
    buffer
  }

  protected def buildNonNulls(): ByteBuffer = {
    nulls.limit(nulls.position()).rewind()
    super.build()
  }
} 
Example 142
Source File: CompressibleColumnBuilder.scala    From spark1.52   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.sql.columnar.compression

import java.nio.{ByteBuffer, ByteOrder}

import org.apache.spark.Logging
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.columnar.{ColumnBuilder, NativeColumnBuilder}
import org.apache.spark.sql.types.AtomicType


private[sql] trait CompressibleColumnBuilder[T <: AtomicType]
  extends ColumnBuilder with Logging {

  this: NativeColumnBuilder[T] with WithCompressionSchemes =>

  var compressionEncoders: Seq[Encoder[T]] = _

  abstract override def initialize(
      initialSize: Int,
      columnName: String,
      useCompression: Boolean): Unit = {

    compressionEncoders =
      if (useCompression) {
        schemes.filter(_.supports(columnType)).map(_.encoder[T](columnType))
      } else {
        Seq(PassThrough.encoder(columnType))
      }
    super.initialize(initialSize, columnName, useCompression)
  }

  protected def isWorthCompressing(encoder: Encoder[T]) = {
    encoder.compressionRatio < 0.8
  }

  private def gatherCompressibilityStats(row: InternalRow, ordinal: Int): Unit = {
    var i = 0
    while (i < compressionEncoders.length) {
      compressionEncoders(i).gatherCompressibilityStats(row, ordinal)
      i += 1
    }
  }

  abstract override def appendFrom(row: InternalRow, ordinal: Int): Unit = {
    super.appendFrom(row, ordinal)
    if (!row.isNullAt(ordinal)) {
      gatherCompressibilityStats(row, ordinal)
    }
  }

  override def build(): ByteBuffer = {
    val nonNullBuffer = buildNonNulls()
    val typeId = nonNullBuffer.getInt()
    val encoder: Encoder[T] = {
      val candidate = compressionEncoders.minBy(_.compressionRatio)
      if (isWorthCompressing(candidate)) candidate else PassThrough.encoder(columnType)
    }

    // Header = column type ID + null count + null positions
    val headerSize = 4 + 4 + nulls.limit()
    val compressedSize = if (encoder.compressedSize == 0) {
      nonNullBuffer.remaining()
    } else {
      encoder.compressedSize
    }

    val compressedBuffer = ByteBuffer
      // Reserves 4 bytes for compression scheme ID
      .allocate(headerSize + 4 + compressedSize)
      .order(ByteOrder.nativeOrder)
      // Write the header
      .putInt(typeId)
      .putInt(nullCount)
      .put(nulls)

    logDebug(s"Compressor for [$columnName]: $encoder, ratio: ${encoder.compressionRatio}")
    encoder.compress(nonNullBuffer, compressedBuffer)
  }
} 
Example 143
Source File: CompressionScheme.scala    From spark1.52   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.sql.columnar.compression

import java.nio.{ByteBuffer, ByteOrder}
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.expressions.MutableRow
import org.apache.spark.sql.columnar.{ColumnType, NativeColumnType}
import org.apache.spark.sql.types.AtomicType

private[sql] trait Encoder[T <: AtomicType] {
  def gatherCompressibilityStats(row: InternalRow, ordinal: Int): Unit = {}

  def compressedSize: Int

  def uncompressedSize: Int

  def compressionRatio: Double = {
    if (uncompressedSize > 0) compressedSize.toDouble / uncompressedSize else 1.0
  }

  def compress(from: ByteBuffer, to: ByteBuffer): ByteBuffer
}

private[sql] trait Decoder[T <: AtomicType] {
  def next(row: MutableRow, ordinal: Int): Unit

  def hasNext: Boolean
}

private[sql] trait CompressionScheme {
  def typeId: Int

  def supports(columnType: ColumnType[_]): Boolean

  def encoder[T <: AtomicType](columnType: NativeColumnType[T]): Encoder[T]

  def decoder[T <: AtomicType](buffer: ByteBuffer, columnType: NativeColumnType[T]): Decoder[T]
}

private[sql] trait WithCompressionSchemes {
  def schemes: Seq[CompressionScheme]
}

private[sql] trait AllCompressionSchemes extends WithCompressionSchemes {
  override val schemes: Seq[CompressionScheme] = CompressionScheme.all
}

private[sql] object CompressionScheme {
  val all: Seq[CompressionScheme] =
    Seq(PassThrough, RunLengthEncoding, DictionaryEncoding, BooleanBitSet, IntDelta, LongDelta)

  private val typeIdToScheme = all.map(scheme => scheme.typeId -> scheme).toMap

  def apply(typeId: Int): CompressionScheme = {
    typeIdToScheme.getOrElse(typeId, throw new UnsupportedOperationException(
      s"Unrecognized compression scheme type ID: $typeId"))
  }

  def columnHeaderSize(columnBuffer: ByteBuffer): Int = {
    val header = columnBuffer.duplicate().order(ByteOrder.nativeOrder)
    val nullCount = header.getInt(4)
    // Column type ID + null count + null positions
    4 + 4 + 4 * nullCount
  }
} 
Example 144
Source File: NullableColumnAccessor.scala    From Spark-2.3.1   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.sql.execution.columnar

import java.nio.{ByteBuffer, ByteOrder}

import org.apache.spark.sql.catalyst.InternalRow

private[columnar] trait NullableColumnAccessor extends ColumnAccessor {
  private var nullsBuffer: ByteBuffer = _
  private var nullCount: Int = _
  private var seenNulls: Int = 0

  private var nextNullIndex: Int = _
  private var pos: Int = 0

  abstract override protected def initialize(): Unit = {
    nullsBuffer = underlyingBuffer.duplicate().order(ByteOrder.nativeOrder())
    nullCount = ByteBufferHelper.getInt(nullsBuffer)
    nextNullIndex = if (nullCount > 0) ByteBufferHelper.getInt(nullsBuffer) else -1
    pos = 0

    underlyingBuffer.position(underlyingBuffer.position() + 4 + nullCount * 4)
    super.initialize()
  }

  abstract override def extractTo(row: InternalRow, ordinal: Int): Unit = {
    if (pos == nextNullIndex) {
      seenNulls += 1

      if (seenNulls < nullCount) {
        nextNullIndex = ByteBufferHelper.getInt(nullsBuffer)
      }

      row.setNullAt(ordinal)
    } else {
      super.extractTo(row, ordinal)
    }

    pos += 1
  }

  abstract override def hasNext: Boolean = seenNulls < nullCount || super.hasNext
} 
Example 145
Source File: NullableColumnBuilder.scala    From Spark-2.3.1   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.sql.execution.columnar

import java.nio.{ByteBuffer, ByteOrder}

import org.apache.spark.sql.catalyst.InternalRow


private[columnar] trait NullableColumnBuilder extends ColumnBuilder {
  protected var nulls: ByteBuffer = _
  protected var nullCount: Int = _
  private var pos: Int = _

  abstract override def initialize(
      initialSize: Int,
      columnName: String,
      useCompression: Boolean): Unit = {

    nulls = ByteBuffer.allocate(1024)
    nulls.order(ByteOrder.nativeOrder())
    pos = 0
    nullCount = 0
    super.initialize(initialSize, columnName, useCompression)
  }

  abstract override def appendFrom(row: InternalRow, ordinal: Int): Unit = {
    columnStats.gatherStats(row, ordinal)
    if (row.isNullAt(ordinal)) {
      nulls = ColumnBuilder.ensureFreeSpace(nulls, 4)
      nulls.putInt(pos)
      nullCount += 1
    } else {
      super.appendFrom(row, ordinal)
    }
    pos += 1
  }

  abstract override def build(): ByteBuffer = {
    val nonNulls = super.build()
    val nullDataLen = nulls.position()

    nulls.limit(nullDataLen)
    nulls.rewind()

    val buffer = ByteBuffer
      .allocate(4 + nullDataLen + nonNulls.remaining())
      .order(ByteOrder.nativeOrder())
      .putInt(nullCount)
      .put(nulls)
      .put(nonNulls)

    buffer.rewind()
    buffer
  }

  protected def buildNonNulls(): ByteBuffer = {
    nulls.limit(nulls.position()).rewind()
    super.build()
  }
} 
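The buffer produced by build() is laid out as [null count][null positions...][non-null column data]. A minimal sketch of reading that header back, with invented contents:

import java.nio.{ByteBuffer, ByteOrder}

// Suppose build() produced a column with two nulls, at rows 0 and 3.
val built = ByteBuffer.allocate(32).order(ByteOrder.nativeOrder())
built.putInt(2).putInt(0).putInt(3)  // header: null count + positions
built.putInt(42).putInt(7)           // non-null data (type-specific)
built.rewind()

val nullCount = built.getInt()                             // 2
val nullPositions = Array.fill(nullCount)(built.getInt())  // Array(0, 3)
// `built` is now positioned at the first byte of non-null data.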
Example 146
Source File: CompressibleColumnBuilder.scala    From Spark-2.3.1   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.sql.execution.columnar.compression

import java.nio.{ByteBuffer, ByteOrder}

import org.apache.spark.internal.Logging
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.execution.columnar.{ColumnBuilder, NativeColumnBuilder}
import org.apache.spark.sql.types.AtomicType
import org.apache.spark.unsafe.Platform


private[columnar] trait CompressibleColumnBuilder[T <: AtomicType]
  extends ColumnBuilder with Logging {

  this: NativeColumnBuilder[T] with WithCompressionSchemes =>

  var compressionEncoders: Seq[Encoder[T]] = _

  abstract override def initialize(
      initialSize: Int,
      columnName: String,
      useCompression: Boolean): Unit = {

    compressionEncoders =
      if (useCompression) {
        schemes.filter(_.supports(columnType)).map(_.encoder[T](columnType))
      } else {
        Seq(PassThrough.encoder(columnType))
      }
    super.initialize(initialSize, columnName, useCompression)
  }

  // The various compression schemes, while saving memory, cause the data within the row to
  // become unaligned, which crashes on platforms that require aligned accesses. Until the
  // compression is fixed to use aligned accesses, it must stay disabled on SPARC.

  protected def isWorthCompressing(encoder: Encoder[T]) = {
    CompressibleColumnBuilder.unaligned && encoder.compressionRatio < 0.8
  }

  private def gatherCompressibilityStats(row: InternalRow, ordinal: Int): Unit = {
    compressionEncoders.foreach(_.gatherCompressibilityStats(row, ordinal))
  }

  abstract override def appendFrom(row: InternalRow, ordinal: Int): Unit = {
    super.appendFrom(row, ordinal)
    if (!row.isNullAt(ordinal)) {
      gatherCompressibilityStats(row, ordinal)
    }
  }

  override def build(): ByteBuffer = {
    val nonNullBuffer = buildNonNulls()
    val encoder: Encoder[T] = {
      val candidate = compressionEncoders.minBy(_.compressionRatio)
      if (isWorthCompressing(candidate)) candidate else PassThrough.encoder(columnType)
    }

    // Header = null count + null positions
    val headerSize = 4 + nulls.limit()
    val compressedSize = if (encoder.compressedSize == 0) {
      nonNullBuffer.remaining()
    } else {
      encoder.compressedSize
    }

    val compressedBuffer = ByteBuffer
      // Reserves 4 bytes for compression scheme ID
      .allocate(headerSize + 4 + compressedSize)
      .order(ByteOrder.nativeOrder)
      // Write the header
      .putInt(nullCount)
      .put(nulls)

    logDebug(s"Compressor for [$columnName]: $encoder, ratio: ${encoder.compressionRatio}")
    encoder.compress(nonNullBuffer, compressedBuffer)
  }
}

private[columnar] object CompressibleColumnBuilder {
  val unaligned = Platform.unaligned()
} 
Example 147
Source File: SocketGenerator.scala    From flink-demos   with Apache License 2.0 5 votes vote down vote up
package com.dataartisans.flink.example.eventpattern.Socket

import java.net.{InetAddress, InetSocketAddress}
import java.nio.{ByteOrder, ByteBuffer}
import java.nio.channels.SocketChannel

import com.dataartisans.flink.example.eventpattern.{StandaloneGeneratorBase, Event}
import org.apache.flink.util.Collector

object SocketGenerator extends StandaloneGeneratorBase {

  val BASE_PORT = 51762

  def main(args: Array[String]): Unit = {

    val numPartitions = 4 //args(0).toInt
    val collectors = new Array[SocketCollector](numPartitions)

    // create the generator threads
    for (i <- 0 until collectors.length) {
      collectors(i) = new SocketCollector(BASE_PORT + i)
    }

    runGenerator(collectors)
  }
}

class SocketCollector(val port: Int) extends Collector[Event] {

  val channel = SocketChannel.open(new InetSocketAddress(InetAddress.getByName("localhost"), port))
  channel.configureBlocking(true)
  channel.finishConnect()

  val buffer = ByteBuffer.allocateDirect(4096).order(ByteOrder.LITTLE_ENDIAN)

  override def collect(t: Event): Unit = {
    if (buffer.remaining() < 8) {
      buffer.flip()
      channel.write(buffer)
      buffer.clear()
    }

    buffer.putInt(t.sourceAddress)
    buffer.putInt(t.event)
  }

  override def close(): Unit = {
    if (buffer.position() > 0) {
      buffer.flip()
      channel.write(buffer)
    }
    channel.close()
  }
} 
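Each event on the wire is two little-endian Ints: the source address followed by the event type. A matching blocking reader is sketched below for the first partition's port; the server setup is an assumption, not part of the project:

import java.net.InetSocketAddress
import java.nio.{ByteBuffer, ByteOrder}
import java.nio.channels.{ServerSocketChannel, SocketChannel}

val server = ServerSocketChannel.open().bind(new InetSocketAddress(SocketGenerator.BASE_PORT))
val client: SocketChannel = server.accept()

val buffer = ByteBuffer.allocate(4096).order(ByteOrder.LITTLE_ENDIAN)
while (client.read(buffer) != -1) {
  buffer.flip()
  while (buffer.remaining() >= 8) {  // one event = 2 * 4 bytes
    val sourceAddress = buffer.getInt()
    val event = buffer.getInt()
    println(s"event from $sourceAddress: $event")
  }
  buffer.compact()                   // keep any half-read event
}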
Example 148
Source File: EventDeSerializer.scala    From flink-demos   with Apache License 2.0 5 votes vote down vote up
package com.dataartisans.flink.example.eventpattern.kafka

import java.nio.{ByteBuffer, ByteOrder}

import com.dataartisans.flink.example.eventpattern.Event
import org.apache.flink.api.common.typeinfo.TypeInformation
import org.apache.flink.streaming.api.scala._
import org.apache.flink.streaming.util.serialization.{DeserializationSchema, SerializationSchema}


class EventDeSerializer extends DeserializationSchema[Event] with SerializationSchema[Event] {
  
  override def deserialize(bytes: Array[Byte]): Event = {
    val buffer = ByteBuffer.wrap(bytes).order(ByteOrder.LITTLE_ENDIAN)
    val address: Int = buffer.getInt(0)
    val eventType: Int = buffer.getInt(4)
    Event(address, eventType)
  }

  override def serialize(t: Event): Array[Byte] = {
    val byteBuffer = ByteBuffer.allocate(8).order(ByteOrder.LITTLE_ENDIAN)
    byteBuffer.putInt(0, t.sourceAddress)
    byteBuffer.putInt(4, t.event)
    byteBuffer.array()
  }

  override def isEndOfStream(t: Event): Boolean = false

  override def getProducedType: TypeInformation[Event] = {
    createTypeInformation[Event]
  }
} 
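Serialization and deserialization are inverses over a fixed 8-byte little-endian layout. A quick standalone round-trip check, with Event mirrored locally so the sketch is self-contained:

import java.nio.{ByteBuffer, ByteOrder}

case class Event(sourceAddress: Int, event: Int)

def serialize(t: Event): Array[Byte] = {
  val b = ByteBuffer.allocate(8).order(ByteOrder.LITTLE_ENDIAN)
  b.putInt(0, t.sourceAddress)  // bytes 0-3
  b.putInt(4, t.event)          // bytes 4-7
  b.array()
}

def deserialize(bytes: Array[Byte]): Event = {
  val b = ByteBuffer.wrap(bytes).order(ByteOrder.LITTLE_ENDIAN)
  Event(b.getInt(0), b.getInt(4))
}

assert(deserialize(serialize(Event(42, 7))) == Event(42, 7))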
Example 149
Source File: NullableColumnAccessor.scala    From BigDatalog   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.sql.execution.columnar

import java.nio.{ByteOrder, ByteBuffer}

import org.apache.spark.sql.catalyst.expressions.MutableRow

private[columnar] trait NullableColumnAccessor extends ColumnAccessor {
  private var nullsBuffer: ByteBuffer = _
  private var nullCount: Int = _
  private var seenNulls: Int = 0

  private var nextNullIndex: Int = _
  private var pos: Int = 0

  abstract override protected def initialize(): Unit = {
    nullsBuffer = underlyingBuffer.duplicate().order(ByteOrder.nativeOrder())
    nullCount = ByteBufferHelper.getInt(nullsBuffer)
    nextNullIndex = if (nullCount > 0) ByteBufferHelper.getInt(nullsBuffer) else -1
    pos = 0

    underlyingBuffer.position(underlyingBuffer.position + 4 + nullCount * 4)
    super.initialize()
  }

  abstract override def extractTo(row: MutableRow, ordinal: Int): Unit = {
    if (pos == nextNullIndex) {
      seenNulls += 1

      if (seenNulls < nullCount) {
        nextNullIndex = ByteBufferHelper.getInt(nullsBuffer)
      }

      row.setNullAt(ordinal)
    } else {
      super.extractTo(row, ordinal)
    }

    pos += 1
  }

  abstract override def hasNext: Boolean = seenNulls < nullCount || super.hasNext
} 
Example 150
Source File: NullableColumnBuilder.scala    From BigDatalog   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.sql.execution.columnar

import java.nio.{ByteBuffer, ByteOrder}

import org.apache.spark.sql.catalyst.InternalRow


private[columnar] trait NullableColumnBuilder extends ColumnBuilder {
  protected var nulls: ByteBuffer = _
  protected var nullCount: Int = _
  private var pos: Int = _

  abstract override def initialize(
      initialSize: Int,
      columnName: String,
      useCompression: Boolean): Unit = {

    nulls = ByteBuffer.allocate(1024)
    nulls.order(ByteOrder.nativeOrder())
    pos = 0
    nullCount = 0
    super.initialize(initialSize, columnName, useCompression)
  }

  abstract override def appendFrom(row: InternalRow, ordinal: Int): Unit = {
    columnStats.gatherStats(row, ordinal)
    if (row.isNullAt(ordinal)) {
      nulls = ColumnBuilder.ensureFreeSpace(nulls, 4)
      nulls.putInt(pos)
      nullCount += 1
    } else {
      super.appendFrom(row, ordinal)
    }
    pos += 1
  }

  abstract override def build(): ByteBuffer = {
    val nonNulls = super.build()
    val nullDataLen = nulls.position()

    nulls.limit(nullDataLen)
    nulls.rewind()

    val buffer = ByteBuffer
      .allocate(4 + nullDataLen + nonNulls.remaining())
      .order(ByteOrder.nativeOrder())
      .putInt(nullCount)
      .put(nulls)
      .put(nonNulls)

    buffer.rewind()
    buffer
  }

  protected def buildNonNulls(): ByteBuffer = {
    nulls.limit(nulls.position()).rewind()
    super.build()
  }
} 
Example 151
Source File: CompressibleColumnBuilder.scala    From BigDatalog   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.sql.execution.columnar.compression

import java.nio.{ByteBuffer, ByteOrder}

import org.apache.spark.Logging
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.execution.columnar.{ColumnBuilder, NativeColumnBuilder}
import org.apache.spark.sql.types.AtomicType


private[columnar] trait CompressibleColumnBuilder[T <: AtomicType]
  extends ColumnBuilder with Logging {

  this: NativeColumnBuilder[T] with WithCompressionSchemes =>

  var compressionEncoders: Seq[Encoder[T]] = _

  abstract override def initialize(
      initialSize: Int,
      columnName: String,
      useCompression: Boolean): Unit = {

    compressionEncoders =
      if (useCompression) {
        schemes.filter(_.supports(columnType)).map(_.encoder[T](columnType))
      } else {
        Seq(PassThrough.encoder(columnType))
      }
    super.initialize(initialSize, columnName, useCompression)
  }

  protected def isWorthCompressing(encoder: Encoder[T]) = {
    encoder.compressionRatio < 0.8
  }

  private def gatherCompressibilityStats(row: InternalRow, ordinal: Int): Unit = {
    var i = 0
    while (i < compressionEncoders.length) {
      compressionEncoders(i).gatherCompressibilityStats(row, ordinal)
      i += 1
    }
  }

  abstract override def appendFrom(row: InternalRow, ordinal: Int): Unit = {
    super.appendFrom(row, ordinal)
    if (!row.isNullAt(ordinal)) {
      gatherCompressibilityStats(row, ordinal)
    }
  }

  override def build(): ByteBuffer = {
    val nonNullBuffer = buildNonNulls()
    val encoder: Encoder[T] = {
      val candidate = compressionEncoders.minBy(_.compressionRatio)
      if (isWorthCompressing(candidate)) candidate else PassThrough.encoder(columnType)
    }

    // Header = null count + null positions
    val headerSize = 4 + nulls.limit()
    val compressedSize = if (encoder.compressedSize == 0) {
      nonNullBuffer.remaining()
    } else {
      encoder.compressedSize
    }

    val compressedBuffer = ByteBuffer
      // Reserves 4 bytes for compression scheme ID
      .allocate(headerSize + 4 + compressedSize)
      .order(ByteOrder.nativeOrder)
      // Write the header
      .putInt(nullCount)
      .put(nulls)

    logDebug(s"Compressor for [$columnName]: $encoder, ratio: ${encoder.compressionRatio}")
    encoder.compress(nonNullBuffer, compressedBuffer)
  }
} 
Example 152
Source File: CompressionScheme.scala    From BigDatalog   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.sql.execution.columnar.compression

import java.nio.{ByteBuffer, ByteOrder}
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.expressions.MutableRow
import org.apache.spark.sql.execution.columnar.{ColumnType, NativeColumnType}
import org.apache.spark.sql.types.AtomicType

private[columnar] trait Encoder[T <: AtomicType] {
  def gatherCompressibilityStats(row: InternalRow, ordinal: Int): Unit = {}

  def compressedSize: Int

  def uncompressedSize: Int

  def compressionRatio: Double = {
    if (uncompressedSize > 0) compressedSize.toDouble / uncompressedSize else 1.0
  }

  def compress(from: ByteBuffer, to: ByteBuffer): ByteBuffer
}

private[columnar] trait Decoder[T <: AtomicType] {
  def next(row: MutableRow, ordinal: Int): Unit

  def hasNext: Boolean
}

private[columnar] trait CompressionScheme {
  def typeId: Int

  def supports(columnType: ColumnType[_]): Boolean

  def encoder[T <: AtomicType](columnType: NativeColumnType[T]): Encoder[T]

  def decoder[T <: AtomicType](buffer: ByteBuffer, columnType: NativeColumnType[T]): Decoder[T]
}

private[columnar] trait WithCompressionSchemes {
  def schemes: Seq[CompressionScheme]
}

private[columnar] trait AllCompressionSchemes extends WithCompressionSchemes {
  override val schemes: Seq[CompressionScheme] = CompressionScheme.all
}

private[columnar] object CompressionScheme {
  val all: Seq[CompressionScheme] =
    Seq(PassThrough, RunLengthEncoding, DictionaryEncoding, BooleanBitSet, IntDelta, LongDelta)

  private val typeIdToScheme = all.map(scheme => scheme.typeId -> scheme).toMap

  def apply(typeId: Int): CompressionScheme = {
    typeIdToScheme.getOrElse(typeId, throw new UnsupportedOperationException(
      s"Unrecognized compression scheme type ID: $typeId"))
  }

  def columnHeaderSize(columnBuffer: ByteBuffer): Int = {
    val header = columnBuffer.duplicate().order(ByteOrder.nativeOrder)
    val nullCount = header.getInt()
    // null count + null positions
    4 + 4 * nullCount
  }
} 
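Unlike the spark1.52 variant in Example 143, this header carries no column type ID, so columnHeaderSize reads the null count first. A small layout sketch with invented values, assuming it runs alongside this object:

import java.nio.{ByteBuffer, ByteOrder}

val buf = ByteBuffer.allocate(32).order(ByteOrder.nativeOrder)
buf.putInt(2)            // null count at offset 0
buf.putInt(1).putInt(5)  // null positions
buf.rewind()

// 4 (null count) + 4 * 2 (positions) = 12 bytes
assert(CompressionScheme.columnHeaderSize(buf) == 12)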
Example 153
Source File: BufferPoolTestsJVM.scala    From boopickle   with Apache License 2.0 5 votes vote down vote up
package boopickle

import java.nio.{ByteBuffer, ByteOrder}

import utest._

object BufferPoolTestsJVM extends TestSuite {

  override def tests = Tests {
    "MultiThread" - {
      val pool  = BufferPool
      val count = 100000
      def runner = new Runnable {
        override def run(): Unit = {
          var i = 0
          while (i < count) {
            val bb1 = pool
              .allocate(ByteBufferProvider.initSize)
              .getOrElse(ByteBuffer.allocate(ByteBufferProvider.initSize))
              .order(ByteOrder.LITTLE_ENDIAN)
            val bb2 = pool
              .allocate(ByteBufferProvider.expandSize)
              .getOrElse(ByteBuffer.allocate(ByteBufferProvider.expandSize))
              .order(ByteOrder.LITTLE_ENDIAN)
            pool.release(bb1)
            pool.release(bb2)
            pool.release(ByteBuffer.allocate(ByteBufferProvider.initSize).order(ByteOrder.LITTLE_ENDIAN))
            i += 1
          }
        }
      }
      // warmup
      runner.run()
      runner.run()
      System.gc()
      // run in a single thread
      var startTime = System.nanoTime()
      runner.run()
      var endTime = System.nanoTime()
      println(s"Single thread: ${(endTime - startTime) / 1000}")
      var t1 = new Thread(runner)
      var t2 = new Thread(runner)
      startTime = System.nanoTime()
      t1.start()
      t2.start()
      t1.join()
      t2.join()
      endTime = System.nanoTime()
      println(s"Two threads: ${(endTime - startTime) / 1000}")
      startTime = System.nanoTime()
      t1 = new Thread(runner)
      t2 = new Thread(runner)
      val t3 = new Thread(runner)
      t1.start()
      t2.start()
      t3.start()
      t1.join()
      t2.join()
      t3.join()
      endTime = System.nanoTime()
      println(s"Three threads: ${(endTime - startTime) / 1000}")
    }
  }
} 
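The allocation pattern the test hammers is: try the pool first, fall back to a fresh heap buffer, and release buffers back when done. Condensed into a sketch using the same boopickle objects:

import java.nio.{ByteBuffer, ByteOrder}

val bb = BufferPool
  .allocate(ByteBufferProvider.initSize)
  .getOrElse(ByteBuffer.allocate(ByteBufferProvider.initSize))
  .order(ByteOrder.LITTLE_ENDIAN)
// ... fill and drain bb ...
BufferPool.release(bb)  // return it for reuse by other callers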
Example 154
Source File: GDBIndex.scala    From spark-gdb   with Apache License 2.0 5 votes vote down vote up
package com.esri.gdb

import java.io.{DataInput, File}
import java.nio.{ByteBuffer, ByteOrder}

import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.{FSDataInputStream, Path}
import org.apache.spark.Logging

object GDBIndex {
  def apply(path: String, name: String, conf: Configuration = new Configuration()) = {
    val filename = StringBuilder.newBuilder.append(path).append(File.separator).append(name).append(".gdbtablx").toString()
    val hdfsPath = new Path(filename)
    val dataInput = hdfsPath.getFileSystem(conf).open(hdfsPath)

    val bytes = new Array[Byte](16)
    dataInput.readFully(bytes)
    val byteBuffer = ByteBuffer.wrap(bytes).order(ByteOrder.LITTLE_ENDIAN)

    val signature = byteBuffer.getInt
    val n1024Blocks = byteBuffer.getInt
    val numRows = byteBuffer.getInt
    val indexSize = byteBuffer.getInt

    new GDBIndex(dataInput, numRows, indexSize)
  }
}

private[gdb] class GDBIndex(dataInput: FSDataInputStream,
                            val numRows: Int,
                            indexSize: Int
                           ) extends Logging with AutoCloseable with Serializable {

  def readSeekForRowNum(rowNum: Int) = {
    val bytes = new Array[Byte](indexSize)
    dataInput.seek(16 + rowNum * indexSize)
    dataInput.readFully(bytes)
    ByteBuffer.wrap(bytes).order(ByteOrder.LITTLE_ENDIAN).getInt
  }

  def iterator(startAtRow: Int = 0, numRowsToRead: Int = -1) = {
    dataInput.seek(16 + startAtRow * indexSize)
    val maxRows = if (numRowsToRead == -1) numRows else numRowsToRead
    // log.info(s"iterator::startAtRow=$startAtRow maxRows=$maxRows")
    new GDBIndexIterator(dataInput, startAtRow, maxRows, indexSize).withFilter(_.isSeekable)
  }

  def close() {
    dataInput.close()
  }
}

private[gdb] class GDBIndexIterator(dataInput: DataInput,
                                    startID: Int,
                                    maxRows: Int,
                                    indexSize: Int
                                   ) extends Iterator[IndexInfo] with Logging with Serializable {

  private val indexInfo = IndexInfo(0, 0)
  private val bytes = new Array[Byte](indexSize)
  private val byteBuffer = ByteBuffer.wrap(bytes).order(ByteOrder.LITTLE_ENDIAN)

  private var objectID = startID
  private var nextRow = 0

  def hasNext() = nextRow < maxRows

  def next() = {
    // log.info(s"next::nextRow=$nextRow maxRows=$maxRows")
    nextRow += 1

    objectID += 1
    indexInfo.objectID = objectID

    byteBuffer.clear
    dataInput.readFully(bytes)
    indexInfo.seek = byteBuffer.getInt

    indexInfo
  }
} 
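A minimal usage sketch; the geodatabase path and table name are placeholders, and IndexInfo's objectID/seek fields are as used above:

import org.apache.hadoop.conf.Configuration

val index = GDBIndex("/data/sample.gdb", "a00000001", new Configuration())
try {
  // iterator() already filters out rows that are not seekable.
  index.iterator().foreach { info =>
    println(s"objectID=${info.objectID} seek=${info.seek}")
  }
} finally {
  index.close()
}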
Example 155
Source File: DataBuffer.scala    From spark-gdb   with Apache License 2.0 5 votes vote down vote up
package com.esri.gdb

import java.nio.{ByteBuffer, ByteOrder}

import org.apache.hadoop.fs.FSDataInputStream


class DataBuffer(dataInput: FSDataInputStream) extends Serializable {

  private var bytes = new Array[Byte](1024)
  private var byteBuffer = ByteBuffer.wrap(bytes).order(ByteOrder.LITTLE_ENDIAN)

  def readBytes(length: Int) = {
    if (length > bytes.length) {
      bytes = new Array[Byte](length)
      byteBuffer = ByteBuffer.wrap(bytes).order(ByteOrder.LITTLE_ENDIAN)
    }
    else {
      byteBuffer.clear
    }
    dataInput.readFully(bytes, 0, length)
    byteBuffer
  }

  def seek(position: Long) = {
    dataInput.seek(position)
    this
  }

  def close() {
    dataInput.close()
  }
}

object DataBuffer {
  def apply(dataInput: FSDataInputStream) = {
    new DataBuffer(dataInput)
  }
} 
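A usage sketch, assuming an already-open FSDataInputStream (obtaining it is elided); note that readBytes returns the shared internal buffer, positioned at 0:

import org.apache.hadoop.fs.FSDataInputStream

val dataInput: FSDataInputStream = ??? // an open stream, elided here
val data = DataBuffer(dataInput)
val bb = data.seek(16L).readBytes(8)   // jump to offset 16, read 8 bytes
val first = bb.getInt()                // little-endian Ints
val second = bb.getInt()
data.close()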
Example 156
Source File: TrafficMonitorThread.scala    From shadowsocksr-android   with GNU General Public License v3.0 5 votes vote down vote up
package com.github.shadowsocks.utils

import java.io.{File, IOException}
import java.nio.{ByteBuffer, ByteOrder}
import java.util.concurrent.Executors

import android.content.Context
import android.net.{LocalServerSocket, LocalSocket, LocalSocketAddress}
import android.util.Log

class TrafficMonitorThread(context: Context) extends Thread {

  val TAG = "TrafficMonitorThread"
  lazy val PATH = context.getApplicationInfo.dataDir + "/stat_path"

  @volatile var serverSocket: LocalServerSocket = null
  @volatile var isRunning: Boolean = true

  def closeServerSocket() {
    if (serverSocket != null) {
      try {
        serverSocket.close()
      } catch {
        case _: Exception => // ignore
      }
      serverSocket = null
    }
  }

  def stopThread() {
    isRunning = false
    closeServerSocket()
  }

  override def run() {

    try {
      new File(PATH).delete()
    } catch {
      case _: Exception => // ignore
    }

    try {
      val localSocket = new LocalSocket
      localSocket.bind(new LocalSocketAddress(PATH, LocalSocketAddress.Namespace.FILESYSTEM))
      serverSocket = new LocalServerSocket(localSocket.getFileDescriptor)
    } catch {
      case e: IOException =>
        Log.e(TAG, "unable to bind", e)
        return
    }

    val pool = Executors.newFixedThreadPool(1)

    while (isRunning) {
      try {
        val socket = serverSocket.accept()

        pool.execute(() => {
          try {
            val input = socket.getInputStream
            val output = socket.getOutputStream

            val buffer = new Array[Byte](16)
            if (input.read(buffer) != 16) throw new IOException("Unexpected traffic stat length")
            val stat = ByteBuffer.wrap(buffer).order(ByteOrder.LITTLE_ENDIAN)
            TrafficMonitor.update(stat.getLong(0), stat.getLong(8))

            output.write(0)

            input.close()
            output.close()

          } catch {
            case e: Exception =>
              Log.e(TAG, "Error when recv traffic stat", e)
          }

          // close socket
          try {
            socket.close()
          } catch {
            case _: Exception => // ignore
          }

        })
      } catch {
        case e: IOException =>
          Log.e(TAG, "Error when accept socket", e)
          return
      }
    }
  }
} 
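The protocol above is fixed-size: each request is exactly 16 bytes holding two little-endian Longs, consumed as stat.getLong(0) and stat.getLong(8). A client-side encoding sketch (the tx/rx naming is an assumption inferred from TrafficMonitor.update):

import java.nio.{ByteBuffer, ByteOrder}

// Encode one traffic-stat packet in the layout the monitor thread expects.
def encodeStat(txTotal: Long, rxTotal: Long): Array[Byte] = {
  val buf = ByteBuffer.allocate(16).order(ByteOrder.LITTLE_ENDIAN)
  buf.putLong(0, txTotal)  // bytes 0-7
  buf.putLong(8, rxTotal)  // bytes 8-15
  buf.array()
}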
Example 157
Source File: IPDiscoveryFlow.scala    From AckCord   with MIT License 5 votes vote down vote up
package ackcord.voice

import java.nio.ByteOrder

import scala.concurrent.{Future, Promise}

import akka.stream.scaladsl.Flow
import akka.stream.stage._
import akka.stream.{Attributes, FlowShape, Inlet, Outlet}
import akka.util.ByteString

class IPDiscoveryFlow(openValve: () => Unit)
    extends GraphStageWithMaterializedValue[FlowShape[ByteString, ByteString], Future[VoiceUDPFlow.FoundIP]] {

  val in: Inlet[ByteString]   = Inlet("IPDiscoveryFlow.in")
  val out: Outlet[ByteString] = Outlet("IPDiscoveryFlow.out")

  override def shape: FlowShape[ByteString, ByteString] = FlowShape(in, out)

  override def createLogicAndMaterializedValue(
      inheritedAttributes: Attributes
  ): (GraphStageLogic, Future[VoiceUDPFlow.FoundIP]) = {
    val promise = Promise[VoiceUDPFlow.FoundIP]
    val logic = new GraphStageLogicWithLogging(shape) with InHandler with OutHandler {

      override def onPush(): Unit = {
        val data = grab(in)
        log.debug(s"Grabbing data for IP discovery $data")
        val byteBuf = data.asByteBuffer.order(ByteOrder.BIG_ENDIAN)
        val tpe     = byteBuf.getShort

        require(tpe == 0x2, s"Was expecting IP discovery result, got $tpe")

        byteBuf.getShort //Length
        byteBuf.getInt   //SSRC
        val nullTermString = new Array[Byte](64)
        byteBuf.get(nullTermString)
        val address = new String(nullTermString, 0, nullTermString.iterator.takeWhile(_ != 0).length)
        val port    = byteBuf.getChar.toInt //Char is unsigned short

        promise.success(VoiceUDPFlow.FoundIP(address, port))
        log.debug("Success doing IP discovery")

        setHandler(
          in,
          new InHandler {
            override def onPush(): Unit = push(out, grab(in))
          }
        )

        openValve()
      }

      override def onPull(): Unit = pull(in)

      override def onUpstreamFailure(ex: Throwable): Unit = {
        promise.tryFailure(new Exception("Connection failed.", ex))
        super.onUpstreamFailure(ex)
      }

      setHandlers(in, out, this)
    }

    (logic, promise.future)
  }
}
object IPDiscoveryFlow {
  def flow(openValve: () => Unit): Flow[ByteString, ByteString, Future[VoiceUDPFlow.FoundIP]] =
    Flow.fromGraph(new IPDiscoveryFlow(openValve))
}
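For reference, onPush above expects a 74-byte discovery response: a Short type (0x2), a Short length, an Int SSRC, a 64-byte null-terminated address, and an unsigned-short port, all big-endian. A sketch that builds such a packet, e.g. for a unit test; the SSRC, address and port values are invented:

import java.nio.ByteOrder
import akka.util.ByteString

implicit val order: ByteOrder = ByteOrder.BIG_ENDIAN

val builder = ByteString.createBuilder
builder.putShort(0x2)   // type: discovery result
builder.putShort(70)    // length
builder.putInt(12345)   // SSRC
val addressBytes = "203.0.113.7".getBytes("US-ASCII")
builder.putBytes(addressBytes)
builder.putBytes(new Array[Byte](64 - addressBytes.length)) // null padding
builder.putShort(50000) // port, read back via getChar (unsigned short)
val packet: ByteString = builder.result() // feed this into IPDiscoveryFlow.flow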