org.apache.commons.lang3.RandomUtils Scala Examples
The following examples show how to use org.apache.commons.lang3.RandomUtils.
You can go to the original project or source file by following the links above each example.
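Before the project examples, here is a minimal standalone sketch of the RandomUtils API itself. The object name RandomUtilsDemo and the bound values are illustrative choices, not taken from any of the projects below:

import org.apache.commons.lang3.RandomUtils

object RandomUtilsDemo {
  def main(args: Array[String]): Unit = {
    // A uniformly random Int in [1024, 65536) -- the same call the test
    // utilities below use to pick a candidate server port.
    val port: Int = RandomUtils.nextInt(1024, 65536)

    // The other primitive generators follow the same
    // start-inclusive / end-exclusive convention.
    val id: Long = RandomUtils.nextLong(0L, 1000000L)
    val ratio: Double = RandomUtils.nextDouble(0.0, 1.0)
    val flag: Boolean = RandomUtils.nextBoolean()

    // A random byte array of the requested length, handy for test payloads.
    val payload: Array[Byte] = RandomUtils.nextBytes(16)

    println(s"port=$port id=$id ratio=$ratio flag=$flag payload=${payload.length} bytes")
  }
}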
Example 1
Source File: FlumeTestUtils.scala From drizzle-spark with Apache License 2.0
package org.apache.spark.streaming.flume

import java.net.{InetSocketAddress, ServerSocket}
import java.nio.ByteBuffer
import java.nio.charset.StandardCharsets
import java.util.{List => JList}
import java.util.Collections

import scala.collection.JavaConverters._

import org.apache.avro.ipc.NettyTransceiver
import org.apache.avro.ipc.specific.SpecificRequestor
import org.apache.commons.lang3.RandomUtils
import org.apache.flume.source.avro
import org.apache.flume.source.avro.{AvroFlumeEvent, AvroSourceProtocol}
import org.jboss.netty.channel.ChannelPipeline
import org.jboss.netty.channel.socket.SocketChannel
import org.jboss.netty.channel.socket.nio.NioClientSocketChannelFactory
import org.jboss.netty.handler.codec.compression.{ZlibDecoder, ZlibEncoder}

import org.apache.spark.util.Utils
import org.apache.spark.SparkConf

private[flume] class FlumeTestUtils {

  // ... (remaining members of FlumeTestUtils are elided in this listing;
  // RandomUtils is used in the elided portion to pick a random port) ...

  // Netty channel factory that zlib-compresses traffic, used to exercise
  // Flume's compressed Avro source.
  private class CompressionChannelFactory(compressionLevel: Int)
      extends NioClientSocketChannelFactory {
    override def newChannel(pipeline: ChannelPipeline): SocketChannel = {
      val encoder = new ZlibEncoder(compressionLevel)
      pipeline.addFirst("deflater", encoder)
      pipeline.addFirst("inflater", new ZlibDecoder())
      super.newChannel(pipeline)
    }
  }
}
Example 2
Source File: FlumeTestUtils.scala From sparkoscope with Apache License 2.0
package org.apache.spark.streaming.flume

import java.net.{InetSocketAddress, ServerSocket}
import java.nio.ByteBuffer
import java.nio.charset.StandardCharsets
import java.util.{List => JList}
import java.util.Collections

import scala.collection.JavaConverters._

import org.apache.avro.ipc.NettyTransceiver
import org.apache.avro.ipc.specific.SpecificRequestor
import org.apache.commons.lang3.RandomUtils
import org.apache.flume.source.avro
import org.apache.flume.source.avro.{AvroFlumeEvent, AvroSourceProtocol}
import org.jboss.netty.channel.ChannelPipeline
import org.jboss.netty.channel.socket.SocketChannel
import org.jboss.netty.channel.socket.nio.NioClientSocketChannelFactory
import org.jboss.netty.handler.codec.compression.{ZlibDecoder, ZlibEncoder}

import org.apache.spark.util.Utils
import org.apache.spark.SparkConf

private[flume] class FlumeTestUtils {

  // ... (remaining members of FlumeTestUtils are elided in this listing;
  // RandomUtils is used in the elided portion to pick a random port) ...

  // Netty channel factory that zlib-compresses traffic, used to exercise
  // Flume's compressed Avro source.
  private class CompressionChannelFactory(compressionLevel: Int)
      extends NioClientSocketChannelFactory {
    override def newChannel(pipeline: ChannelPipeline): SocketChannel = {
      val encoder = new ZlibEncoder(compressionLevel)
      pipeline.addFirst("deflater", encoder)
      pipeline.addFirst("inflater", new ZlibDecoder())
      super.newChannel(pipeline)
    }
  }
}
Example 3
Source File: MQTTTestUtils.scala From bahir with Apache License 2.0
package org.apache.spark.streaming.mqtt

import java.net.{ServerSocket, URI}
import java.nio.charset.StandardCharsets

import scala.language.postfixOps

import org.apache.activemq.broker.{BrokerService, TransportConnector}
import org.apache.activemq.usage.SystemUsage
import org.apache.commons.lang3.RandomUtils
import org.eclipse.paho.client.mqttv3._
import org.eclipse.paho.client.mqttv3.persist.MqttDefaultFilePersistence

import org.apache.spark.SparkConf
import org.apache.spark.internal.Logging
import org.apache.spark.util.Utils

private[mqtt] class MQTTTestUtils extends Logging {

  private val persistenceDir = Utils.createTempDir()
  private val brokerHost = "localhost"
  private val brokerPort = findFreePort()

  private var broker: BrokerService = _
  private var systemUsage: SystemUsage = _
  private var connector: TransportConnector = _

  def brokerUri: String = {
    s"$brokerHost:$brokerPort"
  }

  def setup(): Unit = {
    broker = new BrokerService()
    broker.setDataDirectoryFile(Utils.createTempDir())
    broker.getSystemUsage().setSendFailIfNoSpace(false)
    systemUsage = broker.getSystemUsage()
    systemUsage.getStoreUsage().setLimit(1024L * 1024 * 256) // 256 MB (default: 100 GB)
    systemUsage.getTempUsage().setLimit(1024L * 1024 * 128) // 128 MB (default: 50 GB)
    connector = new TransportConnector()
    connector.setName("mqtt")
    connector.setUri(new URI("mqtt://" + brokerUri))
    broker.addConnector(connector)
    broker.start()
  }

  def teardown(): Unit = {
    if (broker != null) {
      broker.stop()
      broker = null
    }
    if (connector != null) {
      connector.stop()
      connector = null
    }
    Utils.deleteRecursively(persistenceDir)
  }

  private def findFreePort(): Int = {
    val candidatePort = RandomUtils.nextInt(1024, 65536)
    Utils.startServiceOnPort(candidatePort, (trialPort: Int) => {
      val socket = new ServerSocket(trialPort)
      socket.close()
      (null, trialPort)
    }, new SparkConf())._2
  }

  def publishData(topic: String, data: String): Unit = {
    var client: MqttClient = null
    try {
      val persistence = new MqttDefaultFilePersistence(persistenceDir.getAbsolutePath)
      client = new MqttClient("tcp://" + brokerUri, MqttClient.generateClientId(), persistence)
      client.connect()
      if (client.isConnected) {
        val msgTopic = client.getTopic(topic)
        val message = new MqttMessage(data.getBytes(StandardCharsets.UTF_8))
        message.setQos(1)
        message.setRetained(true)
        for (i <- 0 to 10) {
          try {
            msgTopic.publish(message)
          } catch {
            case e: MqttException if e.getReasonCode == MqttException.REASON_CODE_MAX_INFLIGHT =>
              // wait for Spark streaming to consume something from the message queue
              Thread.sleep(50)
          }
        }
      }
    } finally {
      if (client != null) {
        client.disconnect()
        client.close()
        client = null
      }
    }
  }
}
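The findFreePort helper above is the recurring RandomUtils idiom in these examples: draw a random candidate port with RandomUtils.nextInt(1024, 65536), then let Spark's Utils.startServiceOnPort probe successive ports until one binds. A rough standalone sketch of the same idea without the Spark dependency (the object name FreePort and the maxTries parameter are hypothetical) might look like this:

import java.io.IOException
import java.net.ServerSocket

import org.apache.commons.lang3.RandomUtils

object FreePort {
  // Start at a random candidate in the non-privileged range and probe
  // successive ports (wrapping around) until one can be bound.
  def findFreePort(maxTries: Int = 100): Int = {
    val range = 65536 - 1024
    val candidate = RandomUtils.nextInt(1024, 65536)
    (0 until maxTries).iterator
      .map(offset => 1024 + (candidate - 1024 + offset) % range)
      .collectFirst { case port if canBind(port) => port }
      .getOrElse(throw new IOException(s"no free port found after $maxTries tries"))
  }

  // A port is considered free if a ServerSocket can bind to it.
  private def canBind(port: Int): Boolean =
    try {
      new ServerSocket(port).close()
      true
    } catch {
      case _: IOException => false
    }
}

Spark's real Utils.startServiceOnPort additionally logs each failed bind and respects a configurable retry count; the sketch only probes raw server sockets.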
Example 4
Source File: FlumeTestUtils.scala From multi-tenancy-spark with Apache License 2.0
package org.apache.spark.streaming.flume

import java.net.{InetSocketAddress, ServerSocket}
import java.nio.ByteBuffer
import java.nio.charset.StandardCharsets
import java.util.{List => JList}
import java.util.Collections

import scala.collection.JavaConverters._

import org.apache.avro.ipc.NettyTransceiver
import org.apache.avro.ipc.specific.SpecificRequestor
import org.apache.commons.lang3.RandomUtils
import org.apache.flume.source.avro
import org.apache.flume.source.avro.{AvroFlumeEvent, AvroSourceProtocol}
import org.jboss.netty.channel.ChannelPipeline
import org.jboss.netty.channel.socket.SocketChannel
import org.jboss.netty.channel.socket.nio.NioClientSocketChannelFactory
import org.jboss.netty.handler.codec.compression.{ZlibDecoder, ZlibEncoder}

import org.apache.spark.util.Utils
import org.apache.spark.SparkConf

private[flume] class FlumeTestUtils {

  // ... (remaining members of FlumeTestUtils are elided in this listing;
  // RandomUtils is used in the elided portion to pick a random port) ...

  // Netty channel factory that zlib-compresses traffic, used to exercise
  // Flume's compressed Avro source.
  private class CompressionChannelFactory(compressionLevel: Int)
      extends NioClientSocketChannelFactory {
    override def newChannel(pipeline: ChannelPipeline): SocketChannel = {
      val encoder = new ZlibEncoder(compressionLevel)
      pipeline.addFirst("deflater", encoder)
      pipeline.addFirst("inflater", new ZlibDecoder())
      super.newChannel(pipeline)
    }
  }
}
Example 5
Source File: FlumeStreamSuite.scala From iolap with Apache License 2.0
package org.apache.spark.streaming.flume

import java.net.{InetSocketAddress, ServerSocket}
import java.nio.ByteBuffer

import scala.collection.JavaConversions._
import scala.collection.mutable.{ArrayBuffer, SynchronizedBuffer}
import scala.concurrent.duration._
import scala.language.postfixOps

import com.google.common.base.Charsets
import org.apache.avro.ipc.NettyTransceiver
import org.apache.avro.ipc.specific.SpecificRequestor
import org.apache.commons.lang3.RandomUtils
import org.apache.flume.source.avro
import org.apache.flume.source.avro.{AvroFlumeEvent, AvroSourceProtocol}
import org.jboss.netty.channel.ChannelPipeline
import org.jboss.netty.channel.socket.SocketChannel
import org.jboss.netty.channel.socket.nio.NioClientSocketChannelFactory
import org.jboss.netty.handler.codec.compression._
import org.scalatest.{BeforeAndAfter, Matchers}
import org.scalatest.concurrent.Eventually._

import org.apache.spark.{Logging, SparkConf, SparkFunSuite}
import org.apache.spark.storage.StorageLevel
import org.apache.spark.streaming.{Milliseconds, StreamingContext, TestOutputStream}
import org.apache.spark.util.Utils

class FlumeStreamSuite extends SparkFunSuite with BeforeAndAfter with Matchers with Logging {
  val conf = new SparkConf().setMaster("local[4]").setAppName("FlumeStreamSuite")

  var ssc: StreamingContext = null
  var transceiver: NettyTransceiver = null

  after {
    if (ssc != null) {
      ssc.stop()
    }
    if (transceiver != null) {
      transceiver.close()
    }
  }

  test("flume input stream") {
    testFlumeStream(testCompression = false)
  }

  test("flume input compressed stream") {
    testFlumeStream(testCompression = true)
  }

  // testFlumeStream and the other helpers it calls are elided in this listing.

  // Netty channel factory that zlib-compresses traffic, used by the
  // compressed-stream test above.
  private class CompressionChannelFactory(compressionLevel: Int)
      extends NioClientSocketChannelFactory {
    override def newChannel(pipeline: ChannelPipeline): SocketChannel = {
      val encoder = new ZlibEncoder(compressionLevel)
      pipeline.addFirst("deflater", encoder)
      pipeline.addFirst("inflater", new ZlibDecoder())
      super.newChannel(pipeline)
    }
  }
}
Example 6
Source File: FlumeTestUtils.scala From spark1.52 with Apache License 2.0
package org.apache.spark.streaming.flume

import java.net.{InetSocketAddress, ServerSocket}
import java.nio.ByteBuffer
import java.util.{List => JList}

import scala.collection.JavaConversions._

import com.google.common.base.Charsets.UTF_8
import org.apache.avro.ipc.NettyTransceiver
import org.apache.avro.ipc.specific.SpecificRequestor
import org.apache.commons.lang3.RandomUtils
import org.apache.flume.source.avro
import org.apache.flume.source.avro.{AvroSourceProtocol, AvroFlumeEvent}
import org.jboss.netty.channel.ChannelPipeline
import org.jboss.netty.channel.socket.SocketChannel
import org.jboss.netty.channel.socket.nio.NioClientSocketChannelFactory
import org.jboss.netty.handler.codec.compression.{ZlibDecoder, ZlibEncoder}

import org.apache.spark.util.Utils
import org.apache.spark.SparkConf

private[flume] class FlumeTestUtils {

  // ... (remaining members of FlumeTestUtils are elided in this listing;
  // RandomUtils is used in the elided portion to pick a random port) ...

  // Netty channel factory that zlib-compresses traffic, used to exercise
  // Flume's compressed Avro source.
  private class CompressionChannelFactory(compressionLevel: Int)
      extends NioClientSocketChannelFactory {
    override def newChannel(pipeline: ChannelPipeline): SocketChannel = {
      val encoder = new ZlibEncoder(compressionLevel)
      pipeline.addFirst("deflater", encoder)
      pipeline.addFirst("inflater", new ZlibDecoder())
      super.newChannel(pipeline)
    }
  }
}
Example 7
Source File: MQTTTestUtils.scala From spark1.52 with Apache License 2.0
package org.apache.spark.streaming.mqtt

import java.net.{ServerSocket, URI}

import scala.language.postfixOps

import com.google.common.base.Charsets.UTF_8
import org.apache.activemq.broker.{BrokerService, TransportConnector}
import org.apache.commons.lang3.RandomUtils
import org.eclipse.paho.client.mqttv3._
import org.eclipse.paho.client.mqttv3.persist.MqttDefaultFilePersistence

import org.apache.spark.util.Utils
import org.apache.spark.{Logging, SparkConf}

private[mqtt] class MQTTTestUtils extends Logging {

  private val persistenceDir = Utils.createTempDir()
  private val brokerHost = "localhost"
  private val brokerPort = findFreePort()

  private var broker: BrokerService = _
  private var connector: TransportConnector = _

  def brokerUri: String = {
    s"$brokerHost:$brokerPort"
  }

  def setup(): Unit = {
    broker = new BrokerService()
    broker.setDataDirectoryFile(Utils.createTempDir())
    connector = new TransportConnector()
    connector.setName("mqtt")
    connector.setUri(new URI("mqtt://" + brokerUri))
    broker.addConnector(connector)
    broker.start()
  }

  def teardown(): Unit = {
    if (broker != null) {
      broker.stop()
      broker = null
    }
    if (connector != null) {
      connector.stop()
      connector = null
    }
    Utils.deleteRecursively(persistenceDir)
  }

  private def findFreePort(): Int = {
    val candidatePort = RandomUtils.nextInt(1024, 65536)
    Utils.startServiceOnPort(candidatePort, (trialPort: Int) => {
      val socket = new ServerSocket(trialPort)
      socket.close()
      (null, trialPort)
    }, new SparkConf())._2
  }

  def publishData(topic: String, data: String): Unit = {
    var client: MqttClient = null
    try {
      val persistence = new MqttDefaultFilePersistence(persistenceDir.getAbsolutePath)
      client = new MqttClient("tcp://" + brokerUri, MqttClient.generateClientId(), persistence)
      client.connect()
      if (client.isConnected) {
        val msgTopic = client.getTopic(topic)
        val message = new MqttMessage(data.getBytes(UTF_8))
        message.setQos(1)
        message.setRetained(true)
        for (i <- 0 to 10) {
          try {
            msgTopic.publish(message)
          } catch {
            case e: MqttException if e.getReasonCode == MqttException.REASON_CODE_MAX_INFLIGHT =>
              // wait for Spark streaming to consume something from the message queue
              Thread.sleep(50)
          }
        }
      }
    } finally {
      if (client != null) {
        client.disconnect()
        client.close()
        client = null
      }
    }
  }
}
Example 8
Source File: FlumeTestUtils.scala From Spark-2.3.1 with Apache License 2.0
package org.apache.spark.streaming.flume

import java.net.{InetSocketAddress, ServerSocket}
import java.nio.ByteBuffer
import java.nio.charset.StandardCharsets
import java.util.{List => JList}
import java.util.Collections

import scala.collection.JavaConverters._

import org.apache.avro.ipc.NettyTransceiver
import org.apache.avro.ipc.specific.SpecificRequestor
import org.apache.commons.lang3.RandomUtils
import org.apache.flume.source.avro
import org.apache.flume.source.avro.{AvroFlumeEvent, AvroSourceProtocol}
import org.jboss.netty.channel.ChannelPipeline
import org.jboss.netty.channel.socket.SocketChannel
import org.jboss.netty.channel.socket.nio.NioClientSocketChannelFactory
import org.jboss.netty.handler.codec.compression.{ZlibDecoder, ZlibEncoder}

import org.apache.spark.SparkConf
import org.apache.spark.util.Utils

private[flume] class FlumeTestUtils {

  // ... (remaining members of FlumeTestUtils are elided in this listing;
  // RandomUtils is used in the elided portion to pick a random port) ...

  // Netty channel factory that zlib-compresses traffic, used to exercise
  // Flume's compressed Avro source.
  private class CompressionChannelFactory(compressionLevel: Int)
      extends NioClientSocketChannelFactory {
    override def newChannel(pipeline: ChannelPipeline): SocketChannel = {
      val encoder = new ZlibEncoder(compressionLevel)
      pipeline.addFirst("deflater", encoder)
      pipeline.addFirst("inflater", new ZlibDecoder())
      super.newChannel(pipeline)
    }
  }
}
Example 9
Source File: FlumeTestUtils.scala From BigDatalog with Apache License 2.0
package org.apache.spark.streaming.flume

import java.net.{InetSocketAddress, ServerSocket}
import java.nio.ByteBuffer
import java.util.{List => JList}
import java.util.Collections

import scala.collection.JavaConverters._

import com.google.common.base.Charsets.UTF_8
import org.apache.avro.ipc.NettyTransceiver
import org.apache.avro.ipc.specific.SpecificRequestor
import org.apache.commons.lang3.RandomUtils
import org.apache.flume.source.avro
import org.apache.flume.source.avro.{AvroSourceProtocol, AvroFlumeEvent}
import org.jboss.netty.channel.ChannelPipeline
import org.jboss.netty.channel.socket.SocketChannel
import org.jboss.netty.channel.socket.nio.NioClientSocketChannelFactory
import org.jboss.netty.handler.codec.compression.{ZlibDecoder, ZlibEncoder}

import org.apache.spark.util.Utils
import org.apache.spark.SparkConf

private[flume] class FlumeTestUtils {

  // ... (remaining members of FlumeTestUtils are elided in this listing;
  // RandomUtils is used in the elided portion to pick a random port) ...

  // Netty channel factory that zlib-compresses traffic, used to exercise
  // Flume's compressed Avro source.
  private class CompressionChannelFactory(compressionLevel: Int)
      extends NioClientSocketChannelFactory {
    override def newChannel(pipeline: ChannelPipeline): SocketChannel = {
      val encoder = new ZlibEncoder(compressionLevel)
      pipeline.addFirst("deflater", encoder)
      pipeline.addFirst("inflater", new ZlibDecoder())
      super.newChannel(pipeline)
    }
  }
}
Example 10
Source File: MQTTTestUtils.scala From BigDatalog with Apache License 2.0
package org.apache.spark.streaming.mqtt

import java.net.{ServerSocket, URI}

import scala.language.postfixOps

import com.google.common.base.Charsets.UTF_8
import org.apache.activemq.broker.{BrokerService, TransportConnector}
import org.apache.commons.lang3.RandomUtils
import org.eclipse.paho.client.mqttv3._
import org.eclipse.paho.client.mqttv3.persist.MqttDefaultFilePersistence

import org.apache.spark.util.Utils
import org.apache.spark.{Logging, SparkConf}

private[mqtt] class MQTTTestUtils extends Logging {

  private val persistenceDir = Utils.createTempDir()
  private val brokerHost = "localhost"
  private val brokerPort = findFreePort()

  private var broker: BrokerService = _
  private var connector: TransportConnector = _

  def brokerUri: String = {
    s"$brokerHost:$brokerPort"
  }

  def setup(): Unit = {
    broker = new BrokerService()
    broker.setDataDirectoryFile(Utils.createTempDir())
    connector = new TransportConnector()
    connector.setName("mqtt")
    connector.setUri(new URI("mqtt://" + brokerUri))
    broker.addConnector(connector)
    broker.start()
  }

  def teardown(): Unit = {
    if (broker != null) {
      broker.stop()
      broker = null
    }
    if (connector != null) {
      connector.stop()
      connector = null
    }
    Utils.deleteRecursively(persistenceDir)
  }

  private def findFreePort(): Int = {
    val candidatePort = RandomUtils.nextInt(1024, 65536)
    Utils.startServiceOnPort(candidatePort, (trialPort: Int) => {
      val socket = new ServerSocket(trialPort)
      socket.close()
      (null, trialPort)
    }, new SparkConf())._2
  }

  def publishData(topic: String, data: String): Unit = {
    var client: MqttClient = null
    try {
      val persistence = new MqttDefaultFilePersistence(persistenceDir.getAbsolutePath)
      client = new MqttClient("tcp://" + brokerUri, MqttClient.generateClientId(), persistence)
      client.connect()
      if (client.isConnected) {
        val msgTopic = client.getTopic(topic)
        val message = new MqttMessage(data.getBytes(UTF_8))
        message.setQos(1)
        message.setRetained(true)
        for (i <- 0 to 10) {
          try {
            msgTopic.publish(message)
          } catch {
            case e: MqttException if e.getReasonCode == MqttException.REASON_CODE_MAX_INFLIGHT =>
              // wait for Spark streaming to consume something from the message queue
              Thread.sleep(50)
          }
        }
      }
    } finally {
      if (client != null) {
        client.disconnect()
        client.close()
        client = null
      }
    }
  }
}