Java Code Examples for com.datatorrent.api.LocalMode#getDAG()

The following examples show how to use com.datatorrent.api.LocalMode#getDAG(). All of them are drawn from the attic-apex-malhar project (Apache License 2.0); the source file for each example is noted above it.
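
Before the project examples, here is a minimal sketch of the typical LocalMode lifecycle: create a LocalMode instance, obtain its DAG via getDAG(), populate the DAG with operators and streams, then run it through the controller. The two operator classes and their port fields (MyInputOperator.output, MyOutputOperator.input) are hypothetical placeholders, not part of the LocalMode API.

// A minimal sketch of the LocalMode#getDAG() lifecycle.
// MyInputOperator and MyOutputOperator are hypothetical placeholder operators.
LocalMode lma = LocalMode.newInstance();
DAG dag = lma.getDAG();

MyInputOperator in = dag.addOperator("input", new MyInputOperator());
MyOutputOperator out = dag.addOperator("output", new MyOutputOperator());
dag.addStream("stream", in.output, out.input);

LocalMode.Controller lc = lma.getController();
lc.setHeartbeatMonitoringEnabled(false);
lc.run(5000); // run the embedded cluster for up to 5 seconds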
Example 1
Source File: ApexStreamImpl.java    From attic-apex-malhar with Apache License 2.0
@Override
public void runEmbedded(boolean async, long duration, Callable<Boolean> exitCondition)
{
  LocalMode lma = LocalMode.newInstance();
  populateDag(lma.getDAG());
  DAG dag = lma.getDAG();
  LocalMode.Controller lc = lma.getController();
  if (lc instanceof StramLocalCluster) {
    ((StramLocalCluster)lc).setExitCondition(exitCondition);
  }
  if (async) {
    lc.runAsync();
  } else {
    if (duration >= 0) {
      lc.run(duration);
    } else {
      lc.run();
    }
  }
}
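
A hypothetical call site for the method above might look like the following; stream is assumed to be the ApexStream whose runEmbedded() implementation is shown, and TupleCounter.count is an assumed static counter, not part of the API.

// Hypothetical usage (assumed names): run the populated DAG synchronously for up
// to 10 seconds, exiting early once enough tuples have been observed.
stream.runEmbedded(false, 10000, new Callable<Boolean>()
{
  @Override
  public Boolean call() throws Exception
  {
    return TupleCounter.count >= 100; // assumed static tuple counter
  }
});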
 
Example 2
Source File: KinesisInputOperatorTest.java    From attic-apex-malhar with Apache License 2.0
@Test
public void testWindowDataManager() throws Exception
{
  // Create DAG for testing.
  LocalMode lma = LocalMode.newInstance();
  DAG dag = lma.getDAG();

  KinesisStringInputOperator inputOperator = dag.addOperator("KinesisInput", new KinesisStringInputOperator()
  {
    @Override
    public void deactivate()
    {
    }

    @Override
    public void teardown()
    {
    }
  });
  testMeta.operator = inputOperator;
  Assert.assertTrue("Default behaviour of WindowDataManager changed",
      (inputOperator.getWindowDataManager() instanceof WindowDataManager.NoopWindowDataManager));
}
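
If a test needs idempotent replay instead of the no-op default verified above, the window data manager can be swapped for a file-system-backed one, as the Kafka examples below do with setWindowDataManager(new FSWindowDataManager()). Assuming the Kinesis operator exposes the matching setter, a sketch:

// Sketch (assumed setter on the Kinesis operator): replace the default
// NoopWindowDataManager so windows can be replayed identically after failure.
inputOperator.setWindowDataManager(new FSWindowDataManager());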
 
Example 3
Source File: FunctionOperatorTest.java    From attic-apex-malhar with Apache License 2.0
@Test
public void testMapOperator() throws Exception
{
  LocalMode lma = LocalMode.newInstance();
  DAG dag = lma.getDAG();

  NumberGenerator numGen = dag.addOperator("numGen", new NumberGenerator());
  FunctionOperator.MapFunctionOperator<Integer, Integer> mapper
      = dag.addOperator("mapper", new FunctionOperator.MapFunctionOperator<Integer, Integer>(new Square()));
  ResultCollector collector = dag.addOperator("collector", new ResultCollector());

  dag.addStream("raw numbers", numGen.output, mapper.input);
  dag.addStream("mapped results", mapper.output, collector.input);

  // Create local cluster
  LocalMode.Controller lc = lma.getController();
  lc.setHeartbeatMonitoringEnabled(false);

  ((StramLocalCluster)lc).setExitCondition(new Callable<Boolean>()
  {
    @Override
    public Boolean call() throws Exception
    {
      return TupleCount == NumTuples;
    }
  });

  lc.run(5000);

  Assert.assertEquals(285, sum);
}
 
Example 4
Source File: XmlParserApplicationTest.java    From attic-apex-malhar with Apache License 2.0
@Test
public void testApplication()
{
  try {
    LocalMode lma = LocalMode.newInstance();
    DAG dag = lma.getDAG();
    XmlDataEmitterOperator input = dag.addOperator("data", new XmlDataEmitterOperator());
    XmlParser parser = dag.addOperator("xmlparser", new XmlParser());
    ResultCollector rc = dag.addOperator("rc", new ResultCollector());
    dag.getMeta(parser).getMeta(parser.out).getAttributes().put(Context.PortContext.TUPLE_CLASS, org.apache.apex.malhar.lib.parser.XmlParserTest.EmployeeBean.class);
    ConsoleOutputOperator xmlObjectOp = dag.addOperator("xmlObjectOp", new ConsoleOutputOperator());
    xmlObjectOp.setDebug(true);
    dag.addStream("input", input.output, parser.in);
    dag.addStream("output", parser.parsedOutput, xmlObjectOp.input);
    dag.addStream("pojo", parser.out,rc.input);
    LocalMode.Controller lc = lma.getController();
    lc.setHeartbeatMonitoringEnabled(false);
    ((StramLocalCluster)lc).setExitCondition(new Callable<Boolean>()
    {
      @Override
      public Boolean call() throws Exception
      {
        return TupleCount == 1;
      }
    });
    lc.run(10000); // runs for 10 seconds and quits
    Assert.assertEquals(1, TupleCount);
    Assert.assertEquals("john", obj.getName());
  } catch (ConstraintViolationException e) {
    Assert.fail("constraint violations: " + e.getConstraintViolations());
  }
}
 
Example 5
Source File: KeyValueStoreOperatorTest.java    From attic-apex-malhar with Apache License 2.0
public void testInputOperator() throws Exception
{
  testStore.connect();
  testStore.put("test_abc", "789");
  testStore.put("test_def", "456");
  testStore.put("test_ghi", "123");
  try {
    LocalMode lma = LocalMode.newInstance();
    DAG dag = lma.getDAG();
    @SuppressWarnings("unchecked")
    InputOperator<S> inputOperator = dag.addOperator("input", new InputOperator<S>());
    CollectorModule<Object> collector = dag.addOperator("collector", new CollectorModule<Object>());
    inputOperator.addKey("test_abc");
    inputOperator.addKey("test_def");
    inputOperator.addKey("test_ghi");
    inputOperator.setStore(operatorStore);
    dag.addStream("stream", inputOperator.outputPort, collector.inputPort);
    final LocalMode.Controller lc = lma.getController();
    lc.run(3000);
    lc.shutdown();
    Assert.assertEquals("789", CollectorModule.resultMap.get("test_abc"));
    Assert.assertEquals("456", CollectorModule.resultMap.get("test_def"));
    Assert.assertEquals("123", CollectorModule.resultMap.get("test_ghi"));

  } finally {
    testStore.remove("test_abc");
    testStore.remove("test_def");
    testStore.remove("test_ghi");
    testStore.disconnect();
  }
}
 
Example 6
Source File: KafkaOutputOperatorTest.java    From attic-apex-malhar with Apache License 2.0
private List<Person> ReadFromKafka()
{
  tupleCollection.clear();

  // Set up consumer properties
  Properties props = new Properties();
  props.put(BOOTSTRAP_SERVERS_CONFIG, getClusterConfig());
  props.put(KEY_DESERIALIZER_CLASS_CONFIG, KafkaSinglePortExactlyOnceOutputOperator.KEY_DESERIALIZER);
  props.put(VALUE_DESERIALIZER_CLASS_CONFIG, VALUE_DESERIALIZER);
  props.put(GROUP_ID_CONFIG, "KafkaTest");

  LocalMode lma = LocalMode.newInstance();
  DAG dag = lma.getDAG();

  // Create KafkaSinglePortInputOperator
  KafkaSinglePortInputOperator node = dag.addOperator("Kafka input", KafkaSinglePortInputOperator.class);
  node.setConsumerProps(props);
  node.setInitialPartitionCount(1);
  // set topic
  node.setTopics(testName);
  node.setInitialOffset(AbstractKafkaInputOperator.InitialOffset.EARLIEST.name());
  node.setClusters(getClusterConfig());
  node.setStrategy("one_to_one");

  // Create Test tuple collector
  CollectorModule collector1 = dag.addOperator("collector", new CollectorModule());

  // Connect ports
  dag.addStream("Kafka message", node.outputPort, collector1.inputPort);

  // Create local cluster
  final LocalMode.Controller lc = lma.getController();
  lc.setHeartbeatMonitoringEnabled(false);

  lc.run(30000);

  return tupleCollection;
}
 
Example 7
Source File: FunctionOperatorTest.java    From attic-apex-malhar with Apache License 2.0
@Test
public void testFilterOperator() throws Exception
{
  LocalMode lma = LocalMode.newInstance();
  DAG dag = lma.getDAG();

  FunctionOperator.FilterFunctionOperator<Integer> filter0
      = new FunctionOperator.FilterFunctionOperator<Integer>(new Function.FilterFunction<Integer>()
      {
        @Override
        public boolean f(Integer in)
        {
          return in % divider == 0;
        }
      });

  NumberGenerator numGen = dag.addOperator("numGen", new NumberGenerator());
  FunctionOperator.FilterFunctionOperator<Integer> filter = dag.addOperator("filter", filter0);
  ResultCollector collector = dag.addOperator("collector", new ResultCollector());

  dag.addStream("raw numbers", numGen.output, filter.input);
  dag.addStream("filtered results", filter.output, collector.input);

  // Create local cluster
  LocalMode.Controller lc = lma.getController();
  lc.setHeartbeatMonitoringEnabled(false);

  ((StramLocalCluster)lc).setExitCondition(new Callable<Boolean>()
  {
    @Override
    public Boolean call() throws Exception
    {
      return TupleCount == NumTuples / divider;
    }
  });

  lc.run(5000);
  Assert.assertEquals(20, sum);
}
 
Example 8
Source File: FunctionOperatorTest.java    From attic-apex-malhar with Apache License 2.0
@Test
public void testFlatMapOperator() throws Exception
{
  LocalMode lma = LocalMode.newInstance();
  DAG dag = lma.getDAG();

  NumberListGenerator numGen = dag.addOperator("numGen", new NumberListGenerator());
  FunctionOperator.FlatMapFunctionOperator<List<Integer>, Integer> fm
      = dag.addOperator("flatmap", new FunctionOperator.FlatMapFunctionOperator<>(new FmFunction()));
  ResultCollector collector = dag.addOperator("collector", new ResultCollector());

  dag.addStream("raw numbers", numGen.output, fm.input);
  dag.addStream("flatmap results", fm.output, collector.input);

  // Create local cluster
  LocalMode.Controller lc = lma.getController();
  lc.setHeartbeatMonitoringEnabled(false);

  ((StramLocalCluster)lc).setExitCondition(new Callable<Boolean>()
  {
    @Override
    public Boolean call() throws Exception
    {
      return TupleCount == 13;
    }
  });

  lc.run(5000);

  Assert.assertEquals(39555, sum);
}
 
Example 9
Source File: ShardManagerTest.java    From attic-apex-malhar with Apache License 2.0
public void testPartitionableInputOperator(KinesisConsumer consumer) throws Exception
{
  // Set to 3 to make sure all the tuples from both partitions are received and the offsets have been updated to 102
  latch = new CountDownLatch(3);

  // Start producer
  KinesisTestProducer p = new KinesisTestProducer(streamName, true);
  p.setSendCount(totalCount);
  // Wait for the producer to send all records
  p.run();

  // Create DAG for testing.
  LocalMode lma = LocalMode.newInstance();
  DAG dag = lma.getDAG();

  // Create KinesisStringInputOperator
  KinesisStringInputOperator node = dag.addOperator("Kinesis consumer", KinesisStringInputOperator.class);
  node.setAccessKey(credentials.getCredentials().getAWSAccessKeyId());
  node.setSecretKey(credentials.getCredentials().getAWSSecretKey());
  node.setStreamName(streamName);
  TestShardManager tfm = new TestShardManager();

  tfm.setFilename(streamName + OFFSET_FILE);

  node.setShardManager(tfm);

  node.setStrategy(AbstractKinesisInputOperator.PartitionStrategy.MANY_TO_ONE.toString());
  node.setRepartitionInterval(-1);

  // Set the stream name
  consumer.setStreamName(streamName);
  // Start reading from the earliest records in the stream
  consumer.setInitialOffset("earliest");

  node.setConsumer(consumer);

  // Create Test tuple collector
  CollectorModule collector = dag.addOperator("RecordCollector", new CollectorModule());

  // Connect ports
  dag.addStream("Kinesis Records", node.outputPort, collector.inputPort).setLocality(Locality.CONTAINER_LOCAL);

  // Create local cluster
  final LocalMode.Controller lc = lma.getController();
  lc.setHeartbeatMonitoringEnabled(true);

  lc.runAsync();

  // Wait up to 15s for the consumer to finish consuming all the records
  latch.await(15000, TimeUnit.MILLISECONDS);

  // Check results
  assertEquals("Tuple count", totalCount, collectedTuples.size());
  logger.debug(String.format("Number of emitted tuples: %d -> %d", collectedTuples.size(), totalCount));

  lc.shutdown();
}
 
Example 10
Source File: KinesisPartitionableInputOperatorTest.java    From attic-apex-malhar with Apache License 2.0
public void testPartitionableInputOperator(KinesisConsumer consumer) throws Exception
{
  // Set to 2 to make sure the END_TUPLE from each of the 2 partitions is received
  latch = new CountDownLatch(2);

  int totalCount = 100;

  // Start producer
  KinesisTestProducer p = new KinesisTestProducer(streamName, true);
  p.setSendCount(totalCount);
  new Thread(p).start();

  // Create DAG for testing.
  LocalMode lma = LocalMode.newInstance();
  DAG dag = lma.getDAG();

  // Create KinesisStringInputOperator
  KinesisStringInputOperator node = dag.addOperator("Kinesis consumer", KinesisStringInputOperator.class);
  node.setAccessKey(credentials.getCredentials().getAWSAccessKeyId());
  node.setSecretKey(credentials.getCredentials().getAWSSecretKey());
  node.setStreamName(streamName);
  // Set the stream name
  consumer.setStreamName(streamName);
  node.setConsumer(consumer);

  // Create Test tuple collector
  CollectorModule<String> collector = dag.addOperator("RecordsCollector", new CollectorModule<String>());

  // Connect ports
  dag.addStream("Kinesis stream", node.outputPort, collector.inputPort).setLocality(Locality.CONTAINER_LOCAL);

  // Create local cluster
  final LocalMode.Controller lc = lma.getController();
  lc.setHeartbeatMonitoringEnabled(false);

  lc.runAsync();

  // Wait up to 15s for the consumer to finish consuming all the records
  latch.await(15000, TimeUnit.MILLISECONDS);

  // Check results
  Assert.assertEquals("Collections size", 1, collections.size());
  Assert.assertEquals("Tuple count", totalCount, collections.get(collector.inputPort.id).size());
  logger.debug(String.format("Number of emitted tuples: %d", collections.get(collector.inputPort.id).size()));

  //p.close();
  lc.shutdown();
}
 
Example 11
Source File: KinesisInputOperatorTest.java    From attic-apex-malhar with Apache License 2.0
@Test
public void testKinesisByteArrayInputOperator() throws Exception
{
  int totalCount = 10;
  // Initialize the latch for this test
  latch = new CountDownLatch(1);

  // Start producer
  KinesisTestProducer p = new KinesisTestProducer(streamName);
  p.setSendCount(totalCount);
  p.setBatchSize(9);
  new Thread(p).start();

  // Create DAG for testing.
  LocalMode lma = LocalMode.newInstance();
  DAG dag = lma.getDAG();

  // Create KinesisByteArrayInputOperator and set some consumer-related properties.
  KinesisByteArrayInputOperator node = dag.addOperator("Kinesis message consumer", KinesisByteArrayInputOperator.class);
  node.setAccessKey(credentials.getCredentials().getAWSAccessKeyId());
  node.setSecretKey(credentials.getCredentials().getAWSSecretKey());
  KinesisConsumer consumer = new KinesisConsumer();
  consumer.setStreamName(streamName);
  consumer.setRecordsLimit(totalCount);
  node.setConsumer(consumer);

  // Create Test tuple collector
  CollectorModule<byte[]> collector = dag.addOperator("TestMessageCollector", new CollectorModule<byte[]>());

  // Connect ports
  dag.addStream("Kinesis message", node.outputPort, collector.inputPort).setLocality(Locality.CONTAINER_LOCAL);

  // Create local cluster
  final LocalMode.Controller lc = lma.getController();
  lc.setHeartbeatMonitoringEnabled(false);

  lc.runAsync();

  // Wait up to 45s for the consumer to finish consuming all the messages
  latch.await(45000, TimeUnit.MILLISECONDS);

  // Check results
  Assert.assertEquals("Collections size", 1, collections.size());
  Assert.assertEquals("Tuple count", totalCount, collections.get(collector.inputPort.id).size());
  logger.debug(String.format("Number of emitted tuples: %d", collections.get(collector.inputPort.id).size()));

  lc.shutdown();
}
 
Example 12
Source File: KafkaInputOperatorTest.java    From attic-apex-malhar with Apache License 2.0
public void testInputOperator(boolean hasFailure, boolean idempotent) throws Exception
{
  // Each broker should get an END_TUPLE message
  latch = new CountDownLatch(countDownAll ? totalCount + totalBrokers : totalBrokers);

  logger.info(
      "Test Case: name: {}; totalBrokers: {}; hasFailure: {}; hasMultiCluster: {};" +
      " hasMultiPartition: {}, partition: {}",
      testName, totalBrokers, hasFailure, hasMultiCluster, hasMultiPartition, partition);

  // Start producer
  KafkaTestProducer p = new KafkaTestProducer(testName, hasMultiPartition, hasMultiCluster);
  p.setSendCount(totalCount);
  Thread t = new Thread(p);
  t.start();

  int expectedReceiveCount = totalCount + totalBrokers;

  // Create DAG for testing.
  LocalMode lma = LocalMode.newInstance();
  DAG dag = lma.getDAG();

  // Create KafkaSinglePortInputOperator
  KafkaSinglePortInputOperator node = dag.addOperator(
      "Kafka input" + testName, KafkaSinglePortInputOperator.class);
  node.setInitialPartitionCount(1);
  // set topic
  node.setTopics(testName);
  node.setInitialOffset(AbstractKafkaInputOperator.InitialOffset.EARLIEST.name());
  node.setClusters(getClusterConfig());
  node.setStrategy(partition);
  if (idempotent) {
    node.setWindowDataManager(new FSWindowDataManager());
  }

  // Create Test tuple collector
  CollectorModule collector = dag.addOperator("TestMessageCollector", CollectorModule.class);
  collector.isIdempotentTest = idempotent;

  // Connect ports
  dag.addStream("Kafka message" + testName, node.outputPort, collector.inputPort)
      .setLocality(Locality.CONTAINER_LOCAL);

  if (hasFailure) {
    setupHasFailureTest(node, dag);
  }

  // Create local cluster
  LocalMode.Controller lc = lma.getController();
  lc.setHeartbeatMonitoringEnabled(false);

  // Run the Controller inside another thread. This is almost the same as calling
  // Controller.runAsync(), but runAsync() does not expose the thread it runs on,
  // so we would not know when that thread terminates. Creating the thread ourselves
  // lets us call join() later to make sure the Controller has shut down completely.
  monitorThread = new Thread((StramLocalCluster)lc, "master");
  monitorThread.start();

  boolean notTimeout = true;
  try {
    // Wait up to waitTime ms for the consumer to finish consuming all the messages
    notTimeout = latch.await(waitTime, TimeUnit.MILLISECONDS);
    lc.shutdown();

    // Wait until the controller thread has finished.
    monitorThread.join();
  } catch (Exception e) {
    logger.warn(e.getMessage());
  }

  t.join();

  if (!notTimeout || expectedReceiveCount != tupleCollection.size()) {
    logger.info("Number of received/expected tuples: {}/{}, testName: {}, tuples: \n{}", tupleCollection.size(),
        expectedReceiveCount, testName, tupleCollection);
  }
  Assert.assertTrue("TIMEOUT. testName: " + this.testName + "; Collected data: "
      + tupleCollection, notTimeout);

  // Check results
  Assert.assertTrue("testName: " + testName + "; Collected tuple size: " + tupleCollection.size()
      + "; Expected tuple size: " + expectedReceiveCount + "; data: \n" + tupleCollection,
      expectedReceiveCount == tupleCollection.size());

  logger.info("End of test case: {}", testName);
}
 
Example 13
Source File: RedisInputOperatorTest.java    From attic-apex-malhar with Apache License 2.0
@Test
public void testInputOperator() throws IOException
{
  this.operatorStore = new RedisStore();
  this.testStore = new RedisStore();

  testStore.connect();
  ScanParams params = new ScanParams();
  params.count(1);

  testStore.put("test_abc", "789");
  testStore.put("test_def", "456");
  testStore.put("test_ghi", "123");

  try {
    LocalMode lma = LocalMode.newInstance();
    DAG dag = lma.getDAG();

    RedisKeyValueInputOperator inputOperator = dag.addOperator("input", new RedisKeyValueInputOperator());
    final CollectorModule collector = dag.addOperator("collector", new CollectorModule());

    inputOperator.setStore(operatorStore);
    dag.addStream("stream", inputOperator.outputPort, collector.inputPort);
    final LocalMode.Controller lc = lma.getController();

    new Thread("LocalClusterController")
    {
      @Override
      public void run()
      {
        long startTms = System.currentTimeMillis();
        long timeout = 50000L;
        try {
          Thread.sleep(1000);
          while (System.currentTimeMillis() - startTms < timeout) {
            if (CollectorModule.resultMap.size() < 3) {
              Thread.sleep(10);
            } else {
              break;
            }
          }
        } catch (InterruptedException ex) {
          //
        }
        lc.shutdown();
      }
    }.start();

    lc.run();

    Assert.assertTrue(CollectorModule.resultMap.contains(new KeyValPair<String, String>("test_abc", "789")));
    Assert.assertTrue(CollectorModule.resultMap.contains(new KeyValPair<String, String>("test_def", "456")));
    Assert.assertTrue(CollectorModule.resultMap.contains(new KeyValPair<String, String>("test_ghi", "123")));
  } finally {
    for (KeyValPair<String, String> entry : CollectorModule.resultMap) {
      testStore.remove(entry.getKey());
    }
    testStore.disconnect();
  }
}
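
When the completion criterion can be expressed as a predicate, the polling thread above can be replaced by the exit-condition mechanism shown in Examples 3 and 7; a sketch under that assumption:

// Sketch: equivalent exit condition via StramLocalCluster (cast assumed to succeed)
((StramLocalCluster)lc).setExitCondition(new Callable<Boolean>()
{
  @Override
  public Boolean call() throws Exception
  {
    return CollectorModule.resultMap.size() >= 3;
  }
});
lc.run(50000); // run until the condition holds or 50 seconds elapse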
 
Example 14
Source File: MqttInputOperatorTest.java    From attic-apex-malhar with Apache License 2.0
@Test
public void testInputOperator() throws Exception
{
  String host = "localhost";
  int port = 1883;
  MqttClientConfig config = new MqttClientConfig();
  config.setHost(host);
  config.setPort(port);
  config.setCleanSession(true);

  LocalMode lma = LocalMode.newInstance();
  DAG dag = lma.getDAG();
  final TestMqttInputOperator input = dag.addOperator("input", TestMqttInputOperator.class);
  CollectorModule<KeyValPair<String, String>> collector = dag.addOperator("collector", new CollectorModule<KeyValPair<String, String>>());

  input.addSubscribeTopic("a", QoS.AT_MOST_ONCE);
  input.addSubscribeTopic("b", QoS.AT_MOST_ONCE);
  input.addSubscribeTopic("c", QoS.AT_MOST_ONCE);
  input.setMqttClientConfig(config);
  input.setup(null);
  dag.addStream("stream", input.outputPort, collector.inputPort);

  final LocalMode.Controller lc = lma.getController();
  lc.runAsync();

  long timeout = System.currentTimeMillis() + 3000;
  while (true) {
    if (activated) {
      break;
    }
    Assert.assertTrue("Activation timeout", timeout > System.currentTimeMillis());
    Thread.sleep(1000);
  }

  input.activate(null);
  //Thread.sleep(3000);
  input.generateData();

  long timeout1 = System.currentTimeMillis() + 5000;
  try {
    while (true) {
      if (resultCount == 0) {
        Thread.sleep(10);
        Assert.assertTrue("timeout without getting any data", System.currentTimeMillis() < timeout1);
      } else {
        break;
      }
    }
  } catch (InterruptedException ex) {
    // ignore
  }
  lc.shutdown();

  Assert.assertEquals("Number of emitted tuples", 3, resultMap.size());
  Assert.assertEquals("value of a is ", "10", resultMap.get("a"));
  Assert.assertEquals("value of b is ", "200", resultMap.get("b"));
  Assert.assertEquals("value of c is ", "3000", resultMap.get("c"));
  logger.debug("resultCount:" + resultCount);
}
 
Example 15
Source File: MqttOutputOperatorTest.java    From attic-apex-malhar with Apache License 2.0
@Test
public void testDag() throws Exception
{
  String host = "localhost";
  int port = 1883;
  MqttClientConfig config = new MqttClientConfig();
  config.setHost(host);
  config.setPort(port);
  config.setCleanSession(true);
  sendingData.put("testa", "2");
  sendingData.put("testb", "20");
  sendingData.put("testc", "1000");
  LocalMode lma = LocalMode.newInstance();
  DAG dag = lma.getDAG();
  SourceModule source = dag.addOperator("source", SourceModule.class);

  TestMqttOutputOperator producer = dag.addOperator("producer", new TestMqttOutputOperator());
  producer.setMqttClientConfig(config);

  dag.addStream("Stream", source.outPort, producer.inputPort).setLocality(Locality.CONTAINER_LOCAL);

  TestMqttOutputOperator.GetDataThread consumer = producer.new GetDataThread();
  producer.setup(null);

  consumer.start();

  final LocalMode.Controller lc = lma.getController();
  lc.runAsync();
  Thread.sleep(2000);
  lc.shutdown();

  Assert.assertEquals("emitted value for testNum was ", 3, receivedData.size());
  for (Map.Entry<String, String> e : receivedData.entrySet()) {
    if (e.getKey().equals("testa")) {
      Assert.assertEquals("emitted value for 'testa' was ", "2", e.getValue());
    } else if (e.getKey().equals("testb")) {
      Assert.assertEquals("emitted value for 'testb' was ", "20", e.getValue());
    } else if (e.getKey().equals("testc")) {
      Assert.assertEquals("emitted value for 'testc' was ", "1000", e.getValue());
    }
  }

  logger.debug("end of test");
}
 
Example 16
Source File: KafkaPartitionableInputOperatorTest.java    From attic-apex-malhar with Apache License 2.0
public void testPartitionableInputOperator(KafkaConsumer consumer) throws Exception
{

  // Each broker should get an END_TUPLE message
  latch = new CountDownLatch(totalBrokers);

  int totalCount = 10000;

  // Start producer
  KafkaTestProducer p = new KafkaTestProducer(TEST_TOPIC, hasMultiPartition, hasMultiCluster);
  p.setSendCount(totalCount);
  new Thread(p).start();

  // Create DAG for testing.
  LocalMode lma = LocalMode.newInstance();
  DAG dag = lma.getDAG();

  // Create KafkaSinglePortStringInputOperator
  KafkaSinglePortStringInputOperator node = dag.addOperator("Kafka message consumer", KafkaSinglePortStringInputOperator.class);
  node.setInitialPartitionCount(1);

  // set topic
  consumer.setTopic(TEST_TOPIC);
  consumer.setInitialOffset("earliest");

  node.setConsumer(consumer);

  String clusterString = "cluster1::localhost:" + TEST_ZOOKEEPER_PORT[0] + (hasMultiCluster ? ";cluster2::localhost:" + TEST_ZOOKEEPER_PORT[1] : "");
  node.setZookeeper(clusterString);

  // Create Test tuple collector
  CollectorModule<String> collector = dag.addOperator("TestMessageCollector", new CollectorModule<String>());

  // Connect ports
  dag.addStream("Kafka message", node.outputPort, collector.inputPort).setLocality(Locality.CONTAINER_LOCAL);

  // Create local cluster
  final LocalMode.Controller lc = lma.getController();
  lc.setHeartbeatMonitoringEnabled(false);

  lc.runAsync();

  // Wait up to 40s for the consumer to finish consuming all the messages
  Assert.assertTrue("TIMEOUT: 40s ", latch.await(40000, TimeUnit.MILLISECONDS));

  // Check results
  Assert.assertEquals("Collections size", 1, collections.size());
  Assert.assertEquals("Tuple count", totalCount, collections.get(collector.inputPort.id).size());
  logger.debug(String.format("Number of emitted tuples: %d", collections.get(collector.inputPort.id).size()));

  p.close();
  lc.shutdown();
  // Kafka has a shutdown bug in the connector: make sure the Kafka client resources have been cleaned up before shutting down the broker
  Thread.sleep(5000);
}
 
Example 17
Source File: KafkaExactlyOnceOutputOperatorTest.java    From attic-apex-malhar with Apache License 2.0
/**
 * Test AbstractKafkaExactOnceOutputOperator (i.e. an output adapter for Kafka, aka producer).
 * This module sends data into a Kafka message bus.
 *
 * [Generate tuple] ==> [Send the tuple through the Kafka output adapter (i.e. producer) into
 * the Kafka message bus, failing the producer at a certain point and bringing it back]
 * ==> [Receive the data in an outside Kafka listener (i.e. consumer)] ==> Verify that Kafka
 * does not receive duplicate messages.
 *
 * @throws Exception
 */
@Test
@SuppressWarnings({"rawtypes"})
public void testKafkaExactOnceOutputOperator() throws Exception
{
  // Initialize the latch to synchronize the threads
  latch = new CountDownLatch(maxTuple);
  // Setup a message listener to receive the message
  KafkaTestConsumer listener = new KafkaTestConsumer("topic1");
  listener.setLatch(latch);

  // Malhar module to send message
  // Create DAG for testing.
  LocalMode lma = LocalMode.newInstance();
  final DAG dag = lma.getDAG();

  StringGeneratorInputOperator generator = dag.addOperator("TestStringGenerator", StringGeneratorInputOperator.class);
  final SimpleKafkaExactOnceOutputOperator node = dag.addOperator("Kafka message producer", SimpleKafkaExactOnceOutputOperator.class);

  Properties props = new Properties();
  props.setProperty("serializer.class", "kafka.serializer.StringEncoder");
  props.put("metadata.broker.list", "localhost:9092");
  props.setProperty("producer.type", "async");
  props.setProperty("queue.buffering.max.ms", "200");
  props.setProperty("queue.buffering.max.messages", "10");
  props.setProperty("batch.num.messages", "5");

  node.setConfigProperties(props);
  // Set configuration parameters for Kafka
  node.setTopic("topic1");

  // Connect ports
  dag.addStream("Kafka message", generator.outputPort, node.inputPort).setLocality(Locality.CONTAINER_LOCAL);


  // Create local cluster
  final LocalMode.Controller lc = lma.getController();
  lc.runAsync();

  Future f = Executors.newFixedThreadPool(1).submit(listener);
  f.get(30, TimeUnit.SECONDS);

  lc.shutdown();

  // Check values send vs received
  Assert.assertEquals("Number of emitted tuples", maxTuple, listener.holdingBuffer.size());
  logger.debug(String.format("Number of emitted tuples: %d", listener.holdingBuffer.size()));
  Assert.assertEquals("First tuple", "testString 1", listener.getMessage(listener.holdingBuffer.peek()));

  listener.close();
}
 
Example 18
Source File: OffsetManagerTest.java    From attic-apex-malhar with Apache License 2.0
public void testPartitionableInputOperator(KafkaConsumer consumer, int expectedCount) throws Exception
{
  // Set to 3 to make sure the END_TUPLE from both partitions is received and the offsets have been updated to 102
  latch = new CountDownLatch(3);

  // Start producer
  KafkaTestProducer p = new KafkaTestProducer(TEST_TOPIC, true);
  p.setProducerType("sync");
  p.setSendCount(totalCount);
  // Wait for the producer to send all messages
  p.run();
  p.close();

  // Create DAG for testing.
  LocalMode lma = LocalMode.newInstance();
  DAG dag = lma.getDAG();

  // Create KafkaSinglePortStringInputOperator
  KafkaSinglePortStringInputOperator node = dag.addOperator("Kafka message consumer", KafkaSinglePortStringInputOperator.class);

  TestOffsetManager tfm = new TestOffsetManager();

  tfm.setFilename(TEST_TOPIC + OFFSET_FILE);

  node.setInitialPartitionCount(1);
  node.setOffsetManager(tfm);
  node.setStrategy(PartitionStrategy.ONE_TO_MANY.toString());
  node.setRepartitionInterval(-1);

  // Set the topic
  consumer.setTopic(TEST_TOPIC);
  // Set the ZooKeeper list used to initialize the partitions
  SetMultimap<String, String> zookeeper = HashMultimap.create();
  String zks = "localhost:" + KafkaOperatorTestBase.TEST_ZOOKEEPER_PORT[0];
  consumer.setZookeeper(zks);
  consumer.setInitialOffset("earliest");

  node.setConsumer(consumer);

  // Create Test tuple collector
  CollectorModule collector = dag.addOperator("TestMessageCollector", new CollectorModule());

  // Connect ports
  dag.addStream("Kafka message", node.outputPort, collector.inputPort).setLocality(Locality.CONTAINER_LOCAL);

  dag.setAttribute(Context.DAGContext.CHECKPOINT_WINDOW_COUNT, 1);

  // Create local cluster
  final LocalMode.Controller lc = lma.getController();
  lc.setHeartbeatMonitoringEnabled(true);

  lc.runAsync();

  // Wait up to 30s for the consumer to finish consuming all the messages and for the offsets to be updated to 100
  boolean isNotTimeout = latch.await(30000, TimeUnit.MILLISECONDS);
  assertTrue("TIMEOUT: 30s, collected " + collectedTuples.size() + " tuples", isNotTimeout);

  // Check results
  assertEquals("Tuple count " + collectedTuples, expectedCount, collectedTuples.size());
  logger.debug(String.format("Number of emitted tuples: %d", collectedTuples.size()));

  p.close();
  lc.shutdown();
}
 
Example 19
Source File: KafkaInputOperatorTest.java    From attic-apex-malhar with Apache License 2.0
/**
 * Test AbstractKafkaSinglePortInputOperator (i.e. an input adapter for
 * Kafka, aka consumer). This module receives data from an outside test
 * generator through the Kafka message bus and feeds that data into the
 * Malhar streaming platform.
 *
 * [Generate message and send it to the Kafka message bus] ==> [Receive that
 * message through the Kafka input adapter (i.e. consumer) and emit it via the
 * emitTuples() interface on the output port during the onMessage call]
 *
 * @throws Exception
 */
public void testKafkaInputOperator(int sleepTime, final int totalCount, KafkaConsumer consumer, boolean isValid, boolean idempotent) throws Exception
{
  // Initialize the latch for this test
  latch = new CountDownLatch(1);

  // Start producer
  KafkaTestProducer p = new KafkaTestProducer(TEST_TOPIC);
  p.setSendCount(totalCount);
  new Thread(p).start();

  // Create DAG for testing.
  LocalMode lma = LocalMode.newInstance();
  DAG dag = lma.getDAG();

  // Create KafkaSinglePortStringInputOperator
  KafkaSinglePortStringInputOperator node = dag.addOperator("Kafka message consumer", KafkaSinglePortStringInputOperator.class);
  if (isSuicide) {
    // Make some extreme assumptions so the test fails if the wrong offsets are checkpointed
    dag.setAttribute(Context.DAGContext.CHECKPOINT_WINDOW_COUNT, 1);
    dag.setAttribute(Context.OperatorContext.STORAGE_AGENT, new FSStorageAgent("target/ck", new Configuration()));
    node.setMaxTuplesPerWindow(500);
  }

  if (idempotent) {
    node.setWindowDataManager(new FSWindowDataManager());
  }
  consumer.setTopic(TEST_TOPIC);
  consumer.setCacheSize(5000);

  node.setConsumer(consumer);

  if (isValid) {
    node.setZookeeper("localhost:" + KafkaOperatorTestBase.TEST_ZOOKEEPER_PORT[0]);
  }

  // Create Test tuple collector
  CollectorModule<String> collector = dag.addOperator("TestMessageCollector", new CollectorModule<String>());

  // Connect ports
  dag.addStream("Kafka message", node.outputPort, collector.inputPort).setLocality(Locality.CONTAINER_LOCAL);

  // Create local cluster
  final LocalMode.Controller lc = lma.getController();
  lc.setHeartbeatMonitoringEnabled(false);

  lc.runAsync();

  // Wait up to 300s for the consumer to finish consuming all the messages
  Assert.assertTrue("TIMEOUT: 300s ", latch.await(300000, TimeUnit.MILLISECONDS));

  // Check results
  Assert.assertTrue("Expected count >= " + totalCount + "; Actual count " + tupleCount.intValue(),
      totalCount <= tupleCount.intValue());
  logger.debug(String.format("Number of emitted tuples: %d", tupleCount.intValue()));

  p.close();
  lc.shutdown();
}