Java Code Examples for com.datatorrent.api.LocalMode#newInstance()

The following examples show how to use com.datatorrent.api.LocalMode#newInstance(). Each example names the project and source file it was taken from.
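
Every example below follows the same basic lifecycle: create an in-process cluster with LocalMode.newInstance(), populate a DAG (directly via getDAG() or through prepareDAG()), obtain a LocalMode.Controller, and run it synchronously or asynchronously. Here is a minimal sketch of that pattern, where MyApplication stands in for any StreamingApplication implementation:

import com.datatorrent.api.LocalMode;
import org.apache.hadoop.conf.Configuration;

public class LocalModeSketch
{
  public static void main(String[] args) throws Exception
  {
    LocalMode lma = LocalMode.newInstance();        // create the in-JVM local cluster
    Configuration conf = new Configuration(false);  // empty Hadoop configuration
    lma.prepareDAG(new MyApplication(), conf);      // MyApplication: hypothetical StreamingApplication
    LocalMode.Controller lc = lma.getController();
    lc.run(10000);                                  // run the DAG for 10 seconds, then quit
  }
}
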
Example 1
Source File: StatefulApplicationTest.java    From attic-apex-malhar with Apache License 2.0
@Test
public void testApplication() throws Exception
{
  LocalMode lma = LocalMode.newInstance();
  Configuration conf = new Configuration(false);
  conf.set("dt.operator.StatefulUniqueCounter.prop.tableName", "Test_Lookup_Cache");
  conf.set("dt.operator.StatefulUniqueCounter.prop.store.dbUrl", "jdbc:hsqldb:mem:test;sql.syntax_mys=true");
  conf.set("dt.operator.StatefulUniqueCounter.prop.store.dbDriver", "org.hsqldb.jdbcDriver");

  lma.prepareDAG(new StatefulApplication(), conf);
  lma.cloneDAG(); // check DAG serialization
  LocalMode.Controller lc = lma.getController();
  lc.setHeartbeatMonitoringEnabled(false);
  lc.runAsync();

  // let the application run for roughly 15 seconds
  long now = System.currentTimeMillis();
  while (System.currentTimeMillis() - now < 15000) {
    Thread.sleep(1000);
  }

  lc.shutdown();
}
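
The manual sleep loop above simply keeps the test alive for 15 seconds before shutting down. Roughly the same effect can be had with the synchronous run(millis) variant used in later examples, which blocks for the given duration and then stops the cluster:

// Equivalent, more compact form of runAsync() + 15s sleep + shutdown()
lc.run(15000);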
 
Example 2
Source File: OldFaithfulApplicationTest.java    From attic-apex-malhar with Apache License 2.0
@Test
public void testSomeMethod() throws Exception
{
  LocalMode lma = LocalMode.newInstance();
  OldFaithfulApplication app = new OldFaithfulApplication();
  app.populateDAG(lma.getDAG(), new Configuration(false));

  try {
    LocalMode.Controller lc = lma.getController();
    lc.setHeartbeatMonitoringEnabled(false);
    lc.run(5000);
  } catch (Exception e) {
    LOG.error("Exception: ", e);
    Assert.fail("Unexpected exception.");
  }
}
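
Several examples call setHeartbeatMonitoringEnabled(false) before running. This appears to keep the local cluster from timing out containers whose heartbeats stall, which is useful when the test thread sleeps or the code is paused in a debugger.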
 
Example 3
Source File: ApplicationTest.java    From examples with Apache License 2.0
@Test
public void testApplication() throws Exception {
  try {
    LocalMode lma = LocalMode.newInstance();
    lma.prepareDAG(new Application(), getConfig());
    LocalMode.Controller lc = lma.getController();
    lc.runAsync();

    // wait for output files to show up
    while (!check(numFiles)) {
      System.out.println("Sleeping ....");
      Thread.sleep(1000);
    }
  } catch (ConstraintViolationException e) {
    Assert.fail("constraint violations: " + e.getConstraintViolations());
  }
}
 
Example 4
Source File: JdbcInputAppTest.java    From examples with Apache License 2.0
@Test
public void testApplication() throws Exception
{
  try {
    LocalMode lma = LocalMode.newInstance();
    Configuration conf = new Configuration(false);
    conf.addResource(this.getClass().getResourceAsStream("/META-INF/properties-SimpleJdbcToHDFSApp.xml"));
    lma.prepareDAG(new JdbcHDFSApp(), conf);
    LocalMode.Controller lc = lma.getController();
    lc.runAsync();

    // wait for output files to roll      
    Thread.sleep(5000);

    String[] extensions = { "dat.0", "tmp" };
    Collection<File> list = FileUtils.listFiles(new File(FILE_NAME), extensions, false);
    Assert.assertEquals("Records in file", 10, FileUtils.readLines(list.iterator().next()).size());

  } catch (ConstraintViolationException e) {
    Assert.fail("constraint violations: " + e.getConstraintViolations());
  }
}
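
The fixed 5-second sleep makes this test timing-sensitive: on a slow machine the files may not have rolled yet. Example 3's polling loop, or an exit condition as in Example 8, is a more robust way to wait for output.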
 
Example 5
Source File: CassandraApplicationTest.java    From examples with Apache License 2.0
@Test
public void testApplication() throws Exception
{
  try {
    LocalMode lma = LocalMode.newInstance();
    Configuration conf = new Configuration(false);
    conf.addResource(this.getClass().getResourceAsStream("/properties-CassandraOutputTestApp.xml"));
    conf.set("dt.operator.CassandraDataWriter.prop.store.node", "localhost");
    conf.set("dt.operator.CassandraDataWriter.prop.store.keyspace", KEYSPACE);
    conf.set("dt.operator.CassandraDataWriter.prop.tablename", TABLE_NAME);
    lma.prepareDAG(new Application(), conf);
    LocalMode.Controller lc = lma.getController();
    lc.run(10000); // runs for 10 seconds and quits

    //validate: Cassandra provides eventual consistency so not checking for exact record count.
    String recordsQuery = "SELECT * from " + KEYSPACE + "." + TABLE_NAME + ";";
    ResultSet resultSetRecords = session.execute(recordsQuery);
    Assert.assertTrue("No records were added to the table.", resultSetRecords.getAvailableWithoutFetching() > 0);
  } catch (ConstraintViolationException e) {
    Assert.fail("constraint violations: " + e.getConstraintViolations());
  }
}
 
Example 6
Source File: KafkaInputBenchmarkTest.java    From attic-apex-malhar with Apache License 2.0
@Test
public void testBenchmark() throws FileNotFoundException
{
  Configuration conf = new Configuration();
  InputStream is = new FileInputStream("src/site/conf/dt-site-kafka.xml");
  conf.addResource(is);

  LocalMode lma = LocalMode.newInstance();

  try {
    lma.prepareDAG(new KafkaInputBenchmark(), conf);
    LocalMode.Controller lc = lma.getController();
    lc.run(30000);
  } catch (Exception ex) {
    throw new RuntimeException(ex);
  }
}
 
Example 7
Source File: JdbcOperatorTest.java    From attic-apex-malhar with Apache License 2.0
@Test
public void testApplication() throws Exception
{
  try {
    LocalMode lma = LocalMode.newInstance();
    Configuration conf = new Configuration(false);
    conf.addResource(this.getClass().getResourceAsStream("/META-INF/properties-JdbcToJdbcApp.xml"));
    lma.prepareDAG(new JdbcToJdbcApp(), conf);
    LocalMode.Controller lc = lma.getController();
    lc.runAsync();

    // wait for records to be added to table
    Thread.sleep(5000);

    Assert.assertEquals("Events in store", 10, getNumOfEventsInStore());
    dropTable();

  } catch (ConstraintViolationException e) {
    Assert.fail("constraint violations: " + e.getConstraintViolations());
  }
}
 
Example 8
Source File: DeDupExampleTest.java    From attic-apex-malhar with Apache License 2.0
@Test
public void DeDupExampleTest() throws Exception
{
  LocalMode lma = LocalMode.newInstance();
  Configuration conf = new Configuration(false);
  conf.set("dt.application.DeDupExample.operator.console.silent", "true");
  DeDupExample app = new DeDupExample();
  lma.prepareDAG(app, conf);
  LocalMode.Controller lc = lma.getController();
  ((StramLocalCluster)lc).setExitCondition(new Callable<Boolean>()
  {
    @Override
    public Boolean call() throws Exception
    {
      return DeDupExample.Collector.isDone();
    }
  });
  lc.run(50000);

  Assert.assertEquals(9, DeDupExample.Collector.getResult().getValue().size());
}
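
setExitCondition(...) is not part of the LocalMode.Controller interface used elsewhere on this page, which is why the controller is cast to StramLocalCluster, the class that implements it in local mode. Once the callable returns true the cluster shuts down, so the 50-second argument to run() acts as an upper bound rather than a fixed run time.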
 
Example 9
Source File: FunctionOperatorTest.java    From attic-apex-malhar with Apache License 2.0
@Test
public void testMapOperator() throws Exception
{
  LocalMode lma = LocalMode.newInstance();
  DAG dag = lma.getDAG();

  NumberGenerator numGen = dag.addOperator("numGen", new NumberGenerator());
  FunctionOperator.MapFunctionOperator<Integer, Integer> mapper
      = dag.addOperator("mapper", new FunctionOperator.MapFunctionOperator<Integer, Integer>(new Square()));
  ResultCollector collector = dag.addOperator("collector", new ResultCollector());

  dag.addStream("raw numbers", numGen.output, mapper.input);
  dag.addStream("mapped results", mapper.output, collector.input);

  // Create local cluster
  LocalMode.Controller lc = lma.getController();
  lc.setHeartbeatMonitoringEnabled(false);

  ((StramLocalCluster)lc).setExitCondition(new Callable<Boolean>()
  {
    @Override
    public Boolean call() throws Exception
    {
      return TupleCount == NumTuples;
    }
  });

  lc.run(5000);

  Assert.assertEquals(285, sum); // JUnit order: expected value first, then actual
}
 
Example 10
Source File: ApplicationTest.java    From examples with Apache License 2.0
private LocalMode.Controller asyncRun() throws Exception {
  LocalMode lma = LocalMode.newInstance();
  lma.prepareDAG(new ActiveMQApplication(), conf);
  LocalMode.Controller lc = lma.getController();
  lc.runAsync();
  return lc;
}
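
Because asyncRun() returns without blocking, the caller owns the controller's lifecycle. A hypothetical caller might look like this (the body of the try block is a placeholder):

LocalMode.Controller lc = asyncRun();
try {
  // ... publish test input, wait for output, assert on results ...
} finally {
  lc.shutdown(); // always stop the local cluster, even if an assertion fails
}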
 
Example 11
Source File: Application.java    From attic-apex-malhar with Apache License 2.0
public static void main(String[] args) throws Exception
{
  LocalMode lma = LocalMode.newInstance();
  Configuration conf = new Configuration(false);
  lma.prepareDAG(new Application(), conf);
  LocalMode.Controller lc = lma.getController();
  lc.run();
}
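
Unlike the timed run(millis) calls in the test examples, the no-argument run() blocks until the application itself terminates, which is presumably why this variant appears in a main() launcher rather than a JUnit test.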
 
Example 12
Source File: FileSplitterBaseTest.java    From attic-apex-malhar with Apache License 2.0
@Test
public void testSplitterInApp() throws Exception
{
  LocalMode lma = LocalMode.newInstance();
  SplitterApp app = new SplitterApp();
  Configuration appConf = new Configuration();
  appConf.set("dt.operator.Splitter.prop.blocksThreshold", "4");
  lma.prepareDAG(app, appConf);
  lma.cloneDAG(); // check serialization
  LocalMode.Controller lc = lma.getController();
  lc.runAsync();
  app.receiver.latch.await();
  Assert.assertEquals("no. of metadata", 12, app.receiver.count);
  lc.shutdown();
}
 
Example 13
Source File: CassandraApplicationTest.java    From examples with Apache License 2.0
@Test
public void testApplication() throws Exception
{
  try {
    LocalMode lma = LocalMode.newInstance();
    Configuration conf = new Configuration(false);
    conf.addResource(this.getClass().getResourceAsStream("/META-INF/properties-CassandraInputApplication.xml"));
    conf.set("dt.operator.CassandraReader.prop.store.node", "localhost");
    conf.set("dt.operator.CassandraReader.prop.store.keyspace", KEYSPACE);
    conf.set("dt.operator.CassandraReader.prop.tablename", TABLE_NAME);
    lma.prepareDAG(new Application(), conf);
    LocalMode.Controller lc = lma.getController();

    ((StramLocalCluster)lc).setExitCondition(new Callable<Boolean>()
    {
      @Override
      public Boolean call() throws Exception
      {
        return getNumOfEventsInStore() == 10;
      }
    });
    lc.run(10000); // runs for 10 seconds and quits
    Assert.assertEquals(10, getNumOfEventsInStore());
  } catch (ConstraintViolationException e) {
    Assert.fail("constraint violations: " + e.getConstraintViolations());
  }
}
 
Example 14
Source File: ApplicationTest.java    From attic-apex-core with Apache License 2.0
@Test
public void testApplication() throws Exception {
  try {
    LocalMode lma = LocalMode.newInstance();
    Configuration conf = new Configuration(false);
    conf.addResource(this.getClass().getResourceAsStream("/META-INF/properties.xml"));
    lma.prepareDAG(new Application(), conf);
    LocalMode.Controller lc = lma.getController();
    lc.run(10000); // runs for 10 seconds and quits
  } catch (ConstraintViolationException e) {
    Assert.fail("constraint violations: " + e.getConstraintViolations());
  }
}
 
Example 15
Source File: ApplicationTest.java    From attic-apex-malhar with Apache License 2.0
@Test
public void testApplication() throws Exception
{
  try {
    LocalMode lma = LocalMode.newInstance();
    Configuration conf = new Configuration(false);
    conf.addResource(this.getClass().getResourceAsStream("/META-INF/properties.xml"));
    lma.prepareDAG(new Application(), conf);
    LocalMode.Controller lc = lma.getController();
    lc.run(5000); // runs for 5 seconds and quits
  } catch (ConstraintViolationException e) {
    Assert.fail("constraint violations: " + e.getConstraintViolations());
  }
}
 
Example 16
Source File: KafkaEndpointTest.java    From attic-apex-malhar with Apache License 2.0
@Test
public void testApplicationWithPortEndpoint() throws Exception
{
  try {
    LocalMode lma = LocalMode.newInstance();
    Configuration conf = new Configuration(false);
    lma.prepareDAG(new KafkaPortApplication(kafka.getBroker(), testTopicData0, testTopicResult), conf);
    LocalMode.Controller lc = lma.getController();
    lc.runAsync();

    kafka.publish(testTopicData0, Arrays.asList("15/02/2016 10:15:00 +0000,1,paint1,11",
        "15/02/2016 10:16:00 +0000,2,paint2,12",
        "15/02/2016 10:17:00 +0000,3,paint3,13", "15/02/2016 10:18:00 +0000,4,paint4,14",
        "15/02/2016 10:19:00 +0000,5,paint5,15", "15/02/2016 10:10:00 +0000,6,abcde6,16"));

    // TODO: Workaround: append \r\n to the expected results because of a bug in CsvFormatter, which adds a newline char.
    String[] expectedLines = new String[]{"15/02/2016 10:18:00 +0000,15/02/2016 00:00:00 +0000,OILPAINT4\r\n",
        "15/02/2016 10:19:00 +0000,15/02/2016 00:00:00 +0000,OILPAINT5\r\n"};

    List<String> consume = kafka.consume(testTopicResult, 30000);

    Assert.assertTrue(Arrays.deepEquals(consume.toArray(new String[consume.size()]), expectedLines));

    lc.shutdown();
  } catch (Exception e) {
    Assert.fail("constraint violations: " + e);
  }
}
 
Example 17
Source File: OffsetManagerTest.java    From attic-apex-malhar with Apache License 2.0
public void testPartitionableInputOperator(KafkaConsumer consumer, int expectedCount) throws Exception
{
  // Set to 3 because we want to make sure the END_TUPLE from each of the 2 partitions is received and the offsets have been updated to 102
  latch = new CountDownLatch(3);

  // Start producer
  KafkaTestProducer p = new KafkaTestProducer(TEST_TOPIC, true);
  p.setProducerType("sync");
  p.setSendCount(totalCount);
  // wait for the producer to send all messages
  p.run();
  p.close();

  // Create DAG for testing.
  LocalMode lma = LocalMode.newInstance();
  DAG dag = lma.getDAG();

  // Create KafkaSinglePortStringInputOperator
  KafkaSinglePortStringInputOperator node = dag.addOperator("Kafka message consumer", KafkaSinglePortStringInputOperator.class);

  TestOffsetManager tfm = new TestOffsetManager();

  tfm.setFilename(TEST_TOPIC + OFFSET_FILE);

  node.setInitialPartitionCount(1);
  node.setOffsetManager(tfm);
  node.setStrategy(PartitionStrategy.ONE_TO_MANY.toString());
  node.setRepartitionInterval(-1);

  //set topic
  consumer.setTopic(TEST_TOPIC);
  //set the zookeeper list used to initialize the partition
  SetMultimap<String, String> zookeeper = HashMultimap.create();
  String zks = "localhost:" + KafkaOperatorTestBase.TEST_ZOOKEEPER_PORT[0];
  consumer.setZookeeper(zks);
  consumer.setInitialOffset("earliest");

  node.setConsumer(consumer);

  // Create Test tuple collector
  CollectorModule collector = dag.addOperator("TestMessageCollector", new CollectorModule());

  // Connect ports
  dag.addStream("Kafka message", node.outputPort, collector.inputPort).setLocality(Locality.CONTAINER_LOCAL);

  dag.setAttribute(Context.DAGContext.CHECKPOINT_WINDOW_COUNT, 1);

  // Create local cluster
  final LocalMode.Controller lc = lma.getController();
  lc.setHeartbeatMonitoringEnabled(true);

  lc.runAsync();

  // Wait up to 30s for the consumer to finish consuming all the messages and for the offsets to be updated to 100
  boolean isNotTimeout = latch.await(30000, TimeUnit.MILLISECONDS);
  assertTrue("TIMEOUT: 30s, collected " + collectedTuples.size() + " tuples", isNotTimeout);

  // Check results
  assertEquals("Tuple count " + collectedTuples, expectedCount, collectedTuples.size());
  logger.debug(String.format("Number of emitted tuples: %d", collectedTuples.size()));

  p.close();
  lc.shutdown();
}
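
The CountDownLatch pattern used here and in the following Kafka examples lets an operator inside the DAG signal the test thread: the collector counts the latch down as the expected tuples arrive, and latch.await(timeout, unit) turns that into a bounded wait, so the test fails fast on a hang instead of sleeping for a fixed interval.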
 
Example 18
Source File: KafkaInputOperatorTest.java    From attic-apex-malhar with Apache License 2.0
/**
 * Test AbstractKafkaSinglePortInputOperator (i.e. an input adapter for
 * Kafka, aka consumer). This module receives data from an outside test
 * generator through the Kafka message bus and feeds that data into the
 * Malhar streaming platform.
 *
 * [Generate message and send that to Kafka message bus] ==> [Receive that
 * message through Kafka input adapter (i.e. consumer) and send using
 * emitTuples() interface on output port during onMessage call]
 *
 * @throws Exception
 */
public void testKafkaInputOperator(int sleepTime, final int totalCount, KafkaConsumer consumer, boolean isValid, boolean idempotent) throws Exception
{
  // initialize the latch for this test
  latch = new CountDownLatch(1);

  // Start producer
  KafkaTestProducer p = new KafkaTestProducer(TEST_TOPIC);
  p.setSendCount(totalCount);
  new Thread(p).start();

  // Create DAG for testing.
  LocalMode lma = LocalMode.newInstance();
  DAG dag = lma.getDAG();

  // Create KafkaSinglePortStringInputOperator
  KafkaSinglePortStringInputOperator node = dag.addOperator("Kafka message consumer", KafkaSinglePortStringInputOperator.class);
  if (isSuicide) {
    // make some extreme assumptions to make it fail if checkpointing wrong offsets
    dag.setAttribute(Context.DAGContext.CHECKPOINT_WINDOW_COUNT, 1);
    dag.setAttribute(Context.OperatorContext.STORAGE_AGENT, new FSStorageAgent("target/ck", new Configuration()));
    node.setMaxTuplesPerWindow(500);
  }

  if (idempotent) {
    node.setWindowDataManager(new FSWindowDataManager());
  }
  consumer.setTopic(TEST_TOPIC);

  node.setConsumer(consumer);

  consumer.setCacheSize(5000);

  if (isValid) {
    node.setZookeeper("localhost:" + KafkaOperatorTestBase.TEST_ZOOKEEPER_PORT[0]);
  }

  // Create Test tuple collector
  CollectorModule<String> collector = dag.addOperator("TestMessageCollector", new CollectorModule<String>());

  // Connect ports
  dag.addStream("Kafka message", node.outputPort, collector.inputPort).setLocality(Locality.CONTAINER_LOCAL);

  // Create local cluster
  final LocalMode.Controller lc = lma.getController();
  lc.setHeartbeatMonitoringEnabled(false);

  lc.runAsync();

  // Wait up to 300s for the consumer to finish consuming all the messages
  Assert.assertTrue("TIMEOUT: 300s ", latch.await(300000, TimeUnit.MILLISECONDS));

  // Check results
  Assert.assertTrue("Expected count >= " + totalCount + "; Actual count " + tupleCount.intValue(),
      totalCount <= tupleCount.intValue());
  logger.debug(String.format("Number of emitted tuples: %d", tupleCount.intValue()));

  p.close();
  lc.shutdown();
}
 
Example 19
Source File: KafkaPartitionableInputOperatorTest.java    From attic-apex-malhar with Apache License 2.0
public void testPartitionableInputOperator(KafkaConsumer consumer) throws Exception
{

  // each broker should get an END_TUPLE message
  latch = new CountDownLatch(totalBrokers);

  int totalCount = 10000;

  // Start producer
  KafkaTestProducer p = new KafkaTestProducer(TEST_TOPIC, hasMultiPartition, hasMultiCluster);
  p.setSendCount(totalCount);
  new Thread(p).start();

  // Create DAG for testing.
  LocalMode lma = LocalMode.newInstance();
  DAG dag = lma.getDAG();

  // Create KafkaSinglePortStringInputOperator
  KafkaSinglePortStringInputOperator node = dag.addOperator("Kafka message consumer", KafkaSinglePortStringInputOperator.class);
  node.setInitialPartitionCount(1);

  // set topic
  consumer.setTopic(TEST_TOPIC);
  consumer.setInitialOffset("earliest");

  node.setConsumer(consumer);

  String clusterString = "cluster1::localhost:" + TEST_ZOOKEEPER_PORT[0] + (hasMultiCluster ? ";cluster2::localhost:" + TEST_ZOOKEEPER_PORT[1] : "");
  node.setZookeeper(clusterString);

  // Create Test tuple collector
  CollectorModule<String> collector = dag.addOperator("TestMessageCollector", new CollectorModule<String>());

  // Connect ports
  dag.addStream("Kafka message", node.outputPort, collector.inputPort).setLocality(Locality.CONTAINER_LOCAL);

  // Create local cluster
  final LocalMode.Controller lc = lma.getController();
  lc.setHeartbeatMonitoringEnabled(false);

  lc.runAsync();

  // Wait up to 40s for the consumer to finish consuming all the messages
  Assert.assertTrue("TIMEOUT: 40s ", latch.await(40000, TimeUnit.MILLISECONDS));

  // Check results
  Assert.assertEquals("Collections size", 1, collections.size());
  Assert.assertEquals("Tuple count", totalCount, collections.get(collector.inputPort.id).size());
  logger.debug(String.format("Number of emitted tuples: %d", collections.get(collector.inputPort.id).size()));

  p.close();
  lc.shutdown();
  // Kafka has a shutdown bug in its connector: make sure the Kafka client resources have been cleaned up before shutting down the broker
  Thread.sleep(5000);
}
 
Example 20
Source File: KafkaEndpointTest.java    From attic-apex-malhar with Apache License 2.0
@Ignore("Skipping because POJOInnerJoinOperator has issues and needs to be replaced with Windowed variant first.")
@Test
public void testApplicationJoin() throws Exception
{
  String sql = "INSERT INTO SALES " +
      "SELECT STREAM A.ROWTIME, FLOOR(A.ROWTIME TO DAY), " +
      "APEXCONCAT('OILPAINT', SUBSTRING(A.PRODUCT, 6, 7)), B.CATEGORY " +
      "FROM ORDERS AS A " +
      "JOIN CATEGORY AS B ON A.id = B.id " +
      "WHERE A.id > 3 AND A.PRODUCT LIKE 'paint%'";
  try {
    LocalMode lma = LocalMode.newInstance();
    Configuration conf = new Configuration(false);
    lma.prepareDAG(new KafkaJoinApplication(kafka.getBroker(), testTopicData0, testTopicData1,
        testTopicResult, sql), conf);
    LocalMode.Controller lc = lma.getController();
    lc.runAsync();

    kafka.publish(testTopicData0, Arrays.asList("15/02/2016 10:15:00 +0000,1,paint1,11",
        "15/02/2016 10:16:00 +0000,2,paint2,12",
        "15/02/2016 10:17:00 +0000,3,paint3,13", "15/02/2016 10:18:00 +0000,4,paint4,14",
        "15/02/2016 10:19:00 +0000,5,paint5,15", "15/02/2016 10:10:00 +0000,6,abcde6,16"));

    kafka.publish(testTopicData1, Arrays.asList("1,ABC",
        "2,DEF",
        "3,GHI", "4,JKL",
        "5,MNO", "6,PQR"));

    // TODO: Workaround: append \r\n to the expected results because of a bug in CsvFormatter, which adds a newline char.
    String[] expectedLines = new String[]{"15/02/2016 10:18:00 +0000,15/02/2016 00:00:00 +0000,OILPAINT4,JKL\r\n",
        "15/02/2016 10:19:00 +0000,15/02/2016 00:00:00 +0000,OILPAINT5,MNO\r\n"};

    List<String> consume = kafka.consume(testTopicResult, 30000);

    Assert.assertTrue(Arrays.deepEquals(consume.toArray(new String[consume.size()]), expectedLines));

    lc.shutdown();
  } catch (Exception e) {
    Assert.fail("constraint violations: " + e);
  }
}