Java Code Examples for com.datatorrent.api.LocalMode#getController()

The following examples show how to use com.datatorrent.api.LocalMode#getController(). Each example notes its source file, the project it comes from, and its license.
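
Most of the examples follow the same basic pattern: create a LocalMode instance, prepare a DAG from an application and a Configuration, obtain a LocalMode.Controller via getController(), and run the application either synchronously with run(timeout) or asynchronously with runAsync() followed by shutdown(). A minimal sketch of the synchronous form, where MyApplication is a hypothetical placeholder for any StreamingApplication:

LocalMode lma = LocalMode.newInstance();
Configuration conf = new Configuration(false);
lma.prepareDAG(new MyApplication(), conf); // MyApplication is a placeholder
LocalMode.Controller lc = lma.getController();
lc.run(10000); // run the embedded cluster for about 10 seconds, then return
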
Example 1
Source File: EventIncrementerAppTest.java    From attic-apex-malhar with Apache License 2.0
@Test
public void testEventIncrementerApp() throws IOException
{
  Logger logger = LoggerFactory.getLogger(EventIncrementerAppTest.class);
  LocalMode lm = LocalMode.newInstance();
  Configuration conf = new Configuration();
  InputStream is = new FileInputStream("src/site/conf/dt-site-testbench.xml");
  conf.addResource(is);
  conf.get("dt.application.EventIncrementerApp.operator.hmapOper.seed");
  conf.get("dt.application.EventIncrementerApp.operator.hmapOper.keys");
  conf.get("dt.application.EventIncrementerApp.operator.hmapOper.numKeys");
  try {
    lm.prepareDAG(new EventIncrementerApp(), conf);
    LocalMode.Controller lc = lm.getController();
    lc.run(20000);
  } catch (Exception ex) {
    logger.info(ex.getMessage());
  }
  is.close();
}
 
Example 2
Source File: S3RecordReaderModuleAppTest.java    From attic-apex-malhar with Apache License 2.0
@Test(expected = IllegalArgumentException.class)
public void testS3MissingRecordLength() throws Exception
{
  S3FixedWidthApplication app = new S3FixedWidthApplication();
  LocalMode lma = LocalMode.newInstance();
  Configuration conf = new Configuration(false);
  conf.set("dt.operator.S3RecordReaderModule.prop.files", files);
  // Should throw IllegalArgumentException since recordLength is not set:
  //conf.set("dt.operator.S3RecordReaderModule.prop.recordLength", "8");
  conf.set("dt.operator.S3RecordReaderModule.prop.blocksThreshold", "1");
  conf.set("dt.operator.S3RecordReaderModule.prop.scanIntervalMillis", "10000");

  lma.prepareDAG(app, conf);
  LocalMode.Controller lc = lma.getController();
  lc.setHeartbeatMonitoringEnabled(true);
  lc.runAsync();
  LOG.debug("Waiting for app to finish");
  Thread.sleep(1000); // give the app a second to run before shutting down
  lc.shutdown();
}
 
Example 3
Source File: KafkaInputBenchmarkTest.java    From attic-apex-malhar with Apache License 2.0
@Test
public void testBenchmark() throws FileNotFoundException
{
  Configuration conf = new Configuration();
  InputStream is = new FileInputStream("src/site/conf/dt-site-kafka.xml");
  conf.addResource(is);

  LocalMode lma = LocalMode.newInstance();

  try {
    lma.prepareDAG(new KafkaInputBenchmark(), conf);
    LocalMode.Controller lc = lma.getController();
    lc.run(30000);
  } catch (Exception ex) {
    throw new RuntimeException(ex);
  }
}
 
Example 4
Source File: S3RecordReaderModuleAppTest.java    From attic-apex-malhar with Apache License 2.0
@Test
public void testS3FixedWidthRecords() throws Exception
{

  S3FixedWidthApplication app = new S3FixedWidthApplication();
  LocalMode lma = LocalMode.newInstance();
  Configuration conf = new Configuration(false);
  conf.set("dt.operator.S3RecordReaderModule.prop.files", files);
  conf.set("dt.operator.S3RecordReaderModule.prop.recordLength", "8");
  conf.set("dt.operator.S3RecordReaderModule.prop.blocksThreshold", "1");
  conf.set("dt.operator.S3RecordReaderModule.prop.scanIntervalMillis", "10000");

  lma.prepareDAG(app, conf);
  LocalMode.Controller lc = lma.getController();
  lc.setHeartbeatMonitoringEnabled(true);
  lc.runAsync();
  LOG.debug("Waiting for app to finish");
  Thread.sleep(1000); // give the app a second to run before shutting down
  lc.shutdown();
}
 
Example 5
Source File: JdbcIOAppTest.java    From attic-apex-malhar with Apache License 2.0
@Test
public void testApplication() throws Exception
{
  try {
    LocalMode lma = LocalMode.newInstance();
    Configuration conf = new Configuration(false);
    lma.prepareDAG(new JdbcIOApp(), conf);
    LocalMode.Controller lc = lma.getController();
    lc.runAsync();
    // wait for records to be added to table
    Thread.sleep(3000);
    lc.shutdown();
    Assert.assertEquals("Events in store", 10, getNumOfEventsInStore());
  } catch (ConstraintViolationException e) {
    Assert.fail("constraint violations: " + e.getConstraintViolations());
  }
}
 
Example 6
Source File: AutoCompleteTest.java    From attic-apex-malhar with Apache License 2.0
@Test
public void AutoCompleteTest() throws Exception
{
  LocalMode lma = LocalMode.newInstance();
  Configuration conf = new Configuration(false);
  conf.set("dt.application.AutoComplete.operator.console.silent", "true");
  lma.prepareDAG(new AutoComplete(), conf);
  LocalMode.Controller lc = lma.getController();

  ((StramLocalCluster)lc).setExitCondition(new Callable<Boolean>()
  {
    @Override
    public Boolean call() throws Exception
    {
      return AutoComplete.Collector.isDone();
    }
  });

  lc.run(200000);

  Assert.assertTrue(AutoComplete.Collector.getResult().containsKey("had"));
  Assert.assertTrue(AutoComplete.Collector.getResult().containsKey("hadoop"));
  Assert.assertEquals(2, AutoComplete.Collector.getResult().get("mapreduce").get(0).getCount());
}
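
Note: setExitCondition() is not part of the LocalMode.Controller interface, which is why the controller is cast to StramLocalCluster before the call (Example 8 below guards the same cast with instanceof). Since Callable<Boolean> is a functional interface, on Java 8+ the anonymous class above can be written as a lambda; a minimal sketch:

((StramLocalCluster)lc).setExitCondition(() -> AutoComplete.Collector.isDone());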
 
Example 7
Source File: ParquetFilePOJOReaderTest.java    From attic-apex-malhar with Apache License 2.0
@Test
public void testApplication() throws Exception
{
  try {
    FileContext.getLocalFSFileContext().delete(new Path(new File(testMeta.dir).getAbsolutePath()), true);
    List<EventRecord> data = Lists.newArrayList();
    data.add(new EventRecord(1, "cust1", 12321L, true, 12.22f, 12.23));
    data.add(new EventRecord(2, "cust2", 12322L, true, 22.22f, 22.23));
    data.add(new EventRecord(3, "cust3", 12323L, true, 32.22f, 32.23));
    writeParquetFile(PARQUET_SCHEMA, new File(testMeta.dir, "data.parquet"), data);
    parquetFilePOJOReader.setDirectory(testMeta.dir);
    parquetFilePOJOReader.setParquetSchema(PARQUET_SCHEMA);
    LocalMode lma = LocalMode.newInstance();
    Configuration conf = new Configuration(false);
    ParquetReaderApplication parquetReaderApplication = new ParquetReaderApplication();
    parquetReaderApplication.setParquetFilePOJOReader(parquetFilePOJOReader);
    lma.prepareDAG(parquetReaderApplication, conf);
    LocalMode.Controller lc = lma.getController();
    lc.run(10000); // runs for 10 seconds and quits
  } catch (ConstraintViolationException e) {
    Assert.fail("constraint violations: " + e.getConstraintViolations());
  }
}
 
Example 8
Source File: ApexStreamImpl.java    From attic-apex-malhar with Apache License 2.0
@Override
public void runEmbedded(boolean async, long duration, Callable<Boolean> exitCondition)
{
  LocalMode lma = LocalMode.newInstance();
  populateDag(lma.getDAG());
  LocalMode.Controller lc = lma.getController();
  if (lc instanceof StramLocalCluster) {
    ((StramLocalCluster)lc).setExitCondition(exitCondition);
  }
  if (async) {
    lc.runAsync();
  } else {
    if (duration >= 0) {
      lc.run(duration);
    } else {
      lc.run();
    }
  }
}
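
A hypothetical caller of runEmbedded(): block for up to ten seconds, or return earlier once the supplied exit condition reports true (stream and resultsCollected() are illustrative placeholders, not names from the source):

stream.runEmbedded(false, 10000, new Callable<Boolean>()
{
  @Override
  public Boolean call() throws Exception
  {
    return resultsCollected(); // user-supplied completion check
  }
});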
 
Example 9
Source File: FilteredEventClassifierAppTest.java    From attic-apex-malhar with Apache License 2.0
@Test
public void testFilterClassifierApp() throws IOException
{
  Logger logger = LoggerFactory.getLogger(FilteredEventClassifierAppTest.class);
  LocalMode lm = LocalMode.newInstance();
  Configuration conf = new Configuration();
  InputStream is = new FileInputStream("src/site/conf/dt-site-testbench.xml");
  conf.addResource(is);
  conf.get("dt.application.FilteredEventClassifierApp.operator.hmapOper.keys");
  conf.get("dt.application.FilteredEventClassifierApp.operator.hmapOper.numKeys");
  try {
    lm.prepareDAG(new FilteredEventClassifierApp(), conf);
    LocalMode.Controller lc = lm.getController();
    lc.run(20000);
  } catch (Exception ex) {
    logger.info(ex.getMessage());
  }
  is.close();
}
 
Example 10
Source File: OldFaithfulApplicationTest.java    From attic-apex-malhar with Apache License 2.0
@Test
public void testSomeMethod() throws Exception
{
  LocalMode lma = LocalMode.newInstance();
  OldFaithfulApplication app = new OldFaithfulApplication();
  app.populateDAG(lma.getDAG(), new Configuration(false));

  try {
    LocalMode.Controller lc = lma.getController();
    lc.setHeartbeatMonitoringEnabled(false);
    lc.run(5000);
  } catch (Exception e) {
    LOG.error("Exception: ", e);
    Assert.fail("Unexpected exception.");
  }
}
 
Example 11
Source File: ApplicationTest.java    From attic-apex-malhar with Apache License 2.0
/**
 * Runs the Yahoo Finance application in local mode for 10 seconds.
 *
 * @throws Exception
 */
@Test
public void testApplication() throws Exception
{
  LocalMode lma = LocalMode.newInstance();
  new YahooFinanceApplication().populateDAG(lma.getDAG(), new Configuration(false));
  LocalMode.Controller lc = lma.getController();
  lc.run(10000);
}
 
Example 12
Source File: ApplicationTest.java    From examples with Apache License 2.0
private LocalMode.Controller asyncRun() throws Exception {
  LocalMode lma = LocalMode.newInstance();
  lma.prepareDAG(new ActiveMQApplication(), conf);
  LocalMode.Controller lc = lma.getController();
  lc.runAsync();
  return lc;
}
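
A hypothetical test built on this helper: start the application asynchronously, give it time to process, then shut the local cluster down (the test name and sleep duration are assumptions, not from the source):

@Test
public void testActiveMQApplication() throws Exception
{
  LocalMode.Controller lc = asyncRun();
  Thread.sleep(5000); // allow the DAG time to process messages
  lc.shutdown();
}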
 
Example 13
Source File: ApplicationWithDerbySQLTest.java    From attic-apex-malhar with Apache License 2.0
@Test
public void testSomeMethod() throws Exception
{
  LocalMode lma = LocalMode.newInstance();
  new ApplicationWithDerbySQL().populateDAG(lma.getDAG(), new Configuration(false));
  LocalMode.Controller lc = lma.getController();

  long start = System.currentTimeMillis();
  lc.run();
  long end = System.currentTimeMillis();
  long time = end - start;
  LOG.debug("Test used " + time + " ms");
}
 
Example 14
Source File: ApplicationWithStreamAPITest.java    From attic-apex-malhar with Apache License 2.0
@Test
public void testWordcountApplication() throws Exception
{
  LocalMode lma = LocalMode.newInstance();
  Configuration conf = new Configuration(false);
  conf.set("dt.application.WordCountStreamingApiDemo.operator.WCOutput.silent", "true");
  conf.set("dt.application.WordCountStreamingApiDemo.operator.WordOutput.silent", "true");
  lma.prepareDAG(new ApplicationWithStreamAPI(), conf);
  LocalMode.Controller lc = lma.getController();
  long start = System.currentTimeMillis();
  lc.run(5000);
  long end = System.currentTimeMillis();
  long time = end - start;
  logger.info("Test used " + time + " ms");
}
 
Example 15
Source File: MaxPerKeyExamplesTest.java    From attic-apex-malhar with Apache License 2.0
@Test
public void MaxPerKeyExampleTest() throws Exception
{
  LocalMode lma = LocalMode.newInstance();
  Configuration conf = new Configuration(false);
  setConfig(conf);

  MaxPerKeyExamples app = new MaxPerKeyExamples();

  lma.prepareDAG(app, conf);

  LocalMode.Controller lc = lma.getController();
  ((StramLocalCluster)lc).setExitCondition(new Callable<Boolean>()
  {
    @Override
    public Boolean call() throws Exception
    {
      return getNumEntries() == 2;
    }
  });

  lc.run(5000);

  double[] result = new double[2];
  result[0] = getMaxMeanTemp().get(6);
  result[1] = getMaxMeanTemp().get(7);
  Assert.assertArrayEquals(MEANTEMPS, result, 0.0);
}
 
Example 16
Source File: KeyValueStoreOperatorTest.java    From attic-apex-malhar with Apache License 2.0
public void testInputOperator() throws Exception
{
  testStore.connect();
  testStore.put("test_abc", "789");
  testStore.put("test_def", "456");
  testStore.put("test_ghi", "123");
  try {
    LocalMode lma = LocalMode.newInstance();
    DAG dag = lma.getDAG();
    @SuppressWarnings("unchecked")
    InputOperator<S> inputOperator = dag.addOperator("input", new InputOperator<S>());
    CollectorModule<Object> collector = dag.addOperator("collector", new CollectorModule<Object>());
    inputOperator.addKey("test_abc");
    inputOperator.addKey("test_def");
    inputOperator.addKey("test_ghi");
    inputOperator.setStore(operatorStore);
    dag.addStream("stream", inputOperator.outputPort, collector.inputPort);
    final LocalMode.Controller lc = lma.getController();
    lc.run(3000);
    lc.shutdown();
    Assert.assertEquals("789", CollectorModule.resultMap.get("test_abc"));
    Assert.assertEquals("456", CollectorModule.resultMap.get("test_def"));
    Assert.assertEquals("123", CollectorModule.resultMap.get("test_ghi"));

  } finally {
    testStore.remove("test_abc");
    testStore.remove("test_def");
    testStore.remove("test_ghi");
    testStore.disconnect();
  }
}
 
Example 17
Source File: JdbcPollerApplicationTest.java    From examples with Apache License 2.0
@Test
public void testApplication() throws Exception
{
  try {
    LocalMode lma = LocalMode.newInstance();
    Configuration conf = new Configuration(false);
    conf.set("dt.application.PollJdbcToHDFSApp.operator.JdbcPoller.prop.store.databaseUrl", URL);
    conf.set("dt.application.PollJdbcToHDFSApp.operator.JdbcPoller.prop.store.databaseDriver", DB_DRIVER);
    conf.setInt("dt.application.PollJdbcToHDFSApp.operator.JdbcPoller.prop.partitionCount", 2);
    conf.set("dt.application.PollJdbcToHDFSApp.operator.JdbcPoller.prop.key", "ACCOUNT_NO");
    conf.set("dt.application.PollJdbcToHDFSApp.operator.JdbcPoller.prop.columnsExpression", "ACCOUNT_NO,NAME,AMOUNT");
    conf.set("dt.application.PollJdbcToHDFSApp.operator.JdbcPoller.prop.tableName", TABLE_NAME);
    conf.set("dt.application.PollJdbcToHDFSApp.operator.JdbcPoller.port.outputPort.attr.TUPLE_CLASS",
        "com.example.mydtapp.PojoEvent");
    conf.set("dt.application.PollJdbcToHDFSApp.operator.Writer.filePath", OUTPUT_DIR_NAME);

    lma.prepareDAG(new JdbcPollerApplication(), conf);
    LocalMode.Controller lc = lma.getController();
    lc.runAsync();

    // wait for output files to roll      
    Thread.sleep(5000);

    String[] extensions = { "dat.0", "tmp" };
    Collection<File> list = FileUtils.listFiles(new File(OUTPUT_DIR_NAME), extensions, false);
    int recordsCount = 0;
    for (File file : list) {
      recordsCount += FileUtils.readLines(file).size();
    }
    Assert.assertEquals("Records in file", 10, recordsCount);

  } catch (ConstraintViolationException e) {
    Assert.fail("constraint violations: " + e.getConstraintViolations());
  }
}
 
Example 18
Source File: ApplicationTest.java    From examples with Apache License 2.0
private LocalMode.Controller asyncRun() throws Exception {
  Configuration conf = getConfig();
  LocalMode lma = LocalMode.newInstance();
  lma.prepareDAG(new KafkaApp(), conf);
  LocalMode.Controller lc = lma.getController();
  lc.runAsync();
  return lc;
}
 
Example 19
Source File: KafkaEndpointTest.java    From attic-apex-malhar with Apache License 2.0
@Ignore("Skipping because POJOInnerJoinOperator has issues and needs to be replaced with Windowed variant first.")
@Test
public void testApplicationJoin() throws Exception
{
  String sql = "INSERT INTO SALES " +
      "SELECT STREAM A.ROWTIME, FLOOR(A.ROWTIME TO DAY), " +
      "APEXCONCAT('OILPAINT', SUBSTRING(A.PRODUCT, 6, 7)), B.CATEGORY " +
      "FROM ORDERS AS A " +
      "JOIN CATEGORY AS B ON A.id = B.id " +
      "WHERE A.id > 3 AND A.PRODUCT LIKE 'paint%'";
  try {
    LocalMode lma = LocalMode.newInstance();
    Configuration conf = new Configuration(false);
    lma.prepareDAG(new KafkaJoinApplication(kafka.getBroker(), testTopicData0, testTopicData1,
        testTopicResult, sql), conf);
    LocalMode.Controller lc = lma.getController();
    lc.runAsync();

    kafka.publish(testTopicData0, Arrays.asList("15/02/2016 10:15:00 +0000,1,paint1,11",
        "15/02/2016 10:16:00 +0000,2,paint2,12",
        "15/02/2016 10:17:00 +0000,3,paint3,13", "15/02/2016 10:18:00 +0000,4,paint4,14",
        "15/02/2016 10:19:00 +0000,5,paint5,15", "15/02/2016 10:10:00 +0000,6,abcde6,16"));

    kafka.publish(testTopicData1, Arrays.asList("1,ABC",
        "2,DEF",
        "3,GHI", "4,JKL",
        "5,MNO", "6,PQR"));

    // TODO: Workaround: expected lines include \r\n because of a bug in CsvFormatter, which appends a newline char.
    String[] expectedLines = new String[]{"15/02/2016 10:18:00 +0000,15/02/2016 00:00:00 +0000,OILPAINT4,JKL\r\n",
        "15/02/2016 10:19:00 +0000,15/02/2016 00:00:00 +0000,OILPAINT5,MNO\r\n"};

    List<String> consume = kafka.consume(testTopicResult, 30000);

    Assert.assertTrue(Arrays.deepEquals(consume.toArray(new String[consume.size()]), expectedLines));

    lc.shutdown();
  } catch (Exception e) {
    Assert.fail("constraint violations: " + e);
  }
}
 
Example 20
Source File: OffsetManagerTest.java    From attic-apex-malhar with Apache License 2.0
public void testPartitionableInputOperator(KafkaConsumer consumer, int expectedCount) throws Exception
{
  // Set to 3 because we want to make sure the END_TUPLEs from both partitions
  // are received and the offsets have been updated to 102.
  latch = new CountDownLatch(3);

  // Start producer
  KafkaTestProducer p = new KafkaTestProducer(TEST_TOPIC, true);
  p.setProducerType("sync");
  p.setSendCount(totalCount);
  // wait for the producer to send all messages
  p.run();
  p.close();

  // Create DAG for testing.
  LocalMode lma = LocalMode.newInstance();
  DAG dag = lma.getDAG();

  // Create KafkaSinglePortStringInputOperator
  KafkaSinglePortStringInputOperator node = dag.addOperator("Kafka message consumer", KafkaSinglePortStringInputOperator.class);

  TestOffsetManager tfm = new TestOffsetManager();

  tfm.setFilename(TEST_TOPIC + OFFSET_FILE);

  node.setInitialPartitionCount(1);
  node.setOffsetManager(tfm);
  node.setStrategy(PartitionStrategy.ONE_TO_MANY.toString());
  node.setRepartitionInterval(-1);

  // set the topic
  consumer.setTopic(TEST_TOPIC);
  // set the zookeeper address used to initialize the partition
  String zks = "localhost:" + KafkaOperatorTestBase.TEST_ZOOKEEPER_PORT[0];
  consumer.setZookeeper(zks);
  consumer.setInitialOffset("earliest");

  node.setConsumer(consumer);

  // Create Test tuple collector
  CollectorModule collector = dag.addOperator("TestMessageCollector", new CollectorModule());

  // Connect ports
  dag.addStream("Kafka message", node.outputPort, collector.inputPort).setLocality(Locality.CONTAINER_LOCAL);

  dag.setAttribute(Context.DAGContext.CHECKPOINT_WINDOW_COUNT, 1);

  // Create local cluster
  final LocalMode.Controller lc = lma.getController();
  lc.setHeartbeatMonitoringEnabled(true);

  lc.runAsync();

  // Wait up to 30s for the consumer to finish consuming all the messages
  // and for the offsets to be updated to 100.
  boolean isNotTimeout = latch.await(30000, TimeUnit.MILLISECONDS);
  assertTrue("TIMEOUT: 30s, collected " + collectedTuples.size() + " tuples", isNotTimeout);

  // Check results
  assertEquals("Tuple count " + collectedTuples, expectedCount, collectedTuples.size());
  logger.debug(String.format("Number of emitted tuples: %d", collectedTuples.size()));

  lc.shutdown();
}