Java Code Examples for com.datatorrent.api.LocalMode#Controller

The following examples show how to use com.datatorrent.api.LocalMode#Controller. Each example is taken from an open source project; the source file and originating project are noted above its code.
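All of these examples follow the same basic lifecycle: create a LocalMode instance, build the DAG with prepareDAG(), obtain a LocalMode.Controller via getController(), and then either call run(timeoutMillis) for a bounded synchronous run or call runAsync() and later shutdown(). A minimal sketch of that pattern is shown below (MyApplication is a hypothetical placeholder for any StreamingApplication, not part of the API):

LocalMode lma = LocalMode.newInstance();
Configuration conf = new Configuration(false);
lma.prepareDAG(new MyApplication(), conf); // MyApplication: hypothetical StreamingApplication
LocalMode.Controller lc = lma.getController();
lc.run(10000); // synchronous: runs the local cluster for ~10 seconds, then returns
// ... or run asynchronously and stop explicitly:
// lc.runAsync();
// lc.shutdown();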
Example 1
Source File: StramLocalClusterTest.java    From attic-apex-core with Apache License 2.0
@Test
public void testDynamicLoading() throws Exception
{
  String generatedJar = generatejar("POJO");
  URLClassLoader uCl = URLClassLoader.newInstance(new URL[] {new File(generatedJar).toURI().toURL()});
  Class<?> pojo = uCl.loadClass("POJO");

  DynamicLoaderApp app = new DynamicLoaderApp();
  app.generatedJar = generatedJar;
  app.pojo = pojo;

  LocalMode lma = LocalMode.newInstance();
  lma.prepareDAG(app, new Configuration());
  LocalMode.Controller lc = lma.getController();
  lc.runAsync();
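  // wait for the app to signal completion (the static latch is expected to be counted down inside DynamicLoaderApp)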
  DynamicLoaderApp.latch.await();
  Assert.assertTrue(DynamicLoaderApp.passed);
  lc.shutdown();
}
 
Example 2
Source File: EventClassifierAppTest.java    From attic-apex-malhar with Apache License 2.0
@Test
public void testEventClassifierApp() throws IOException
{
  Logger logger = LoggerFactory.getLogger(EventClassifierAppTest.class);
  LocalMode lm = LocalMode.newInstance();
  Configuration conf = new Configuration();
  InputStream is = new FileInputStream("src/site/conf/dt-site-testbench.xml");
  conf.addResource(is);
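  // read the expected testbench key properties; the returned values are not used here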
  conf.get("dt.application.EventClassifierApp.operator.hmapOper.keys");
  conf.get("dt.application.EventClassifierApp.operator.hmapOper.numKeys");
  try {
    lm.prepareDAG(new EventClassifierApp(), conf);
    LocalMode.Controller lc = lm.getController();
    lc.run(20000);
  } catch (Exception ex) {
    logger.error("exception running EventClassifierApp", ex);
  }
  is.close();
}
 
Example 3
Source File: AutoCompleteTest.java    From attic-apex-malhar with Apache License 2.0
@Test
public void testAutoComplete() throws Exception
{
  LocalMode lma = LocalMode.newInstance();
  Configuration conf = new Configuration(false);
  conf.set("dt.application.AutoComplete.operator.console.silent", "true");
  lma.prepareDAG(new AutoComplete(), conf);
  LocalMode.Controller lc = lma.getController();

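  // in local mode the Controller is a StramLocalCluster, which exposes setExitCondition();
  // the run below ends as soon as the condition returns true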
  ((StramLocalCluster)lc).setExitCondition(new Callable<Boolean>()
  {
    @Override
    public Boolean call() throws Exception
    {
      return AutoComplete.Collector.isDone();
    }
  });

  lc.run(200000);

  Assert.assertTrue(AutoComplete.Collector.getResult().containsKey("had"));
  Assert.assertTrue(AutoComplete.Collector.getResult().containsKey("hadoop"));
  Assert.assertEquals(2, AutoComplete.Collector.getResult().get("mapreduce").get(0).getCount());

}
 
Example 4
Source File: JdbcInputOperatorApplicationTest.java    From attic-apex-malhar with Apache License 2.0
public void testApplication(StreamingApplication streamingApplication) throws Exception
{
  try {
    LocalMode lma = LocalMode.newInstance();
    Configuration conf = new Configuration(false);
    conf.addResource(this.getClass().getResourceAsStream("/JdbcProperties.xml"));
    lma.prepareDAG(streamingApplication, conf);
    LocalMode.Controller lc = lma.getController();
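    // disable heartbeat monitoring in the local cluster (commonly done in tests so slow local runs are not treated as failures)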
    lc.setHeartbeatMonitoringEnabled(false);
    ((StramLocalCluster)lc).setExitCondition(new Callable<Boolean>()
    {
      @Override
      public Boolean call() throws Exception
      {
        return TupleCount == 10;
      }
    });
    lc.run(10000); // runs for 10 seconds and quits
    Assert.assertEquals("rows in db", TupleCount, getNumOfRowsinTable(TABLE_POJO_NAME));
  } catch (ConstraintViolationException e) {
    Assert.fail("constraint violations: " + e.getConstraintViolations());
  }

}
 
Example 5
Source File: AvroFileInputOperatorTest.java    From attic-apex-malhar with Apache License 2.0
@Test
public void testApplication() throws Exception
{
  try {
    FileContext.getLocalFSFileContext().delete(new Path(new File(testMeta.dir).getAbsolutePath()), true);
    int cnt = 7;
    createAvroInput(cnt);
    writeAvroFile(new File(FILENAME));
    createAvroInput(cnt - 2);
    writeAvroFile(new File(OTHER_FILE));
    avroFileInput.setDirectory(testMeta.dir);

    LocalMode lma = LocalMode.newInstance();
    Configuration conf = new Configuration(false);

    AvroReaderApplication avroReaderApplication = new AvroReaderApplication();
    avroReaderApplication.setAvroFileInputOperator(avroFileInput);
    lma.prepareDAG(avroReaderApplication, conf);

    LocalMode.Controller lc = lma.getController();
    lc.run(10000); // runs for 10 seconds and quits
  } catch (ConstraintViolationException e) {
    Assert.fail("constraint violations: " + e.getConstraintViolations());
  }
}
 
Example 6
Source File: LocalApexDoTask.java    From incubator-samoa with Apache License 2.0
public static void main(String[] args) {

    ApexTopology apexTopo = ApexSamoaUtils.argsToTopology(args);

    LocalMode lma = LocalMode.newInstance();
    Configuration conf = new Configuration(false);
//    conf.set("dt.loggers.level", "org.apache.*:DEBUG");

    try {
      lma.prepareDAG(new ApexTask(apexTopo), conf);
      System.out.println("Dag Set in lma: " + lma.getDAG());
      ((LogicalPlan) lma.getDAG()).validate();
    } catch (Exception e) {
      throw new RuntimeException(e);
    }
    LocalMode.Controller lc = lma.getController();
    lc.setHeartbeatMonitoringEnabled(false);

    lc.runAsync();
  }
 
Example 7
Source File: KafkaEndpointTest.java    From attic-apex-malhar with Apache License 2.0
@Test
public void testApplicationSelectInsertWithAPI() throws Exception
{
  try {
    LocalMode lma = LocalMode.newInstance();
    Configuration conf = new Configuration(false);
    lma.prepareDAG(new KafkaApplication(kafka.getBroker(), testTopicData0, testTopicResult), conf);
    LocalMode.Controller lc = lma.getController();
    lc.runAsync();

    kafka.publish(testTopicData0, Arrays.asList("15/02/2016 10:15:00 +0000,1,paint1,11",
        "15/02/2016 10:16:00 +0000,2,paint2,12",
        "15/02/2016 10:17:00 +0000,3,paint3,13", "15/02/2016 10:18:00 +0000,4,paint4,14",
        "15/02/2016 10:19:00 +0000,5,paint5,15", "15/02/2016 10:10:00 +0000,6,abcde6,16"));

    // TODO: Workaround to add \r\n char to test results because of bug in CsvFormatter which adds new line char.
    String[] expectedLines = new String[]{"15/02/2016 10:18:00 +0000,15/02/2016 00:00:00 +0000,OILPAINT4\r\n",
        "15/02/2016 10:19:00 +0000,15/02/2016 00:00:00 +0000,OILPAINT5\r\n"};

    List<String> consume = kafka.consume(testTopicResult, 30000);
    Assert.assertTrue(Arrays.deepEquals(consume.toArray(new String[consume.size()]), expectedLines));

    lc.shutdown();
  } catch (Exception e) {
    Assert.fail("constraint violations: " + e);
  }
}
 
Example 8
Source File: SQLApplicationWithModelFileTest.java    From attic-apex-malhar with Apache License 2.0
@Test
public void test() throws Exception
{
  LocalMode lma = LocalMode.newInstance();
  Configuration conf = new Configuration(false);
  conf.addResource(this.getClass().getResourceAsStream("/META-INF/properties.xml"));
  conf.addResource(this.getClass().getResourceAsStream("/META-INF/properties-SQLApplicationWithModelFile.xml"));

  SQLApplicationWithModelFile app = new SQLApplicationWithModelFile();

  lma.prepareDAG(app, conf);

  LocalMode.Controller lc = lma.getController();

  PrintStream originalSysout = System.out;
  final ByteArrayOutputStream baos = new ByteArrayOutputStream();
  System.setOut(new PrintStream(baos));

  lc.runAsync();
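  // waitTillStdoutIsPopulated(...) is a helper defined elsewhere in this test class; it waits up to 30s for the captured output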
  waitTillStdoutIsPopulated(baos, 30000);
  lc.shutdown();

  System.setOut(originalSysout);

  String[] sout = baos.toString().split(System.lineSeparator());
  Collection<String> filter = Collections2.filter(Arrays.asList(sout), Predicates.containsPattern("Delta Record:"));

  String[] actualLines = filter.toArray(new String[filter.size()]);
  Assert.assertTrue(actualLines[0].contains("RowTime=Mon Feb 15 10:15:00 GMT 2016, Product=paint1"));
  Assert.assertTrue(actualLines[1].contains("RowTime=Mon Feb 15 10:16:00 GMT 2016, Product=paint2"));
  Assert.assertTrue(actualLines[2].contains("RowTime=Mon Feb 15 10:17:00 GMT 2016, Product=paint3"));
  Assert.assertTrue(actualLines[3].contains("RowTime=Mon Feb 15 10:18:00 GMT 2016, Product=paint4"));
  Assert.assertTrue(actualLines[4].contains("RowTime=Mon Feb 15 10:19:00 GMT 2016, Product=paint5"));
  Assert.assertTrue(actualLines[5].contains("RowTime=Mon Feb 15 10:10:00 GMT 2016, Product=abcde6"));
}
 
Example 9
Source File: ApplicationWithStreamAPITest.java    From attic-apex-malhar with Apache License 2.0
@Test
public void testWordcountApplication() throws Exception
{
  LocalMode lma = LocalMode.newInstance();
  Configuration conf = new Configuration(false);
  conf.set("dt.application.WordCountStreamingApiDemo.operator.WCOutput.silent", "true");
  conf.set("dt.application.WordCountStreamingApiDemo.operator.WordOutput.silent", "true");
  lma.prepareDAG(new ApplicationWithStreamAPI(), conf);
  LocalMode.Controller lc = lma.getController();
  long start = System.currentTimeMillis();
  lc.run(5000);
  long end = System.currentTimeMillis();
  long time = end - start;
  logger.info("Test used " + time + " ms");
}
 
Example 10
Source File: ApplicationTest.java    From examples with Apache License 2.0
@Test
public void testApplication() throws Exception {
  try {
    LocalMode lma = LocalMode.newInstance();
    Configuration conf = new Configuration(false);
    conf.addResource(this.getClass().getResourceAsStream("/META-INF/properties.xml"));
    lma.prepareDAG(new Application(), conf);
    LocalMode.Controller lc = lma.getController();
    lc.run(10000); // runs for 10 seconds and quits
  } catch (ConstraintViolationException e) {
    Assert.fail("constraint violations: " + e.getConstraintViolations());
  }
}
 
Example 11
Source File: ApplicationTest.java    From attic-apex-malhar with Apache License 2.0
@Test
public void testApplication() throws Exception
{
  try {
    LocalMode lma = LocalMode.newInstance();
    Configuration conf = new Configuration(false);
    conf.addResource(this.getClass().getResourceAsStream("/META-INF/properties.xml"));
    lma.prepareDAG(new Application(), conf);
    LocalMode.Controller lc = lma.getController();
    lc.run(5000); // runs for 5 seconds and quits
  } catch (ConstraintViolationException e) {
    Assert.fail("constraint violations: " + e.getConstraintViolations());
  }
}
 
Example 12
Source File: UniqueValueCountBenchmarkTest.java    From attic-apex-malhar with Apache License 2.0
@Test
public void testApplication() throws Exception
{
  LocalMode lma = LocalMode.newInstance();
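  // this example populates the DAG directly instead of going through LocalMode.prepareDAG()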
  new UniqueValueCountBenchmarkApplication().populateDAG(lma.getDAG(), new Configuration(false));
  LocalMode.Controller lc = lma.getController();
  lc.run(10000);
}
 
Example 13
Source File: TwitterTopWordsTest.java    From attic-apex-malhar with Apache License 2.0
/**
 * This test requires twitter authentication setup and is skipped by default
 * (see {@link TwitterSampleInput}).
 *
 * @throws Exception
 */
@Test
public void testApplication() throws Exception
{
  TwitterTopWordsApplication app = new TwitterTopWordsApplication();
  Configuration conf = new Configuration(false);
  conf.addResource("dt-site-rollingtopwords.xml");
  LocalMode lma = LocalMode.newInstance();
  lma.prepareDAG(app, conf);
  LocalMode.Controller lc = lma.getController();
  lc.run(120000);
}
 
Example 14
Source File: ApplicationTest.java    From examples with Apache License 2.0
private LocalMode.Controller asyncRun() throws Exception {
  Configuration conf = getConfig();
  LocalMode lma = LocalMode.newInstance();
  lma.prepareDAG(new SqsApplication(), conf);
  LocalMode.Controller lc = lma.getController();
  lc.runAsync();
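  // the caller is responsible for shutting down the returned controller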
  return lc;
}
 
Example 15
Source File: KafkaInputOperatorTest.java    From attic-apex-malhar with Apache License 2.0
/**
 * Test AbstractKafkaSinglePortInputOperator (i.e. an input adapter for
 * Kafka, aka consumer). This module receives data from an outside test
 * generator through the Kafka message bus and feeds that data into the
 * Malhar streaming platform.
 *
 * [Generate messages and send them to the Kafka message bus] ==> [Receive the
 * messages through the Kafka input adapter (i.e. consumer) and emit them via
 * emitTuples() on the output port during the onMessage call]
 *
 * @throws Exception
 */
 public void testKafkaInputOperator(int sleepTime, final int totalCount, KafkaConsumer consumer, boolean isValid, boolean idempotent) throws Exception
 {
    // initialize the latch for this test
   latch = new CountDownLatch(1);


    // Start producer
    KafkaTestProducer p = new KafkaTestProducer(TEST_TOPIC);
   p.setSendCount(totalCount);
   new Thread(p).start();

   // Create DAG for testing.
   LocalMode lma = LocalMode.newInstance();
    DAG dag = lma.getDAG();

    // Create KafkaSinglePortStringInputOperator
   KafkaSinglePortStringInputOperator node = dag.addOperator("Kafka message consumer", KafkaSinglePortStringInputOperator.class);
   if (isSuicide) {
     // make some extreme assumptions to make it fail if checkpointing wrong offsets
     dag.setAttribute(Context.DAGContext.CHECKPOINT_WINDOW_COUNT, 1);
     dag.setAttribute(Context.OperatorContext.STORAGE_AGENT, new FSStorageAgent("target/ck", new Configuration()));
     node.setMaxTuplesPerWindow(500);
   }

   if (idempotent) {
     node.setWindowDataManager(new FSWindowDataManager());
   }
   consumer.setTopic(TEST_TOPIC);

   node.setConsumer(consumer);

   consumer.setCacheSize(5000);

   if (isValid) {
     node.setZookeeper("localhost:" + KafkaOperatorTestBase.TEST_ZOOKEEPER_PORT[0]);
   }

   // Create Test tuple collector
   CollectorModule<String> collector = dag.addOperator("TestMessageCollector", new CollectorModule<String>());

   // Connect ports
   dag.addStream("Kafka message", node.outputPort, collector.inputPort).setLocality(Locality.CONTAINER_LOCAL);

   // Create local cluster
   final LocalMode.Controller lc = lma.getController();
   lc.setHeartbeatMonitoringEnabled(false);

   lc.runAsync();

    // Wait up to 300s for the consumer to finish consuming all the messages
    Assert.assertTrue("TIMEOUT: 300s", latch.await(300000, TimeUnit.MILLISECONDS));

   // Check results
   Assert.assertTrue("Expected count >= " + totalCount + "; Actual count " + tupleCount.intValue(),
       totalCount <= tupleCount.intValue());
   logger.debug(String.format("Number of emitted tuples: %d", tupleCount.intValue()));

   p.close();
   lc.shutdown();
 }
 
Example 16
Source File: ApplicationTest.java    From examples with Apache License 2.0
@Test
public void testApplication() throws Exception {
  try {
    // produce some test data
    KafkaTestProducer p = new KafkaTestProducer(KAFKA_TOPIC);
    String[] words = "count the words from kafka and store them in the db".split("\\s+");
    p.setMessages(Lists.newArrayList(words));
    new Thread(p).start();

    LocalMode lma = LocalMode.newInstance();
    Configuration conf = new Configuration(false);
    conf.addResource(this.getClass().getResourceAsStream("/META-INF/properties.xml"));
    conf.set("dt.operator.kafkaInput.prop.topic", KAFKA_TOPIC);
    conf.set("dt.operator.kafkaInput.prop.zookeeper", "localhost:" + KafkaOperatorTestBase.TEST_ZOOKEEPER_PORT[0]);
    conf.set("dt.operator.kafkaInput.prop.maxTuplesPerWindow", "1"); // consume one word per window
    conf.set("dt.operator.store.prop.store.databaseDriver", DB_DRIVER);
    conf.set("dt.operator.store.prop.store.databaseUrl", DB_URL);

    lma.prepareDAG(new Application(), conf);
    LocalMode.Controller lc = lma.getController();
    lc.runAsync(); // test will terminate after results are available

    HashSet<String> wordsSet = Sets.newHashSet(words);
    Connection con = DriverManager.getConnection(DB_URL);
    Statement stmt = con.createStatement();
    int rowCount = 0;
    long timeout = System.currentTimeMillis() + 30000; // 30s timeout
    while (rowCount < wordsSet.size() && timeout > System.currentTimeMillis()) {
      Thread.sleep(1000);
      String countQuery = "SELECT count(*) from " + TABLE_NAME;
      ResultSet resultSet = stmt.executeQuery(countQuery);
      resultSet.next();
      rowCount = resultSet.getInt(1);
      resultSet.close();
      LOG.info("current row count in {} is {}", TABLE_NAME, rowCount);
    }
    Assert.assertEquals("number of words", wordsSet.size(), rowCount);

    lc.shutdown();

  } catch (ConstraintViolationException e) {
    Assert.fail("constraint violations: " + e.getConstraintViolations());
  }
}
 
Example 17
Source File: KafkaInputOperatorTest.java    From attic-apex-malhar with Apache License 2.0
public void testInputOperator(boolean hasFailure, boolean idempotent) throws Exception
{
  // each broker should get an END_TUPLE message
  latch = new CountDownLatch(countDownAll ? totalCount + totalBrokers : totalBrokers);

  logger.info(
      "Test Case: name: {}; totalBrokers: {}; hasFailure: {}; hasMultiCluster: {};" +
      " hasMultiPartition: {}, partition: {}",
      testName, totalBrokers, hasFailure, hasMultiCluster, hasMultiPartition, partition);

  // Start producer
  KafkaTestProducer p = new KafkaTestProducer(testName, hasMultiPartition, hasMultiCluster);
  p.setSendCount(totalCount);
  Thread t = new Thread(p);
  t.start();

  int expectedReceiveCount = totalCount + totalBrokers;

  // Create DAG for testing.
  LocalMode lma = LocalMode.newInstance();
  DAG dag = lma.getDAG();

  // Create KafkaSinglePortStringInputOperator
  KafkaSinglePortInputOperator node = dag.addOperator(
      "Kafka input" + testName, KafkaSinglePortInputOperator.class);
  node.setInitialPartitionCount(1);
  // set topic
  node.setTopics(testName);
  node.setInitialOffset(AbstractKafkaInputOperator.InitialOffset.EARLIEST.name());
  node.setClusters(getClusterConfig());
  node.setStrategy(partition);
  if (idempotent) {
    node.setWindowDataManager(new FSWindowDataManager());
  }

  // Create Test tuple collector
  CollectorModule collector = dag.addOperator("TestMessageCollector", CollectorModule.class);
  collector.isIdempotentTest = idempotent;

  // Connect ports
  dag.addStream("Kafka message" + testName, node.outputPort, collector.inputPort)
      .setLocality(Locality.CONTAINER_LOCAL);

  if (hasFailure) {
    setupHasFailureTest(node, dag);
  }

  // Create local cluster
  LocalMode.Controller lc = lma.getController();
  lc.setHeartbeatMonitoringEnabled(false);

  // Run the Controller inside another thread. This is almost the same as calling
  // Controller.runAsync(), but runAsync() does not expose the thread it runs on,
  // so there would be no way to know when that thread terminates. Creating the
  // thread here lets us join() it and make sure the Controller shuts down completely.
  monitorThread = new Thread((StramLocalCluster)lc, "master");
  monitorThread.start();

  boolean notTimeout = true;
  try {
    // Wait 60s for consumer finish consuming all the messages
    notTimeout = latch.await(waitTime, TimeUnit.MILLISECONDS);
    lc.shutdown();

    // wait until the controller thread has finished.
    monitorThread.join();
  } catch (Exception e) {
    logger.warn(e.getMessage());
  }

  t.join();

  if (!notTimeout || expectedReceiveCount != tupleCollection.size()) {
    logger.info("Number of received/expected tuples: {}/{}, testName: {}, tuples: \n{}", tupleCollection.size(),
        expectedReceiveCount, testName, tupleCollection);
  }
  Assert.assertTrue("TIMEOUT. testName: " + this.testName + "; Collected data: "
      + tupleCollection, notTimeout);

  // Check results
  Assert.assertTrue("testName: " + testName + "; Collected tuple size: " + tupleCollection.size()
      + "; Expected tuple size: " + expectedReceiveCount + "; data: \n" + tupleCollection,
      expectedReceiveCount == tupleCollection.size());

  logger.info("End of test case: {}", testName);
}
 
Example 18
Source File: ApplicationTest.java    From attic-apex-malhar with Apache License 2.0
@Test
public void testIterationApp() throws Exception
{
  LocalMode lma = LocalMode.newInstance();
  Configuration conf = new Configuration(false);
  Application app = new Application();
  String outputFileName = "target/output.txt";
  long timeout = 10 * 1000; // 10 seconds

  new File(outputFileName).delete();
  app.setExtraOutputFileName(outputFileName);
  lma.prepareDAG(app, conf);
  LocalMode.Controller lc = lma.getController();
  lc.runAsync();

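  // poll the output file until it holds more than 50 bytes or the 10-second timeout elapses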
  long startTime = System.currentTimeMillis();
  do {
    try {
      Thread.sleep(500);
    } catch (InterruptedException ex) {
      break;
    }
    File file = new File(outputFileName);
    if (file.length() > 50) {
      break;
    }
  }
  while (System.currentTimeMillis() - startTime < timeout);

  lc.shutdown();
  try (BufferedReader br = new BufferedReader(new FileReader(outputFileName))) {
    Assert.assertEquals("1", br.readLine());
    Assert.assertEquals("1", br.readLine());
    Assert.assertEquals("2", br.readLine());
    Assert.assertEquals("3", br.readLine());
    Assert.assertEquals("5", br.readLine());
    Assert.assertEquals("8", br.readLine());
    Assert.assertEquals("13", br.readLine());
    Assert.assertEquals("21", br.readLine());
    Assert.assertEquals("34", br.readLine());
    Assert.assertEquals("55", br.readLine());
    Assert.assertEquals("89", br.readLine());
    Assert.assertEquals("144", br.readLine());
    Assert.assertEquals("233", br.readLine());
    Assert.assertEquals("377", br.readLine());
    Assert.assertEquals("610", br.readLine());
    Assert.assertEquals("987", br.readLine());
  }
}
 
Example 19
Source File: MemsqlInputBenchmarkTest.java    From attic-apex-malhar with Apache License 2.0
@Test
public void testMethod() throws SQLException, IOException
{
  Configuration conf = new Configuration();
  InputStream inputStream = new FileInputStream("src/site/conf/dt-site-memsql.xml");
  conf.addResource(inputStream);

  MemsqlStore memsqlStore = new MemsqlStore();
  memsqlStore.setDatabaseUrl(conf.get("dt.rootDbUrl"));
  memsqlStore.setConnectionProperties(
      conf.get("dt.application.MemsqlInputBenchmark.operator.memsqlInputOperator.store.connectionProperties"));

  AbstractMemsqlOutputOperatorTest.memsqlInitializeDatabase(memsqlStore);

  MemsqlPOJOOutputOperator outputOperator = new MemsqlPOJOOutputOperator();
  outputOperator.getStore().setDatabaseUrl(
      conf.get("dt.application.MemsqlInputBenchmark.operator.memsqlInputOperator.store.dbUrl"));
  outputOperator.getStore().setConnectionProperties(
      conf.get("dt.application.MemsqlInputBenchmark.operator.memsqlInputOperator.store.connectionProperties"));
  outputOperator.setBatchSize(BATCH_SIZE);

  Random random = new Random();
  com.datatorrent.api.Attribute.AttributeMap.DefaultAttributeMap attributeMap =
      new com.datatorrent.api.Attribute.AttributeMap.DefaultAttributeMap();
  attributeMap.put(OperatorContext.PROCESSING_MODE, ProcessingMode.AT_LEAST_ONCE);
  attributeMap.put(OperatorContext.ACTIVATION_WINDOW_ID, -1L);
  attributeMap.put(DAG.APPLICATION_ID, APP_ID);
  OperatorContext context = mockOperatorContext(OPERATOR_ID, attributeMap);

  long seedSize = conf.getLong("dt.seedSize", SEED_SIZE);

  outputOperator.setup(context);
  outputOperator.beginWindow(0);

  for (long valueCounter = 0; valueCounter < seedSize; valueCounter++) {
    outputOperator.input.put(random.nextInt());
  }

  outputOperator.endWindow();
  outputOperator.teardown();

  MemsqlInputBenchmark app = new MemsqlInputBenchmark();
  LocalMode lm = LocalMode.newInstance();

  try {
    lm.prepareDAG(app, conf);
    LocalMode.Controller lc = lm.getController();
    lc.run(20000);
  } catch (Exception ex) {
    DTThrowable.rethrow(ex);
  }

  IOUtils.closeQuietly(inputStream);
}
 
Example 20
Source File: OffsetManagerTest.java    From attic-apex-malhar with Apache License 2.0
public void testPartitionableInputOperator(KafkaConsumer consumer, int expectedCount) throws Exception
{
  // Set to 3 because we want to make sure the END_TUPLE from both partitions is received and offsets have been updated to 102
  latch = new CountDownLatch(3);

  // Start producer
  KafkaTestProducer p = new KafkaTestProducer(TEST_TOPIC, true);
  p.setProducerType("sync");
  p.setSendCount(totalCount);
  // wait for the producer to send all messages
  p.run();
  p.close();

  // Create DAG for testing.
  LocalMode lma = LocalMode.newInstance();
  DAG dag = lma.getDAG();

  // Create KafkaSinglePortStringInputOperator
  KafkaSinglePortStringInputOperator node = dag.addOperator("Kafka message consumer", KafkaSinglePortStringInputOperator.class);


  TestOffsetManager tfm = new TestOffsetManager();

  tfm.setFilename(TEST_TOPIC + OFFSET_FILE);

  node.setInitialPartitionCount(1);
  node.setOffsetManager(tfm);
  node.setStrategy(PartitionStrategy.ONE_TO_MANY.toString());
  node.setRepartitionInterval(-1);

  //set topic
  consumer.setTopic(TEST_TOPIC);
  //set the zookeeper list used to initialize the partition
  SetMultimap<String, String> zookeeper = HashMultimap.create();
  String zks = "localhost:" + KafkaOperatorTestBase.TEST_ZOOKEEPER_PORT[0];
  consumer.setZookeeper(zks);
  consumer.setInitialOffset("earliest");

  node.setConsumer(consumer);

  // Create Test tuple collector
  CollectorModule collector = dag.addOperator("TestMessageCollector", new CollectorModule());

  // Connect ports
  dag.addStream("Kafka message", node.outputPort, collector.inputPort).setLocality(Locality.CONTAINER_LOCAL);

  dag.setAttribute(Context.DAGContext.CHECKPOINT_WINDOW_COUNT, 1);

  // Create local cluster
  final LocalMode.Controller lc = lma.getController();
  lc.setHeartbeatMonitoringEnabled(true);

  lc.runAsync();

  // Wait 30s for the consumer to finish consuming all the messages and for the offsets to be updated to 100
  boolean isNotTimeout = latch.await(30000, TimeUnit.MILLISECONDS);
  assertTrue("TIMEOUT: 30s, collected " + collectedTuples.size() + " tuples", isNotTimeout);

  // Check results
  assertEquals("Tuple count " + collectedTuples, expectedCount, collectedTuples.size());
  logger.debug(String.format("Number of emitted tuples: %d", collectedTuples.size()));

  p.close();
  lc.shutdown();
}