com.datatorrent.common.partitioner.StatelessPartitioner Java Examples

The following examples show how to use com.datatorrent.common.partitioner.StatelessPartitioner. Each example notes the source file and the project it was taken from.
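All of the examples share the same basic pattern: StatelessPartitioner is never wired into the DAG as an operator; it is attached to an operator through the PARTITIONER attribute, and the platform then clones that operator into the requested number of identical, stateless partitions. The minimal sketch below distills that pattern; MySource, MyOperator and the port/stream names are placeholders, not taken from any specific example.

// Minimal sketch of the shared pattern (MySource, MyOperator and all names are placeholders).
@Override
public void populateDAG(DAG dag, Configuration conf)
{
  MySource source = dag.addOperator("Source", new MySource());
  MyOperator op = dag.addOperator("MyOperator", new MyOperator());
  dag.addStream("SourceToMyOperator", source.output, op.input);

  // Ask the platform to run "MyOperator" as 4 identical, stateless partitions.
  dag.setAttribute(op, Context.OperatorContext.PARTITIONER,
      new StatelessPartitioner<MyOperator>(4));
}
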
Example #1
Source File: DeduperPartitioningTest.java    From attic-apex-malhar with Apache License 2.0
@Override
public void populateDAG(DAG dag, Configuration conf)
{
  TestGenerator gen = dag.addOperator("Generator", new TestGenerator());

  dedup = dag.addOperator("Deduper", new TestDeduper());
  dedup.setKeyExpression("id");
  dedup.setTimeExpression("eventTime.getTime()");
  dedup.setBucketSpan(60);
  dedup.setExpireBefore(600);

  ConsoleOutputOperator console = dag.addOperator("Console", new ConsoleOutputOperator());
  dag.addStream("Generator to Dedup", gen.output, dedup.input);
  dag.addStream("Dedup to Console", dedup.unique, console.input);
  dag.setInputPortAttribute(dedup.input, Context.PortContext.TUPLE_CLASS, TestEvent.class);
  dag.setOutputPortAttribute(dedup.unique, Context.PortContext.TUPLE_CLASS, TestEvent.class);
  dag.setAttribute(dedup, Context.OperatorContext.PARTITIONER,
      new StatelessPartitioner<TimeBasedDedupOperator>(NUM_DEDUP_PARTITIONS));
}
 
Example #2
Source File: AutoMetricTest.java    From attic-apex-core with Apache License 2.0
@Test
@Ignore
public void testMetricsAggregations() throws Exception
{
  CountDownLatch latch = new CountDownLatch(2);

  LogicalPlanConfiguration lpc = new LogicalPlanConfiguration(new Configuration());

  TestGeneratorInputOperator inputOperator = dag.addOperator("input", TestGeneratorInputOperator.class);

  OperatorWithMetrics o1 = dag.addOperator("o1", OperatorWithMetrics.class);
  MockAggregator aggregator = new MockAggregator(latch);
  dag.setOperatorAttribute(o1, Context.OperatorContext.METRICS_AGGREGATOR, aggregator);

  dag.setAttribute(Context.OperatorContext.STORAGE_AGENT, new StramTestSupport.MemoryStorageAgent());
  dag.setOperatorAttribute(o1, Context.OperatorContext.PARTITIONER, new StatelessPartitioner<TestGeneratorInputOperator>(2));

  dag.addStream("TestTuples", inputOperator.outport, o1.inport1);

  lpc.prepareDAG(dag, null, "AutoMetricTest");
  StramLocalCluster lc = new StramLocalCluster(dag);
  lc.runAsync();
  latch.await();
  Assert.assertEquals("progress", 2L, ((Long)aggregator.result.get("progress")).longValue());
  lc.shutdown();
}
 
Example #3
Source File: POJOPartitionJoinOperatorTest.java    From attic-apex-malhar with Apache License 2.0
@Override
public void populateDAG(DAG dag, Configuration configuration)
{
  TestGenerator gen1 = dag.addOperator("Generator1", new TestGenerator());
  TestGenerator gen2 = dag.addOperator("Generator2", new TestGenerator());

  joinOp = dag.addOperator("Join", new PartitionTestJoinOperator());
  joinOp.setLeftKeyExpression("id");
  joinOp.setRightKeyExpression("id");
  joinOp.setIncludeFieldStr("id,eventTime;id,eventTime");
  joinOp.setExpiryTime(10000L);

  ConsoleOutputOperator console = dag.addOperator("Console", new ConsoleOutputOperator());

  dag.addStream("Gen1ToJoin", gen1.output, joinOp.input1);
  dag.addStream("Gen2ToJoin", gen2.output, joinOp.input2);
  dag.addStream("JoinToConsole", joinOp.outputPort, console.input);
  dag.setInputPortAttribute(joinOp.input1, DAG.InputPortMeta.TUPLE_CLASS, TestEvent.class);
  dag.setInputPortAttribute(joinOp.input2, DAG.InputPortMeta.TUPLE_CLASS, TestEvent.class);
  dag.setOutputPortAttribute(joinOp.outputPort, DAG.InputPortMeta.TUPLE_CLASS, TestEvent.class);
  dag.setAttribute(joinOp, Context.OperatorContext.PARTITIONER,
      new StatelessPartitioner<PartitionTestJoinOperator>(NUM_OF_PARTITIONS));
}
 
Example #4
Source File: StatelessPartitionerTest.java    From attic-apex-malhar with Apache License 2.0
@Test
public void testParallelPartitionScaleDown()
{
  DummyOperator dummyOperator = new DummyOperator(5);
  StatelessPartitioner<DummyOperator> statelessPartitioner = new StatelessPartitioner<DummyOperator>();

  TestUtils.MockBatchedOperatorStats mockStats = new TestUtils.MockBatchedOperatorStats(2);
  mockStats.operatorStats = Lists.newArrayList();

  Collection<Partition<DummyOperator>> partitions = Lists.newArrayList();

  for (int i = 5; i-- > 0; ) {
    DefaultPartition<DummyOperator> defaultPartition = new DefaultPartition<DummyOperator>(dummyOperator);
    TestUtils.MockPartition<DummyOperator> mockPartition = new TestUtils.MockPartition<DummyOperator>(defaultPartition, mockStats);

    partitions.add(mockPartition);
  }

  Collection<Partition<DummyOperator>> newPartitions = statelessPartitioner.definePartitions(partitions,
      new PartitioningContextImpl(null, 1));
  Assert.assertEquals("after partition", 1, newPartitions.size());
}
 
Example #5
Source File: StatelessPartitionerTest.java    From attic-apex-malhar with Apache License 2.0
@Test
public void testParallelPartitionScaleUP()
{
  DummyOperator dummyOperator = new DummyOperator(5);
  StatelessPartitioner<DummyOperator> statelessPartitioner = new StatelessPartitioner<DummyOperator>();

  TestUtils.MockBatchedOperatorStats mockStats = new TestUtils.MockBatchedOperatorStats(2);
  mockStats.operatorStats = Lists.newArrayList();

  Collection<Partition<DummyOperator>> partitions = Lists.newArrayList();

  DefaultPartition<DummyOperator> defaultPartition = new DefaultPartition<DummyOperator>(dummyOperator);
  TestUtils.MockPartition<DummyOperator> mockPartition = new TestUtils.MockPartition<DummyOperator>(defaultPartition, mockStats);

  partitions.add(mockPartition);

  Collection<Partition<DummyOperator>> newPartitions = statelessPartitioner.definePartitions(partitions,
      new PartitioningContextImpl(null, 5));
  Assert.assertEquals("after partition", 5, newPartitions.size());
}
 
Example #6
Source File: StatelessPartitionerTest.java    From attic-apex-malhar with Apache License 2.0
@Test
public void partition5Test()
{
  DummyOperator dummyOperator = new DummyOperator(5);
  StatelessPartitioner<DummyOperator> statelessPartitioner = new StatelessPartitioner<DummyOperator>(5);

  Collection<Partition<DummyOperator>> partitions = Lists.newArrayList();
  DefaultPartition<DummyOperator> defaultPartition = new DefaultPartition<DummyOperator>(dummyOperator);
  partitions.add(defaultPartition);

  Collection<Partition<DummyOperator>> newPartitions = statelessPartitioner.definePartitions(partitions, new PartitioningContextImpl(null, 0));
  Assert.assertEquals("Incorrect number of partitions", 5, newPartitions.size());

  for (Partition<DummyOperator> partition : newPartitions) {
    Assert.assertEquals("Incorrect cloned value", 5, partition.getPartitionedInstance().getValue());
  }
}
 
Example #7
Source File: StatelessPartitionerTest.java    From attic-apex-malhar with Apache License 2.0
@Test
public void partition1Test()
{
  DummyOperator dummyOperator = new DummyOperator(5);
  StatelessPartitioner<DummyOperator> statelessPartitioner = new StatelessPartitioner<DummyOperator>();

  Collection<Partition<DummyOperator>> partitions = Lists.newArrayList();
  DefaultPartition<DummyOperator> defaultPartition = new DefaultPartition<DummyOperator>(dummyOperator);
  partitions.add(defaultPartition);

  Collection<Partition<DummyOperator>> newPartitions = statelessPartitioner.definePartitions(partitions, new PartitioningContextImpl(null, 0));
  Assert.assertEquals("Incorrect number of partitions", 1, newPartitions.size());

  for (Partition<DummyOperator> partition : newPartitions) {
    Assert.assertEquals("Incorrect cloned value", 5, partition.getPartitionedInstance().getValue());
  }
}
 
Example #8
Source File: StreamingContainerManagerTest.java    From attic-apex-core with Apache License 2.0
private void testDownStreamPartition(Locality locality) throws Exception
{
  TestGeneratorInputOperator o1 = dag.addOperator("o1", TestGeneratorInputOperator.class);
  GenericTestOperator o2 = dag.addOperator("o2", GenericTestOperator.class);
  dag.setOperatorAttribute(o2, OperatorContext.PARTITIONER, new StatelessPartitioner<GenericTestOperator>(2));
  dag.addStream("o1Output1", o1.outport, o2.inport1).setLocality(locality);

  int maxContainers = 5;
  dag.setAttribute(LogicalPlan.CONTAINERS_MAX_COUNT, maxContainers);
  dag.setAttribute(OperatorContext.STORAGE_AGENT, new StramTestSupport.MemoryStorageAgent());
  dag.validate();
  PhysicalPlan plan = new PhysicalPlan(dag, new TestPlanContext());
  Assert.assertEquals("number of containers", 1, plan.getContainers().size());

  PTContainer container1 = plan.getContainers().get(0);
  Assert.assertEquals("number operators " + container1, 3, container1.getOperators().size());
  StramLocalCluster slc = new StramLocalCluster(dag);
  slc.run(5000);
}
 
Example #9
Source File: PhysicalPlanTest.java    From attic-apex-core with Apache License 2.0
@Test
public void testParallelPartitionForSlidingWindow()
{
  LogicalPlan dag = new LogicalPlan();
  dag.setAttribute(OperatorContext.STORAGE_AGENT, new StramTestSupport.MemoryStorageAgent());

  GenericTestOperator o1 = dag.addOperator("o1", GenericTestOperator.class);
  GenericTestOperator o2 = dag.addOperator("o2", GenericTestOperator.class);
  GenericTestOperator o3 = dag.addOperator("o3", GenericTestOperator.class);
  dag.setOperatorAttribute(o1, OperatorContext.SLIDE_BY_WINDOW_COUNT, 2);
  dag.setOperatorAttribute(o1, OperatorContext.PARTITIONER, new StatelessPartitioner<>(2));
  dag.setInputPortAttribute(o2.inport1, PortContext.PARTITION_PARALLEL, true);
  dag.setOperatorAttribute(o1, OperatorContext.APPLICATION_WINDOW_COUNT, 4);

  dag.addStream("o1.outport1", o1.outport1, o2.inport1);
  dag.addStream("o2.outport1", o2.outport1, o3.inport1);
  PhysicalPlan plan = new PhysicalPlan(dag, new TestPlanContext());
  Assert.assertEquals("number of containers", 7, plan.getContainers().size());
}
 
Example #10
Source File: PhysicalPlanTest.java    From attic-apex-core with Apache License 2.0
@Test
public void testNumberOfUnifiers()
{
  LogicalPlan dag = new LogicalPlan();
  dag.setAttribute(OperatorContext.STORAGE_AGENT, new StramTestSupport.MemoryStorageAgent());
  GenericTestOperator node1 = dag.addOperator("node1", GenericTestOperator.class);
  GenericTestOperator node2 = dag.addOperator("node2", GenericTestOperator.class);
  dag.addStream("node1.outport1", node1.outport1, node2.inport1);
  dag.setOperatorAttribute(node1, OperatorContext.PARTITIONER, new StatelessPartitioner<GenericTestOperator>(5));
  dag.setOutputPortAttribute(node1.outport1, PortContext.UNIFIER_LIMIT, 3);
  PhysicalPlan plan = new PhysicalPlan(dag, new TestPlanContext());
  List<PTContainer> containers = plan.getContainers();
  int unifierCount = 0;
  int totalOperators = 0;
  for (PTContainer container : containers) {
    List<PTOperator> operators = container.getOperators();
    for (PTOperator operator : operators) {
      totalOperators++;
      if (operator.isUnifier()) {
        unifierCount++;
      }
    }
  }
  Assert.assertEquals("Number of operators", 8, totalOperators);
  Assert.assertEquals("Number of unifiers", 2, unifierCount);
}
 
Example #11
Source File: PhysicalPlanTest.java    From attic-apex-core with Apache License 2.0
@Test
public void testNumberOfUnifiersWithEvenPartitions()
{
  LogicalPlan dag = new LogicalPlan();
  dag.setAttribute(OperatorContext.STORAGE_AGENT, new StramTestSupport.MemoryStorageAgent());
  GenericTestOperator node1 = dag.addOperator("node1", GenericTestOperator.class);
  GenericTestOperator node2 = dag.addOperator("node2", GenericTestOperator.class);
  dag.addStream("node1.outport1", node1.outport1, node2.inport1);
  dag.setOperatorAttribute(node1, OperatorContext.PARTITIONER, new StatelessPartitioner<GenericTestOperator>(8));
  dag.setOutputPortAttribute(node1.outport1, PortContext.UNIFIER_LIMIT, 4);
  PhysicalPlan plan = new PhysicalPlan(dag, new TestPlanContext());
  List<PTContainer> containers = plan.getContainers();
  int unifierCount = 0;
  int totalOperators = 0;
  for (PTContainer container : containers) {
    List<PTOperator> operators = container.getOperators();
    for (PTOperator operator : operators) {
      totalOperators++;
      if (operator.isUnifier()) {
        unifierCount++;
      }
    }
  }
  Assert.assertEquals("Number of operators", 12, totalOperators);
  Assert.assertEquals("Number of unifiers", 3, unifierCount);
}
 
Example #12
Source File: UniqueKeyValCountExample.java    From attic-apex-malhar with Apache License 2.0
@Override
public void populateDAG(DAG dag, Configuration entries)
{
      /* Generate random key-value pairs */
  RandomDataGenerator randGen = dag.addOperator("randomgen", new RandomDataGenerator());

      /* Initialize with three partitions to start with */
  UniqueCounter<KeyValPair<String, Object>> uniqCount =
      dag.addOperator("uniqevalue", new UniqueCounter<KeyValPair<String, Object>>());
  MapToKeyHashValuePairConverter<KeyValPair<String, Object>, Integer> converter = dag.addOperator("converter", new MapToKeyHashValuePairConverter());
  uniqCount.setCumulative(false);
  dag.setAttribute(randGen, Context.OperatorContext.PARTITIONER, new StatelessPartitioner<UniqueCounter<KeyValPair<String, Object>>>(3));

  ConsoleOutputOperator output = dag.addOperator("output", new ConsoleOutputOperator());

  dag.addStream("datain", randGen.outPort, uniqCount.data);
  dag.addStream("convert", uniqCount.count, converter.input).setLocality(Locality.THREAD_LOCAL);
  dag.addStream("consoutput", converter.output, output.input);
}
 
Example #13
Source File: PhysicalPlanTest.java    From attic-apex-core with Apache License 2.0
@Test
public void testMxNPartitionForSlidingWindow()
{
  LogicalPlan dag = new LogicalPlan();
  dag.setAttribute(OperatorContext.STORAGE_AGENT, new StramTestSupport.MemoryStorageAgent());

  GenericTestOperator o1 = dag.addOperator("o1", GenericTestOperator.class);
  GenericTestOperator o2 = dag.addOperator("o2", GenericTestOperator.class);
  GenericTestOperator o3 = dag.addOperator("o3", GenericTestOperator.class);
  dag.setOperatorAttribute(o1, OperatorContext.APPLICATION_WINDOW_COUNT, 4);
  dag.setOperatorAttribute(o1, OperatorContext.SLIDE_BY_WINDOW_COUNT, 2);
  dag.setOperatorAttribute(o1, OperatorContext.PARTITIONER, new StatelessPartitioner<>(2));
  dag.getOperatorMeta("o1").getMeta(o1.outport1).getUnifierMeta().getAttributes().put(OperatorContext.MEMORY_MB, 1024);
  dag.setOperatorAttribute(o2, OperatorContext.PARTITIONER, new StatelessPartitioner<>(2));
  dag.setOperatorAttribute(o2, OperatorContext.SLIDE_BY_WINDOW_COUNT, 2);
  dag.setOperatorAttribute(o2, OperatorContext.APPLICATION_WINDOW_COUNT, 4);

  dag.addStream("o1.outport1", o1.outport1, o2.inport1);
  dag.addStream("o2.outport1", o2.outport1, o3.inport1);
  PhysicalPlan plan = new PhysicalPlan(dag, new TestPlanContext());
  Assert.assertEquals("number of containers", 9, plan.getContainers().size());
}
 
Example #14
Source File: PhysicalPlanTest.java    From attic-apex-core with Apache License 2.0
@Test
public void testContainerSizeWithPartitioning()
{
  LogicalPlan dag = new LogicalPlan();
  dag.setAttribute(OperatorContext.STORAGE_AGENT, new StramTestSupport.MemoryStorageAgent());

  GenericTestOperator o1 = dag.addOperator("o1", GenericTestOperator.class);
  GenericTestOperator o2 = dag.addOperator("o2", GenericTestOperator.class);
  dag.setOperatorAttribute(o1, OperatorContext.PARTITIONER, new StatelessPartitioner<GenericTestOperator>(3));
  dag.setOperatorAttribute(o2, OperatorContext.PARTITIONER, new StatelessPartitioner<GenericTestOperator>(2));

  dag.addStream("o1.outport1", o1.outport1, o2.inport1);
  dag.setAttribute(LogicalPlan.CONTAINERS_MAX_COUNT, 10);
  PhysicalPlan plan = new PhysicalPlan(dag, new TestPlanContext());
  Assert.assertEquals("number of containers", 5, plan.getContainers().size());
  PTContainer container;
  for (int i = 0; i < 5; i++) {
    container = plan.getContainers().get(i);
    if (container.getOperators().size() == 1) {
      Assert.assertEquals("container memory is 1536 for container :" + container, 1536, container.getRequiredMemoryMB());
    }
    if (container.getOperators().size() == 2) {
      Assert.assertEquals("container memory is 2048 for container :" + container, 2048, container.getRequiredMemoryMB());
    }
  }
}
 
Example #15
Source File: ApplicationWithGenerator.java    From streaming-benchmarks with Apache License 2.0
@Override
public void populateDAG(DAG dag, Configuration configuration)
{
  // Create operators for each step.
  // Settings are applied by the platform using the config file.
  JsonGenerator eventGenerator = dag.addOperator("eventGenerator", new JsonGenerator());
  FilterTuples filterTuples = dag.addOperator("filterTuples", new FilterTuples());
  FilterFields filterFields = dag.addOperator("filterFields", new FilterFields());
  RedisJoin redisJoin = dag.addOperator("redisJoin", new RedisJoin());
  CampaignProcessor campaignProcessor = dag.addOperator("campaignProcessor", new CampaignProcessor());

  eventGenerator.setNumAdsPerCampaign(Integer.parseInt(configuration.get("numberOfAds")));
  eventGenerator.setNumCampaigns(Integer.parseInt(configuration.get("numberOfCampaigns")));
  setupRedis(eventGenerator.getCampaigns(), configuration.get("redis"));

  // Connect the Ports in the Operators
  dag.addStream("filterTuples", eventGenerator.out, filterTuples.input).setLocality(DAG.Locality.CONTAINER_LOCAL);
  dag.addStream("filterFields", filterTuples.output, filterFields.input).setLocality(DAG.Locality.CONTAINER_LOCAL);
  dag.addStream("redisJoin", filterFields.output, redisJoin.input).setLocality(DAG.Locality.CONTAINER_LOCAL);
  dag.addStream("output", redisJoin.output, campaignProcessor.input);

  dag.setInputPortAttribute(filterTuples.input, Context.PortContext.PARTITION_PARALLEL, true);
  dag.setInputPortAttribute(filterFields.input, Context.PortContext.PARTITION_PARALLEL, true);
  dag.setInputPortAttribute(redisJoin.input, Context.PortContext.PARTITION_PARALLEL, true);

  dag.setAttribute(eventGenerator, Context.OperatorContext.PARTITIONER, new StatelessPartitioner<EventGenerator>(8));
  dag.setAttribute(campaignProcessor, Context.OperatorContext.PARTITIONER, new StatelessPartitioner<CampaignProcessor>(8));
}
 
Example #16
Source File: ApexProcessingItem.java    From incubator-samoa with Apache License 2.0
@Override
protected ProcessingItem addInputStream(Stream inputStream, PartitioningScheme scheme) {
  ApexStream apexStream = (ApexStream) inputStream;
  this.operator.addInputStream(apexStream);
  dag.addStream(apexStream.getStreamId(), apexStream.outputPort, apexStream.inputPort);

  // Setup stream codecs here
  switch (scheme) {
  case SHUFFLE:
    dag.setInputPortAttribute(apexStream.inputPort, Context.PortContext.STREAM_CODEC,
        new ApexStreamUtils.RandomStreamCodec<ContentEvent>());
    break;
  case BROADCAST:
    dag.setAttribute(this.operator, Context.OperatorContext.PARTITIONER,
        new ApexStreamUtils.AllPartitioner<ApexOperator>(numPartitions));
    dag.setInputPortAttribute(apexStream.inputPort, Context.PortContext.STREAM_CODEC,
        new ApexStreamUtils.JavaSerializationStreamCodec<ContentEvent>());
    break;
  case GROUP_BY_KEY:
    dag.setInputPortAttribute(apexStream.inputPort, Context.PortContext.STREAM_CODEC,
        new ApexStreamUtils.KeyBasedStreamCodec<ContentEvent>());
    break;
  default:
    // Should never occur
    throw new RuntimeException("Unknown partitioning scheme");
  }

  if (!dag.getAttributes().contains(Context.OperatorContext.PARTITIONER)) {
    dag.setAttribute(this.operator, Context.OperatorContext.PARTITIONER, new StatelessPartitioner<>(numPartitions));
  }
  return this;
}
 
Example #17
Source File: ApplicationWithDCWithoutDeserializer.java    From streaming-benchmarks with Apache License 2.0
public DefaultOutputPort<DimensionTuple> populateUpstreamDAG(DAG dag, Configuration configuration)
{
  JsonGenerator eventGenerator = dag.addOperator("eventGenerator", new JsonGenerator());
  FilterTuples filterTuples = dag.addOperator("filterTuples", new FilterTuples());
  FilterFields filterFields = dag.addOperator("filterFields", new FilterFields());
  
  // Connect the Ports in the Operators
  dag.addStream("filterTuples", eventGenerator.out, filterTuples.input).setLocality(DAG.Locality.CONTAINER_LOCAL);
  dag.addStream("filterFields", filterTuples.output, filterFields.input).setLocality(DAG.Locality.CONTAINER_LOCAL);
  
  TupleToDimensionTupleConverter converter = dag.addOperator("converter", new TupleToDimensionTupleConverter());
  
  if(includeRedisJoin) {
    RedisJoin redisJoin = dag.addOperator("redisJoin", new RedisJoin());
    dag.addStream("redisJoin", filterFields.output, redisJoin.input).setLocality(DAG.Locality.CONTAINER_LOCAL);
    dag.addStream("converter", redisJoin.output, converter.inputPort).setLocality(DAG.Locality.CONTAINER_LOCAL);

    dag.setInputPortAttribute(redisJoin.input, Context.PortContext.PARTITION_PARALLEL, true);

    setupRedis(eventGenerator.getCampaigns());
  } else {
    dag.addStream("convert", filterFields.output, converter.inputPort).setLocality(DAG.Locality.CONTAINER_LOCAL);
  }
  

  dag.setInputPortAttribute(filterTuples.input, Context.PortContext.PARTITION_PARALLEL, true);
  dag.setInputPortAttribute(filterFields.input, Context.PortContext.PARTITION_PARALLEL, true);
  dag.setInputPortAttribute(converter.inputPort, Context.PortContext.PARTITION_PARALLEL, true);

  dag.setAttribute(eventGenerator, Context.OperatorContext.PARTITIONER, new StatelessPartitioner<EventGenerator>(PARTITION_NUM));

  return converter.outputPort;
}
 
Example #18
Source File: ApplicationDimensionComputation.java    From streaming-benchmarks with Apache License 2.0
@Override
public void populateDAG(DAG dag, Configuration configuration) 
{
  DimensionTupleGenerateOperator generateOperator = new DimensionTupleGenerateOperator();
  dag.addOperator("Generator", generateOperator);
  dag.setAttribute(generateOperator, Context.OperatorContext.PARTITIONER, new StatelessPartitioner<EventGenerator>(PARTITION_NUM));
  
  populateDimensionsDAG(dag, configuration, generateOperator.outputPort);
}
 
Example #19
Source File: UniqueValueCountBenchmarkApplication.java    From attic-apex-malhar with Apache License 2.0
@Override
public void populateDAG(DAG dag, Configuration entries)
{
  dag.setAttribute(dag.APPLICATION_NAME, "UniqueValueCountDemo");
  dag.setAttribute(dag.DEBUG, true);


  /* Generate random key-value pairs */
  RandomEventGenerator randGen = dag.addOperator("randomgen", new RandomEventGenerator());
  randGen.setMaxvalue(999999);
  randGen.setTuplesBlastIntervalMillis(50);
  dag.setAttribute(randGen, Context.OperatorContext.PARTITIONER, new StatelessPartitioner<RandomEventGenerator>(3));

  /* Initialize with three partitions to start with */
  UniqueCounter<Integer> uniqCount = dag.addOperator("uniqevalue", new UniqueCounter<Integer>());
  MapToKeyHashValuePairConverter<Integer, Integer> converter =
      dag.addOperator("converter", new MapToKeyHashValuePairConverter());

  dag.setAttribute(uniqCount, Context.OperatorContext.PARTITIONER,
      new StatelessPartitioner<UniqueCounter<Integer>>(3));
  dag.setInputPortAttribute(uniqCount.data, Context.PortContext.PARTITION_PARALLEL, true);
  uniqCount.setCumulative(false);

  Counter counter = dag.addOperator("count", new Counter());
  ConsoleOutputOperator output = dag.addOperator("output", new ConsoleOutputOperator());

  dag.addStream("datain", randGen.integer_data, uniqCount.data);
  dag.addStream("convert", uniqCount.count, converter.input).setLocality(Locality.THREAD_LOCAL);
  dag.addStream("consoutput", converter.output, counter.input);
  dag.addStream("final", counter.output, output.input);
}
 
Example #20
Source File: Application.java    From attic-apex-malhar with Apache License 2.0
@Override
public void populateDAG(DAG dag, Configuration entries)
{
      /* Generate random key-value pairs */
  RandomKeysGenerator randGen = dag.addOperator("randomgen", new RandomKeysGenerator());


      /* Initialize with three partitions to start with */
  // UniqueCount1 uniqCount = dag.addOperator("uniqevalue", new UniqueCount1());
  UniqueCounter<Integer> uniqCount = dag.addOperator("uniqevalue", new UniqueCounter<Integer>());

  MapToKeyHashValuePairConverter<Integer, Integer> converter = dag.addOperator("converter", new MapToKeyHashValuePairConverter());

  uniqCount.setCumulative(false);
  dag.setAttribute(uniqCount, Context.OperatorContext.PARTITIONER, new StatelessPartitioner<UniqueCounter<Integer>>(3));

  CountVerifier<Integer> verifier = dag.addOperator("verifier", new CountVerifier<Integer>());
  StreamDuplicater<KeyHashValPair<Integer, Integer>> dup = dag.addOperator("dup", new StreamDuplicater<KeyHashValPair<Integer, Integer>>());
  ConsoleOutputOperator output = dag.addOperator("output", new ConsoleOutputOperator());

  ConsoleOutputOperator successOutput = dag.addOperator("successoutput", new ConsoleOutputOperator());
  successOutput.setStringFormat("Success %d");
  ConsoleOutputOperator failureOutput = dag.addOperator("failureoutput", new ConsoleOutputOperator());
  failureOutput.setStringFormat("Failure %d");

  // success and failure counters.
  Counter successcounter = dag.addOperator("successcounter", new Counter());
  Counter failurecounter = dag.addOperator("failurecounter", new Counter());

  dag.addStream("datain", randGen.outPort, uniqCount.data);
  dag.addStream("dataverification0", randGen.verificationPort, verifier.in1);
  dag.addStream("convert", uniqCount.count, converter.input).setLocality(Locality.THREAD_LOCAL);
  dag.addStream("split", converter.output, dup.data);
  dag.addStream("consoutput", dup.out1, output.input);
  dag.addStream("dataverification1", dup.out2, verifier.in2);
  dag.addStream("successc", verifier.successPort, successcounter.input);
  dag.addStream("failurec", verifier.failurePort, failurecounter.input);
  dag.addStream("succconsoutput", successcounter.output, successOutput.input);
  dag.addStream("failconsoutput", failurecounter.output, failureOutput.input);
}
 
Example #21
Source File: StatelessPartitionerTest.java    From attic-apex-malhar with Apache License 2.0
@Test
public void objectPropertyTest()
{
  Object2String<StatelessPartitioner<DummyOperator>> propertyReader = new Object2String<StatelessPartitioner<DummyOperator>>();
  StatelessPartitioner<DummyOperator> partitioner = propertyReader.fromString(StatelessPartitioner.class.getName() + ":3");
  Assert.assertEquals(3, partitioner.getPartitionCount());
}
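
Example #21 above exercises the string form from which the partitioner can be built: the fully qualified class name followed by ":<partitionCount>". As a hedged sketch (the property key below is an assumption based on the usual Apex attribute-property naming and the operator name is a placeholder, neither is shown on this page), the same string is what a configuration-driven application would supply instead of constructing the partitioner in Java:

// Hypothetical configuration-based equivalent of Example #21. The property key is assumed;
// the value reuses the "ClassName:partitionCount" form that Object2String parses above.
Configuration conf = new Configuration(false);
conf.set("dt.operator.MyOperator.attr.PARTITIONER",
    StatelessPartitioner.class.getName() + ":3");

Either way, the partition count ends up on getPartitionCount(), which is what objectPropertyTest asserts.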
 
Example #22
Source File: StreamCodecTest.java    From attic-apex-core with Apache License 2.0
@Test
public void testMxNMultipleStreamCodecs()
{
  GenericTestOperator node1 = dag.addOperator("node1", GenericTestOperator.class);
  dag.setOperatorAttribute(node1, Context.OperatorContext.PARTITIONER, new StatelessPartitioner<GenericTestOperator>(2));
  GenericTestOperator node2 = dag.addOperator("node2", GenericTestOperator.class);
  dag.setOperatorAttribute(node2, Context.OperatorContext.PARTITIONER, new StatelessPartitioner<GenericTestOperator>(3));
  TestStreamCodec serDe = new TestStreamCodec();
  dag.setInputPortAttribute(node2.inport1, Context.PortContext.STREAM_CODEC, serDe);
  GenericTestOperator node3 = dag.addOperator("node3", GenericTestOperator.class);
  dag.setOperatorAttribute(node3, Context.OperatorContext.PARTITIONER, new StatelessPartitioner<GenericTestOperator>(3));
  TestStreamCodec serDe2 = new TestStreamCodec();
  dag.setInputPortAttribute(node3.inport1, Context.PortContext.STREAM_CODEC, serDe2);

  dag.addStream("n1n2n3", node1.outport1, node2.inport1, node3.inport1);

  dag.setAttribute(LogicalPlan.CONTAINERS_MAX_COUNT, Integer.MAX_VALUE);
  StramTestSupport.MemoryStorageAgent msa = new StramTestSupport.MemoryStorageAgent();
  dag.setAttribute(Context.OperatorContext.STORAGE_AGENT, msa);

  StreamingContainerManager dnm = new StreamingContainerManager(dag);
  PhysicalPlan plan = dnm.getPhysicalPlan();

  List<PTContainer> containers = plan.getContainers();

  for (int i = 0; i < containers.size(); ++i) {
    StreamingContainerManagerTest.assignContainer(dnm, "container" + (i + 1));
  }

  LogicalPlan.OperatorMeta n1meta = dag.getMeta(node1);
  LogicalPlan.OperatorMeta n2meta = dag.getMeta(node2);
  LogicalPlan.OperatorMeta n3meta = dag.getMeta(node3);

  // Sanity check that physical operators have been allocated for n1meta, n2meta and n3meta
  Assert.assertEquals("number operators " + n1meta.getName(), 2, plan.getOperators(n1meta).size());
  Assert.assertEquals("number operators " + n2meta.getName(), 3, plan.getOperators(n2meta).size());
  Assert.assertEquals("number operators " + n3meta.getName(), 3, plan.getOperators(n3meta).size());

  checkMxNStreamCodecs(node1, node2, node3, dnm);
}
 
Example #23
Source File: OutputUnifiedTest.java    From attic-apex-core with Apache License 2.0
@Test
public void testParallelPartition() throws Exception
{
  TestInputOperator i1 = new TestInputOperator();
  dag.addOperator("i1", i1);

  dag.setOperatorAttribute(i1, OperatorContext.PARTITIONER, new StatelessPartitioner<GenericTestOperator>(2));

  GenericTestOperator op1 = new GenericTestOperator();
  dag.addOperator("op1", op1);

  dag.setInputPortAttribute(op1.inport1, PortContext.PARTITION_PARALLEL, true);

  TestOutputOperator op2 = new TestOutputOperator();
  dag.addOperator("op2", op2);

  dag.addStream("s1", i1.output, op1.inport1);
  dag.addStream("s2", op1.outport1, op2.inport);

  StreamingContainerManager scm = new StreamingContainerManager(dag);
  PhysicalPlan physicalPlan = scm.getPhysicalPlan();
  List<PTContainer> containers = physicalPlan.getContainers();
  Assert.assertEquals("Number of containers", 5, containers.size());

  assignContainers(scm, containers);

  testOutputAttribute(dag, i1, scm, physicalPlan, false);
  testOutputAttribute(dag, op1, scm, physicalPlan, true);
}
 
Example #24
Source File: OutputUnifiedTest.java    From attic-apex-core with Apache License 2.0
@Test
public void testMxNPartition() throws Exception
{
  TestInputOperator i1 = new TestInputOperator();
  dag.addOperator("i1", i1);

  GenericTestOperator op1 = new GenericTestOperator();
  dag.addOperator("op1", op1);

  dag.setOperatorAttribute(op1, OperatorContext.PARTITIONER, new StatelessPartitioner<GenericTestOperator>(3));

  TestOutputOperator op2 = new TestOutputOperator();
  dag.addOperator("op2", op2);

  dag.setOperatorAttribute(op2, OperatorContext.PARTITIONER, new StatelessPartitioner<GenericTestOperator>(2));

  dag.addStream("s1", i1.output, op1.inport1);
  dag.addStream("s2", op1.outport1, op2.inport);

  StreamingContainerManager scm = new StreamingContainerManager(dag);
  PhysicalPlan physicalPlan = scm.getPhysicalPlan();
  List<PTContainer> containers = physicalPlan.getContainers();
  Assert.assertEquals("Number of containers", 6, containers.size());

  assignContainers(scm, containers);

  testOutputAttribute(dag, i1, scm, physicalPlan, false);
  testOutputAttribute(dag, op1, scm, physicalPlan, true);

}
 
Example #25
Source File: OutputUnifiedTest.java    From attic-apex-core with Apache License 2.0
@Test
public void testManyToOnePartition() throws Exception
{
  TestInputOperator i1 = new TestInputOperator();
  dag.addOperator("i1", i1);

  GenericTestOperator op1 = new GenericTestOperator();
  dag.addOperator("op1", op1);

  dag.setOperatorAttribute(op1, OperatorContext.PARTITIONER, new StatelessPartitioner<GenericTestOperator>(3));

  TestOutputOperator op2 = new TestOutputOperator();
  dag.addOperator("op2", op2);

  dag.addStream("s1", i1.output, op1.inport1);
  dag.addStream("s2", op1.outport1, op2.inport);

  StreamingContainerManager scm = new StreamingContainerManager(dag);
  PhysicalPlan physicalPlan = scm.getPhysicalPlan();
  List<PTContainer> containers = physicalPlan.getContainers();
  Assert.assertEquals("Number of containers", 5, containers.size());

  assignContainers(scm, containers);

  testOutputAttribute(dag, i1, scm, physicalPlan, false);
  testOutputAttribute(dag, op1, scm, physicalPlan, true);

}
 
Example #26
Source File: CustomControlTupleTest.java    From attic-apex-core with Apache License 2.0
@Override
public void populateDAG(DAG dag, Configuration conf)
{
  Generator randomGenerator = dag.addOperator("randomGenerator", Generator.class);
  DefaultProcessor processor = dag.addOperator("process", DefaultProcessor.class);
  ControlAwareReceiver receiver = dag.addOperator("receiver", ControlAwareReceiver.class);
  dag.addStream("genToProcessor", randomGenerator.out, processor.input);
  dag.addStream("ProcessorToReceiver", processor.output, receiver.input);
  dag.setOperatorAttribute(processor, Context.OperatorContext.PARTITIONER, new StatelessPartitioner<>(2));
}
 
Example #27
Source File: StreamPersistanceTests.java    From attic-apex-core with Apache License 2.0
@Test
public void testDynamicPartitioning() throws ClassNotFoundException, IOException
{
  AscendingNumbersOperator ascend = dag.addOperator("ascend", new AscendingNumbersOperator());

  final TestReceiverOperator console = dag.addOperator("console", new TestReceiverOperator());
  dag.setOperatorAttribute(console, Context.OperatorContext.PARTITIONER, new StatelessPartitioner<TestReceiverOperator>(2));
  dag.setOperatorAttribute(console, Context.OperatorContext.STATS_LISTENERS, Lists.newArrayList((StatsListener)new PartitioningTest.PartitionLoadWatch()));

  final PartitionedTestPersistanceOperator console1 = new PartitionedTestPersistanceOperator();

  StreamMeta s = dag.addStream("Stream1", ascend.outputPort, console.inport);
  dag.setInputPortAttribute(console.inport, PortContext.STREAM_CODEC, new TestPartitionCodec());
  s.persistUsing("persister", console1, console1.inport);

  dag.setAttribute(LogicalPlan.CONTAINERS_MAX_COUNT, Integer.MAX_VALUE);
  StramTestSupport.MemoryStorageAgent msa = new StramTestSupport.MemoryStorageAgent();
  dag.setAttribute(Context.OperatorContext.STORAGE_AGENT, msa);

  StreamingContainerManager dnm = new StreamingContainerManager(dag);
  PhysicalPlan plan = dnm.getPhysicalPlan();

  List<PTContainer> containers = plan.getContainers();
  Assert.assertEquals("number containers", 4, containers.size());

  for (int i = 0; i < containers.size(); ++i) {
    StreamingContainerManagerTest.assignContainer(dnm, "container" + (i + 1));
  }

  LogicalPlan.OperatorMeta passThruMeta = dag.getMeta(console);

  List<PTOperator> ptos = plan.getOperators(passThruMeta);

  PTOperator persistOperatorContainer = null;

  for (PTContainer container : plan.getContainers()) {
    for (PTOperator operator : container.getOperators()) {
      operator.setState(PTOperator.State.ACTIVE);
      if (operator.getName().equals("persister")) {
        persistOperatorContainer = operator;
      }
    }
  }

  // Check that persist operator is part of dependents redeployed
  Set<PTOperator> operators = plan.getDependents(ptos);
  logger.debug("Operators to be re-deployed = {}", operators);
  // Validate that persist operator is part of dependents
  assertTrue("persist operator should be part of the operators to be redeployed", operators.contains(persistOperatorContainer));

  LogicalPlan.StreamMeta s1 = (LogicalPlan.StreamMeta)s;
  StreamCodec codec = s1.getPersistOperatorInputPort().getStreamCodec();

  assertEquals("Codec should be instance of StreamCodecWrapper", codec instanceof StreamCodecWrapperForPersistance, true);
  StreamCodecWrapperForPersistance wrapperCodec = (StreamCodecWrapperForPersistance)codec;

  Entry<InputPortMeta, Collection<PartitionKeys>> keys = (Entry<InputPortMeta, Collection<PartitionKeys>>)wrapperCodec.inputPortToPartitionMap.entrySet().iterator().next();
  logger.debug(keys.toString());
  assertEquals("Size of partitions should be 2", 2, keys.getValue().size());

  for (PTOperator ptOperator : ptos) {
    PartitioningTest.PartitionLoadWatch.put(ptOperator, -1);
    plan.onStatusUpdate(ptOperator);
  }

  dnm.processEvents();

  assertEquals("Input port map", wrapperCodec.inputPortToPartitionMap.size(), 1);

  keys = (Entry<InputPortMeta, Collection<PartitionKeys>>)wrapperCodec.inputPortToPartitionMap.entrySet().iterator().next();
  assertEquals("Size of partitions should be 1 after repartition", 1, keys.getValue().size());
  logger.debug(keys.toString());
}
 
Example #28
Source File: PhysicalPlanTest.java    From attic-apex-core with Apache License 2.0
@Test
public void testSingleFinalUnifierInputOverride()
{
  LogicalPlan dag = new LogicalPlan();

  GenericTestOperator o1 = dag.addOperator("o1", GenericTestOperator.class);
  dag.setOperatorAttribute(o1, OperatorContext.PARTITIONER, new StatelessPartitioner<GenericTestOperator>(3));
  OperatorMeta o1Meta = dag.getMeta(o1);

  GenericTestOperator o2 =  dag.addOperator("o2", GenericTestOperator.class);
  dag.setOperatorAttribute(o2, OperatorContext.PARTITIONER, new StatelessPartitioner<GenericTestOperator>(2));
  dag.setInputPortAttribute(o2.inport1, PortContext.UNIFIER_SINGLE_FINAL, true);
  OperatorMeta o2Meta = dag.getMeta(o2);

  dag.addStream("o1.outport1", o1.outport1, o2.inport1);

  dag.setAttribute(LogicalPlan.CONTAINERS_MAX_COUNT, 10);

  TestPlanContext ctx = new TestPlanContext();
  dag.setAttribute(OperatorContext.STORAGE_AGENT, ctx);

  PhysicalPlan plan = new PhysicalPlan(dag, ctx);
  Assert.assertEquals("number of containers", 6, plan.getContainers().size());

  Assert.assertEquals("o1 merge unifiers", 1, plan.getMergeOperators(o1Meta).size());

  dag.setOutputPortAttribute(o1.outport1, PortContext.UNIFIER_SINGLE_FINAL, false);
  ctx = new TestPlanContext();
  dag.setAttribute(OperatorContext.STORAGE_AGENT, ctx);
  plan = new PhysicalPlan(dag, ctx);
  Assert.assertEquals("number of containers", 6, plan.getContainers().size());

  Assert.assertEquals("o1 merge unifiers", 1, plan.getMergeOperators(o1Meta).size());

  dag.setOutputPortAttribute(o1.outport1, PortContext.UNIFIER_SINGLE_FINAL, true);
  dag.setInputPortAttribute(o2.inport1, PortContext.UNIFIER_SINGLE_FINAL, false);
  ctx = new TestPlanContext();
  dag.setAttribute(OperatorContext.STORAGE_AGENT, ctx);
  plan = new PhysicalPlan(dag, ctx);
  Assert.assertEquals("number of containers", 5, plan.getContainers().size());

  Set<String> expectedNames = Sets.newHashSet(o1Meta.getMeta(o1.outport1).getUnifierMeta().getName(), o2Meta.getName());
  for (int i = 3; i < 5; ++i) {
    PTContainer container = plan.getContainers().get(i);
    Assert.assertEquals("o2 container size", 2, container.getOperators().size());

    Set<String> names = Sets.newHashSet();
    for (PTOperator operator : container.getOperators()) {
      names.add(operator.getOperatorMeta().getName());
    }
    Assert.assertEquals("o2 container operators", expectedNames, names);
  }
}
 
Example #29
Source File: PhysicalPlanTest.java    From attic-apex-core with Apache License 2.0
@Test
@SuppressWarnings("unchecked")
public void testInline()
{

  LogicalPlan dag = new LogicalPlan();

  GenericTestOperator o1 = dag.addOperator("o1", GenericTestOperator.class);
  GenericTestOperator o2 = dag.addOperator("o2", GenericTestOperator.class);
  GenericTestOperator o3 = dag.addOperator("o3", GenericTestOperator.class);

  PartitioningTestOperator partOperator = dag.addOperator("partNode", PartitioningTestOperator.class);
  partOperator.partitionKeys = new Integer[] {0,1};
  dag.getMeta(partOperator).getAttributes().put(OperatorContext.PARTITIONER, new StatelessPartitioner<GenericTestOperator>(partOperator.partitionKeys.length));

  dag.addStream("o1_outport1", o1.outport1, o2.inport1, o3.inport1, partOperator.inport1)
          .setLocality(null);

  // same container for o2 and o3
  dag.addStream("o2_outport1", o2.outport1, o3.inport2)
      .setLocality(Locality.CONTAINER_LOCAL);

  dag.addStream("o3_outport1", o3.outport1, partOperator.inport2);

  int maxContainers = 4;
  dag.setAttribute(LogicalPlan.CONTAINERS_MAX_COUNT, maxContainers);
  dag.setAttribute(OperatorContext.STORAGE_AGENT, new TestPlanContext());

  PhysicalPlan plan = new PhysicalPlan(dag, new TestPlanContext());
  Assert.assertEquals("number of containers", maxContainers, plan.getContainers().size());
  Assert.assertEquals("operators container 0", 1, plan.getContainers().get(0).getOperators().size());

  Assert.assertEquals("operators container 0", 1, plan.getContainers().get(0).getOperators().size());
  Set<OperatorMeta> c2ExpNodes = Sets.newHashSet(dag.getMeta(o2), dag.getMeta(o3));
  Set<OperatorMeta> c2ActNodes = new HashSet<>();
  PTContainer c2 = plan.getContainers().get(1);
  for (PTOperator pNode : c2.getOperators()) {
    c2ActNodes.add(pNode.getOperatorMeta());
  }
  Assert.assertEquals("operators " + c2, c2ExpNodes, c2ActNodes);

  // one container per partition
  OperatorMeta partOperMeta = dag.getMeta(partOperator);
  List<PTOperator> partitions = plan.getOperators(partOperMeta);
  for (PTOperator partition : partitions) {
    Assert.assertEquals("operators container" + partition, 1, partition.getContainer().getOperators().size());
  }

}
 
Example #30
Source File: PhysicalPlanTest.java    From attic-apex-core with Apache License 2.0
/**
 * Test that the unifier is removed when the number of partitions drops to 1.
 */
@Test
public void testRepartitioningScaleDownSinglePartition()
{
  LogicalPlan dag = new LogicalPlan();

  TestInputOperator<?> o1 = dag.addOperator("o1", TestInputOperator.class);
  GenericTestOperator o2 = dag.addOperator("o2", GenericTestOperator.class);

  dag.addStream("o1.outport1", o1.output, o2.inport1);
  OperatorMeta o1Meta = dag.getMeta(o1);
  dag.setOperatorAttribute(o1, OperatorContext.PARTITIONER, new StatelessPartitioner<GenericTestOperator>(2));
  dag.setOperatorAttribute(o1, OperatorContext.STATS_LISTENERS, Arrays.asList(new StatsListener[]{new PartitioningTest.PartitionLoadWatch()}));

  TestPlanContext ctx = new TestPlanContext();
  dag.setAttribute(OperatorContext.STORAGE_AGENT, ctx);
  PhysicalPlan plan = new PhysicalPlan(dag, ctx);

  List<PTOperator> o1Partitions = plan.getOperators(o1Meta);
  Assert.assertEquals("partitions " + o1Partitions, 2, o1Partitions.size());
  PTOperator o1p1 = o1Partitions.get(0);
  PTOperator p1Doper = o1p1.getOutputs().get(0).sinks.get(0).target;
  Assert.assertSame("", p1Doper.getOperatorMeta(), o1Meta.getMeta(o1.output).getUnifierMeta());
  Assert.assertTrue("unifier ", p1Doper.isUnifier());
  Assert.assertEquals("Unifiers " + o1Meta, 1, o1p1.getOutputs().get(0).sinks.size());

  Collection<PTOperator> o1Unifiers = new ArrayList<>(plan.getOperators(dag.getMeta(o2)).get(0).upstreamMerge.values());

  StatsListener l = o1p1.statsListeners.get(0);
  Assert.assertTrue("stats handlers " + o1p1.statsListeners, l instanceof PartitioningTest.PartitionLoadWatch);
  PartitioningTest.PartitionLoadWatch.put(o1p1, -1);
  PartitioningTest.PartitionLoadWatch.put(o1Partitions.get(1), -1);

  plan.onStatusUpdate(o1p1);
  plan.onStatusUpdate(o1Partitions.get(1));
  Assert.assertEquals("partition scaling triggered", 1, ctx.events.size());
  ctx.events.remove(0).run();

  List<PTOperator> o1NewPartitions = plan.getOperators(o1Meta);
  Assert.assertEquals("partitions " + o1NewPartitions, 1, o1NewPartitions.size());

  List<PTOperator> o1NewUnifiers = new ArrayList<>(plan.getOperators(dag.getMeta(o2)).get(0).upstreamMerge.values());

  Assert.assertEquals("unifiers " + o1Meta, 0, o1NewUnifiers.size());
  p1Doper = o1p1.getOutputs().get(0).sinks.get(0).target;
  Assert.assertTrue("", p1Doper.getOperatorMeta() == dag.getMeta(o2));
  Assert.assertFalse("unifier ", p1Doper.isUnifier());

  Assert.assertTrue("removed unifier from deployment " + ctx.undeploy, ctx.undeploy.containsAll(o1Unifiers));
  Assert.assertFalse("removed unifier from deployment " + ctx.deploy, ctx.deploy.containsAll(o1Unifiers));

  // scale up, ensure the unifier is set up at the activation checkpoint
  setActivationCheckpoint(o1NewPartitions.get(0), 3);
  PartitioningTest.PartitionLoadWatch.put(o1NewPartitions.get(0), 1);
  plan.onStatusUpdate(o1NewPartitions.get(0));
  Assert.assertEquals("partition scaling triggered", 1, ctx.events.size());
  ctx.events.remove(0).run();

  o1NewUnifiers.addAll(plan.getOperators(dag.getMeta(o2)).get(0).upstreamMerge.values());

  Assert.assertEquals("unifiers " + o1Meta, 1, o1NewUnifiers.size());
  Assert.assertEquals("unifier activation checkpoint " + o1Meta, 3, o1NewUnifiers.get(0).recoveryCheckpoint.windowId);
}