Java Code Examples for com.datatorrent.api.DefaultPartition#getRequiredPartitionCount()

The following examples show how to use com.datatorrent.api.DefaultPartition#getRequiredPartitionCount(). The originating project and source file are noted above each example.
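As the examples below suggest, the method is typically called at the top of a custom Partitioner's definePartitions() to resolve the effective partition count: the count requested through the PartitioningContext when one is supplied, otherwise a locally configured default. The snippet that follows is a minimal sketch of that pattern, not taken from any of the examples; MyOperator, the partitionCount field, and the enclosing Partitioner class are assumed for illustration.

@Override
public Collection<Partition<MyOperator>> definePartitions(Collection<Partition<MyOperator>> partitions, PartitioningContext context)
{
  // Resolve the target count from the context, falling back to this.partitionCount (hypothetical field).
  final int requiredCount = DefaultPartition.getRequiredPartitionCount(context, this.partitionCount);

  // Create one partition per required slot; MyOperator is a placeholder operator type.
  List<Partition<MyOperator>> newPartitions = new ArrayList<>(requiredCount);
  for (int i = 0; i < requiredCount; i++) {
    newPartitions.add(new DefaultPartition<>(new MyOperator()));
  }

  // Distribute partition keys over the first connected input port, if any, as in Examples 2 and 3.
  List<InputPort<?>> inputs = context.getInputPorts();
  if (inputs != null && !inputs.isEmpty()) {
    DefaultPartition.assignPartitionKeys(newPartitions, inputs.get(0));
  }
  return newPartitions;
}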
Example 1
Source File: PhysicalPlanTest.java    From attic-apex-core with Apache License 2.0
@Override
public Collection<Partition<PartitioningTestOperator>> definePartitions(Collection<Partition<PartitioningTestOperator>> partitions, PartitioningContext context)
{
  final int newPartitionCount = DefaultPartition.getRequiredPartitionCount(context, this.partitionCount);

  // When the key set is not fixed, regenerate one partition key per requested partition.
  if (!fixedCapacity) {
    partitionKeys = new Integer[newPartitionCount];
    for (int i = 0; i < partitionKeys.length; i++) {
      partitionKeys[i] = i;
    }
  }

  // Create one new partition per key and assign the same keys to both input ports.
  List<Partition<PartitioningTestOperator>> newPartitions = new ArrayList<>(this.partitionKeys.length);
  for (Integer partitionKey : partitionKeys) {
    PartitioningTestOperator temp = new PartitioningTestOperator();
    temp.setPartitionCount(newPartitionCount);
    Partition<PartitioningTestOperator> p = new DefaultPartition<>(temp);
    PartitionKeys lpks = new PartitionKeys(2, Sets.newHashSet(partitionKey));
    p.getPartitionKeys().put(this.inport1, lpks);
    p.getPartitionKeys().put(this.inportWithCodec, lpks);
    p.getPartitionedInstance().pks = p.getPartitionKeys().values().toString();
    newPartitions.add(p);
  }

  return newPartitions;
}
 
Example 2
Source File: StatelessPartitioner.java    From attic-apex-core with Apache License 2.0
@Override
public Collection<Partition<T>> definePartitions(Collection<Partition<T>> partitions, PartitioningContext context)
{
  final int newPartitionCount = DefaultPartition.getRequiredPartitionCount(context, this.partitionCount);
  logger.debug("define partitions, partitionCount current {} requested {}", partitions.size(), newPartitionCount);

  //Get a partition
  DefaultPartition<T> partition = (DefaultPartition<T>)partitions.iterator().next();
  Collection<Partition<T>> newPartitions;

  if (partitions.iterator().next().getStats() == null) {
    // first call to define partitions
    newPartitions = Lists.newArrayList();

    for (int partitionCounter = 0; partitionCounter < newPartitionCount; partitionCounter++) {
      newPartitions.add(new DefaultPartition<>(partition.getPartitionedInstance()));
    }

    // partition the stream that was first connected in the DAG and send full data to remaining input ports
    // this gives control over which stream to partition under default partitioning to the DAG writer
    List<InputPort<?>> inputPortList = context.getInputPorts();
    if (inputPortList != null && !inputPortList.isEmpty()) {
      DefaultPartition.assignPartitionKeys(newPartitions, inputPortList.iterator().next());
    }
  } else {
    // define partitions is being called again
    if (context.getParallelPartitionCount() != 0) {
      newPartitions = repartitionParallel(partitions, context);
    } else if (partition.getPartitionKeys().isEmpty()) {
      newPartitions = repartitionInputOperator(partitions);
    } else {
      newPartitions = repartition(partitions);
    }
  }

  logger.debug("new partition size {}", newPartitions.size());
  return newPartitions;
}
 
Example 3
Source File: UniqueValueCountAppender.java    From attic-apex-malhar with Apache License 2.0
/**
 * Assigns the partitions according to certain key values and keeps track of the
 * keys that each partition will be processing so that in the case of a
 * rollback, each partition will only clear the data that it is responsible for.
 */
@Override
public Collection<com.datatorrent.api.Partitioner.Partition<UniqueValueCountAppender<V>>> definePartitions(Collection<com.datatorrent.api.Partitioner.Partition<UniqueValueCountAppender<V>>> partitions, PartitioningContext context)
{
  final int finalCapacity = DefaultPartition.getRequiredPartitionCount(context, this.partitionCount);
  UniqueValueCountAppender<V> anOldOperator = partitions.iterator().next().getPartitionedInstance();
  partitions.clear();

  Collection<Partition<UniqueValueCountAppender<V>>> newPartitions = Lists.newArrayListWithCapacity(finalCapacity);

  for (int i = 0; i < finalCapacity; i++) {
    try {
      @SuppressWarnings("unchecked")
      UniqueValueCountAppender<V> statefulUniqueCount = this.getClass().newInstance();
      DefaultPartition<UniqueValueCountAppender<V>> partition = new DefaultPartition<UniqueValueCountAppender<V>>(statefulUniqueCount);
      newPartitions.add(partition);
    } catch (Throwable cause) {
      DTThrowable.rethrow(cause);
    }
  }

  DefaultPartition.assignPartitionKeys(Collections.unmodifiableCollection(newPartitions), input);
  int lPartitionMask = newPartitions.iterator().next().getPartitionKeys().get(input).mask;

  for (Partition<UniqueValueCountAppender<V>> statefulUniqueCountPartition : newPartitions) {
    UniqueValueCountAppender<V> statefulUniqueCountInstance = statefulUniqueCountPartition.getPartitionedInstance();

    statefulUniqueCountInstance.partitionKeys = statefulUniqueCountPartition.getPartitionKeys().get(input).partitions;
    statefulUniqueCountInstance.partitionMask = lPartitionMask;
    statefulUniqueCountInstance.store = anOldOperator.store;
    statefulUniqueCountInstance.tableName = anOldOperator.tableName;
    statefulUniqueCountInstance.cacheManager = anOldOperator.cacheManager;
  }
  return newPartitions;
}
 
Example 4
Source File: AbstractFileInputOperator.java    From attic-apex-malhar with Apache License 2.0
protected int getNewPartitionCount(Collection<Partition<AbstractFileInputOperator<T>>> partitions, PartitioningContext context)
{
  return DefaultPartition.getRequiredPartitionCount(context, this.partitionCount);
}