org.apache.flink.runtime.io.network.buffer.BufferPoolOwner Java Examples
The following examples show how to use
org.apache.flink.runtime.io.network.buffer.BufferPoolOwner.
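All of the examples below revolve around the factory type FunctionWithException<BufferPoolOwner, BufferPool, IOException>, which maps a BufferPoolOwner to the BufferPool that owner should draw buffers from. As a minimal sketch of that shape (the class and method names here are illustrative and not taken from the examples below; it assumes the two-argument BufferPoolFactory#createBufferPool(numRequiredBuffers, maxUsedBuffers) overload):

import java.io.IOException;

import org.apache.flink.runtime.io.network.buffer.BufferPool;
import org.apache.flink.runtime.io.network.buffer.BufferPoolFactory;
import org.apache.flink.runtime.io.network.buffer.BufferPoolOwner;
import org.apache.flink.util.function.FunctionWithException;

class BufferPoolFactorySketch {

    // Illustrative helper (not part of Flink): builds a factory that ignores its
    // BufferPoolOwner argument and asks the given BufferPoolFactory (for example a
    // NetworkBufferPool) for an unbounded pool with one buffer per subpartition plus
    // one spare, mirroring the "+ 1" sizing rule used in the examples below.
    static FunctionWithException<BufferPoolOwner, BufferPool, IOException> simpleFactory(
            BufferPoolFactory networkBufferPool,
            int numberOfSubpartitions) {
        return owner -> networkBufferPool.createBufferPool(numberOfSubpartitions + 1, Integer.MAX_VALUE);
    }
}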
Example #1
Source File: ResultPartition.java (from flink, Apache License 2.0)

public ResultPartition(
        String owningTaskName,
        ResultPartitionID partitionId,
        ResultPartitionType partitionType,
        ResultSubpartition[] subpartitions,
        int numTargetKeyGroups,
        ResultPartitionManager partitionManager,
        FunctionWithException<BufferPoolOwner, BufferPool, IOException> bufferPoolFactory) {

    this.owningTaskName = checkNotNull(owningTaskName);
    this.partitionId = checkNotNull(partitionId);
    this.partitionType = checkNotNull(partitionType);
    this.subpartitions = checkNotNull(subpartitions);
    this.numTargetKeyGroups = numTargetKeyGroups;
    this.partitionManager = checkNotNull(partitionManager);
    this.bufferPoolFactory = bufferPoolFactory;
}
Example #2
Source File: ResultPartitionBuilder.java (from flink, Apache License 2.0)

public ResultPartition build() {
    ResultPartitionFactory resultPartitionFactory = new ResultPartitionFactory(
        partitionManager,
        channelManager,
        networkBufferPool,
        blockingSubpartitionType,
        networkBuffersPerChannel,
        floatingNetworkBuffersPerGate,
        networkBufferSize,
        releasedOnConsumption);

    FunctionWithException<BufferPoolOwner, BufferPool, IOException> factory = bufferPoolFactory.orElseGet(() ->
        resultPartitionFactory.createBufferPoolFactory(numberOfSubpartitions, partitionType));

    return resultPartitionFactory.create(
        "Result Partition task",
        partitionId,
        partitionType,
        numberOfSubpartitions,
        numTargetKeyGroups,
        factory);
}
Example #3
Source File: ResultPartition.java (from flink, Apache License 2.0)

public ResultPartition(
        String owningTaskName,
        int partitionIndex,
        ResultPartitionID partitionId,
        ResultPartitionType partitionType,
        ResultSubpartition[] subpartitions,
        int numTargetKeyGroups,
        ResultPartitionManager partitionManager,
        @Nullable BufferCompressor bufferCompressor,
        FunctionWithException<BufferPoolOwner, BufferPool, IOException> bufferPoolFactory) {

    this.owningTaskName = checkNotNull(owningTaskName);

    Preconditions.checkArgument(0 <= partitionIndex, "The partition index must be positive.");
    this.partitionIndex = partitionIndex;

    this.partitionId = checkNotNull(partitionId);
    this.partitionType = checkNotNull(partitionType);
    this.subpartitions = checkNotNull(subpartitions);
    this.numTargetKeyGroups = numTargetKeyGroups;
    this.partitionManager = checkNotNull(partitionManager);
    this.bufferCompressor = bufferCompressor;
    this.bufferPoolFactory = bufferPoolFactory;
}
Example #4
Source File: ResultPartitionFactory.java (from flink, Apache License 2.0)

/**
 * The minimum pool size should be <code>numberOfSubpartitions + 1</code> for two reasons:
 *
 * <p>1. A StreamTask can only process input if at least one buffer is available on the output side,
 * so the task might get stuck if the minimum pool size is exactly equal to the number of
 * subpartitions, because every subpartition might hold a partially filled buffer.
 *
 * <p>2. Adding one more buffer to every output LocalBufferPool avoids a performance regression
 * when processing input depends on at least one buffer being available on the output side.
 */
@VisibleForTesting
FunctionWithException<BufferPoolOwner, BufferPool, IOException> createBufferPoolFactory(
        int numberOfSubpartitions,
        ResultPartitionType type) {
    return bufferPoolOwner -> {
        int maxNumberOfMemorySegments = type.isBounded() ?
            numberOfSubpartitions * networkBuffersPerChannel + floatingNetworkBuffersPerGate :
            Integer.MAX_VALUE;
        // If the partition type is back pressure-free, we register the owner with the buffer pool
        // for callbacks to release memory.
        return bufferPoolFactory.createBufferPool(
            numberOfSubpartitions + 1,
            maxNumberOfMemorySegments,
            type.hasBackPressure() ? null : bufferPoolOwner,
            numberOfSubpartitions,
            maxBuffersPerChannel);
    };
}
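To make the sizing rule above concrete, here is a small worked example with illustrative, hypothetical configuration values (they are not taken from the snippet above and actual values depend on your configuration):

// Illustrative numbers only (hypothetical configuration):
int numberOfSubpartitions = 4;
int networkBuffersPerChannel = 2;        // exclusive buffers per outgoing channel
int floatingNetworkBuffersPerGate = 8;   // extra floating buffers per gate

int minPoolSize = numberOfSubpartitions + 1;               // 5: one per subpartition plus one spare
int maxNumberOfMemorySegments =                            // only bounded partition types use this cap
    numberOfSubpartitions * networkBuffersPerChannel + floatingNetworkBuffersPerGate;  // 4 * 2 + 8 = 16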
Example #5
Source File: ReleaseOnConsumptionResultPartition.java (from flink, Apache License 2.0)

ReleaseOnConsumptionResultPartition(
        String owningTaskName,
        int partitionIndex,
        ResultPartitionID partitionId,
        ResultPartitionType partitionType,
        ResultSubpartition[] subpartitions,
        int numTargetKeyGroups,
        ResultPartitionManager partitionManager,
        @Nullable BufferCompressor bufferCompressor,
        FunctionWithException<BufferPoolOwner, BufferPool, IOException> bufferPoolFactory) {
    super(
        owningTaskName,
        partitionIndex,
        partitionId,
        partitionType,
        subpartitions,
        numTargetKeyGroups,
        partitionManager,
        bufferCompressor,
        bufferPoolFactory);

    this.consumedSubpartitions = new boolean[subpartitions.length];
    this.numUnconsumedSubpartitions = subpartitions.length;
}
Example #6
Source File: ResultPartitionBuilder.java (from flink, Apache License 2.0)

public ResultPartition build() {
    ResultPartitionFactory resultPartitionFactory = new ResultPartitionFactory(
        partitionManager,
        channelManager,
        networkBufferPool,
        blockingSubpartitionType,
        networkBuffersPerChannel,
        floatingNetworkBuffersPerGate,
        networkBufferSize,
        releasedOnConsumption,
        blockingShuffleCompressionEnabled,
        compressionCodec,
        maxBuffersPerChannel);

    FunctionWithException<BufferPoolOwner, BufferPool, IOException> factory = bufferPoolFactory.orElseGet(() ->
        resultPartitionFactory.createBufferPoolFactory(numberOfSubpartitions, partitionType));

    return resultPartitionFactory.create(
        "Result Partition task",
        partitionIndex,
        partitionId,
        partitionType,
        numberOfSubpartitions,
        numTargetKeyGroups,
        factory);
}
Example #7
Source File: ResultPartitionFactory.java (from flink, Apache License 2.0)

@VisibleForTesting
public ResultPartition create(
        String taskNameWithSubtaskAndId,
        ResultPartitionID id,
        ResultPartitionType type,
        int numberOfSubpartitions,
        int maxParallelism,
        FunctionWithException<BufferPoolOwner, BufferPool, IOException> bufferPoolFactory) {
    ResultSubpartition[] subpartitions = new ResultSubpartition[numberOfSubpartitions];

    ResultPartition partition = forcePartitionReleaseOnConsumption || !type.isBlocking()
        ? new ReleaseOnConsumptionResultPartition(
            taskNameWithSubtaskAndId,
            id,
            type,
            subpartitions,
            maxParallelism,
            partitionManager,
            bufferPoolFactory)
        : new ResultPartition(
            taskNameWithSubtaskAndId,
            id,
            type,
            subpartitions,
            maxParallelism,
            partitionManager,
            bufferPoolFactory);

    createSubpartitions(partition, type, blockingSubpartitionType, subpartitions);

    LOG.debug("{}: Initialized {}", taskNameWithSubtaskAndId, this);

    return partition;
}
Example #8
Source File: ResultPartitionFactory.java (from flink, Apache License 2.0)

@VisibleForTesting
FunctionWithException<BufferPoolOwner, BufferPool, IOException> createBufferPoolFactory(
        int numberOfSubpartitions,
        ResultPartitionType type) {
    return p -> {
        int maxNumberOfMemorySegments = type.isBounded() ?
            numberOfSubpartitions * networkBuffersPerChannel + floatingNetworkBuffersPerGate :
            Integer.MAX_VALUE;
        // If the partition type is back pressure-free, we register the owner with the buffer pool
        // for callbacks to release memory.
        return bufferPoolFactory.createBufferPool(
            numberOfSubpartitions,
            maxNumberOfMemorySegments,
            type.hasBackPressure() ? Optional.empty() : Optional.of(p));
    };
}
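The owner passed to the pool in the non-back-pressured branch above is the callback target the pool uses when it needs buffers back. As a sketch of what such an owner might look like, assuming BufferPoolOwner's single callback is releaseMemory(int) throwing IOException, as in the Flink versions these examples appear to come from (verify the exact signature against your Flink version):

import java.io.IOException;

import org.apache.flink.runtime.io.network.buffer.BufferPoolOwner;

// Hypothetical owner used only for illustration; it merely records how many buffers
// the pool asked it to give back. Assumes BufferPoolOwner declares
// releaseMemory(int) throws IOException.
class CountingBufferPoolOwner implements BufferPoolOwner {

    private int buffersRequestedToRelease;

    @Override
    public void releaseMemory(int numBuffersToRecycle) throws IOException {
        buffersRequestedToRelease += numBuffersToRecycle;
    }

    int getBuffersRequestedToRelease() {
        return buffersRequestedToRelease;
    }
}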
Example #9
Source File: ReleaseOnConsumptionResultPartition.java (from flink, Apache License 2.0)

ReleaseOnConsumptionResultPartition(
        String owningTaskName,
        ResultPartitionID partitionId,
        ResultPartitionType partitionType,
        ResultSubpartition[] subpartitions,
        int numTargetKeyGroups,
        ResultPartitionManager partitionManager,
        FunctionWithException<BufferPoolOwner, BufferPool, IOException> bufferPoolFactory) {
    super(
        owningTaskName,
        partitionId,
        partitionType,
        subpartitions,
        numTargetKeyGroups,
        partitionManager,
        bufferPoolFactory);

    this.consumedSubpartitions = new boolean[subpartitions.length];
    this.numUnconsumedSubpartitions = subpartitions.length;
}
Example #10
Source File: ResultPartitionBuilder.java (from flink, Apache License 2.0)

public ResultPartitionBuilder setBufferPoolFactory(
        FunctionWithException<BufferPoolOwner, BufferPool, IOException> bufferPoolFactory) {
    this.bufferPoolFactory = Optional.of(bufferPoolFactory);
    return this;
}
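In tests, a custom factory is typically injected through this setter. A hypothetical usage sketch (the networkBufferPool and numberOfSubpartitions variables are assumed to be in scope, and the no-argument builder constructor may differ between Flink versions):

// Hypothetical test wiring; everything except setBufferPoolFactory(...) and build()
// is an assumption for illustration.
ResultPartition partition = new ResultPartitionBuilder()
    .setBufferPoolFactory(owner ->
        networkBufferPool.createBufferPool(numberOfSubpartitions + 1, Integer.MAX_VALUE))
    .build();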
Example #11
Source File: ResultPartitionFactory.java (from flink, Apache License 2.0)

@VisibleForTesting
public ResultPartition create(
        String taskNameWithSubtaskAndId,
        int partitionIndex,
        ResultPartitionID id,
        ResultPartitionType type,
        int numberOfSubpartitions,
        int maxParallelism,
        FunctionWithException<BufferPoolOwner, BufferPool, IOException> bufferPoolFactory) {
    BufferCompressor bufferCompressor = null;
    if (type.isBlocking() && blockingShuffleCompressionEnabled) {
        bufferCompressor = new BufferCompressor(networkBufferSize, compressionCodec);
    }

    ResultSubpartition[] subpartitions = new ResultSubpartition[numberOfSubpartitions];

    ResultPartition partition = forcePartitionReleaseOnConsumption || !type.isBlocking()
        ? new ReleaseOnConsumptionResultPartition(
            taskNameWithSubtaskAndId,
            partitionIndex,
            id,
            type,
            subpartitions,
            maxParallelism,
            partitionManager,
            bufferCompressor,
            bufferPoolFactory)
        : new ResultPartition(
            taskNameWithSubtaskAndId,
            partitionIndex,
            id,
            type,
            subpartitions,
            maxParallelism,
            partitionManager,
            bufferCompressor,
            bufferPoolFactory);

    createSubpartitions(partition, type, blockingSubpartitionType, subpartitions);

    LOG.debug("{}: Initialized {}", taskNameWithSubtaskAndId, this);

    return partition;
}