org.apache.flink.runtime.io.network.partition.consumer.InputGate Java Examples
The following examples show how to use org.apache.flink.runtime.io.network.partition.consumer.InputGate.
Each example lists the project it was taken from, its original source file, and its license.
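Before the individual examples, the sketch below shows the basic consumption pattern that several of them build on: request the partitions, pull BufferOrEvent instances from the gate, and recycle every buffer after use. It is a minimal illustration rather than code from the listed projects; it only relies on InputGate methods that appear in the examples (requestPartitions, getNext, isFinished) and on the buffer recycling shown in Example #8, and exact signatures can differ between Flink versions.

import java.util.Optional;

import org.apache.flink.runtime.io.network.buffer.Buffer;
import org.apache.flink.runtime.io.network.partition.consumer.BufferOrEvent;
import org.apache.flink.runtime.io.network.partition.consumer.InputGate;

final class InputGateDrainSketch {

    // Drains a gate that has already been set up, recycling every buffer it returns.
    static void drain(InputGate gate) throws Exception {
        gate.requestPartitions();
        while (!gate.isFinished()) {
            Optional<BufferOrEvent> next = gate.getNext(); // blocking call in recent versions
            if (!next.isPresent()) {
                // an empty Optional means the gate has delivered all of its data
                break;
            }
            BufferOrEvent boe = next.get();
            if (boe.isBuffer()) {
                Buffer buffer = boe.getBuffer();
                // ... hand the buffer to a record deserializer here ...
                buffer.recycleBuffer();
            }
            // events (e.g. end-of-partition) are reflected in isFinished() on the next iteration
        }
    }
}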
Example #1
Source File: StreamNetworkBenchmarkEnvironment.java From flink with Apache License 2.0

private InputGate createInputGate(TaskManagerLocation senderLocation) throws Exception {
    IndexedInputGate[] gates = new IndexedInputGate[partitionIds.length];
    for (int gateIndex = 0; gateIndex < gates.length; ++gateIndex) {
        final InputGateDeploymentDescriptor gateDescriptor = createInputGateDeploymentDescriptor(
            senderLocation,
            gateIndex,
            location);

        final IndexedInputGate gate = createInputGateWithMetrics(gateFactory, gateDescriptor, gateIndex);
        gate.setup();
        gates[gateIndex] = gate;
    }

    if (gates.length > 1) {
        return new UnionInputGate(gates);
    } else {
        return gates[0];
    }
}
Example #2
Source File: StreamNetworkBenchmarkEnvironment.java From Flink-CEPplus with Apache License 2.0

public SerializingLongReceiver createReceiver() throws Exception {
    TaskManagerLocation senderLocation = new TaskManagerLocation(
        ResourceID.generate(),
        LOCAL_ADDRESS,
        senderEnv.getConnectionManager().getDataPort());

    InputGate receiverGate = createInputGate(
        jobId,
        dataSetID,
        executionAttemptID,
        senderLocation,
        receiverEnv,
        channels);

    SerializingLongReceiver receiver = new SerializingLongReceiver(receiverGate, channels * partitionIds.length);

    receiver.start();
    return receiver;
}
Example #3
Source File: InputProcessorUtil.java From flink with Apache License 2.0

public static CheckpointedInputGate createCheckpointedInputGate(
        AbstractInvokable toNotifyOnCheckpoint,
        CheckpointingMode checkpointMode,
        IOManager ioManager,
        InputGate inputGate,
        Configuration taskManagerConfig,
        String taskName) throws IOException {

    int pageSize = ConfigurationParserUtils.getPageSize(taskManagerConfig);

    BufferStorage bufferStorage = createBufferStorage(
        checkpointMode, ioManager, pageSize, taskManagerConfig, taskName);

    CheckpointBarrierHandler barrierHandler = createCheckpointBarrierHandler(
        checkpointMode,
        inputGate.getNumberOfInputChannels(),
        taskName,
        toNotifyOnCheckpoint);

    return new CheckpointedInputGate(inputGate, bufferStorage, barrierHandler);
}
Example #4
Source File: BatchTask.java From flink with Apache License 2.0

/**
 * Creates the record readers for the number of inputs as defined by {@link #getNumTaskInputs()}.
 *
 * This method requires that the task configuration, the driver, and the user-code class loader are set.
 */
protected void initInputReaders() throws Exception {
    final int numInputs = getNumTaskInputs();
    final MutableReader<?>[] inputReaders = new MutableReader<?>[numInputs];

    int currentReaderOffset = 0;

    for (int i = 0; i < numInputs; i++) {
        // ---------------- create the input readers ---------------------
        // in case where a logical input unions multiple physical inputs, create a union reader
        final int groupSize = this.config.getGroupSize(i);

        if (groupSize == 1) {
            // non-union case
            inputReaders[i] = new MutableRecordReader<IOReadableWritable>(
                    getEnvironment().getInputGate(currentReaderOffset),
                    getEnvironment().getTaskManagerInfo().getTmpDirectories());
        } else if (groupSize > 1) {
            // union case
            InputGate[] readers = new InputGate[groupSize];
            for (int j = 0; j < groupSize; ++j) {
                readers[j] = getEnvironment().getInputGate(currentReaderOffset + j);
            }
            inputReaders[i] = new MutableRecordReader<IOReadableWritable>(
                    new UnionInputGate(readers),
                    getEnvironment().getTaskManagerInfo().getTmpDirectories());
        } else {
            throw new Exception("Illegal input group size in task configuration: " + groupSize);
        }

        currentReaderOffset += groupSize;
    }
    this.inputReaders = inputReaders;

    // final sanity check
    if (currentReaderOffset != this.config.getNumInputs()) {
        throw new Exception("Illegal configuration: Number of input gates and group sizes are not consistent.");
    }
}
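The readers created above are typically driven by a simple pull loop on the task's main thread. The following sketch is illustrative rather than taken from the listed sources; it assumes a MutableRecordReader over some mutable record type, with LongValue chosen here only as an example.

import org.apache.flink.runtime.io.network.api.reader.MutableRecordReader;
import org.apache.flink.types.LongValue;

final class ReaderLoopSketch {

    // Reuses a single record instance; next(...) returns false once the
    // underlying input gate has delivered all of its data.
    static long countRecords(MutableRecordReader<LongValue> reader) throws Exception {
        LongValue reuse = new LongValue();
        long count = 0;
        while (reader.next(reuse)) {
            count++;
        }
        return count;
    }
}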
Example #5
Source File: StreamNetworkBenchmarkEnvironment.java From flink with Apache License 2.0

/**
 * Note: It should be guaranteed that {@link #createResultPartitionWriter(int)} has been
 * called before creating the receiver. Otherwise it might cause unexpected behaviors when
 * {@link org.apache.flink.runtime.io.network.partition.PartitionNotFoundException} happens
 * in {@link SingleInputGateBenchmarkFactory.TestRemoteInputChannel}.
 */
public SerializingLongReceiver createReceiver() throws Exception {
    TaskManagerLocation senderLocation = new TaskManagerLocation(
        ResourceID.generate(),
        LOCAL_ADDRESS,
        dataPort);

    InputGate receiverGate = createInputGate(senderLocation);

    SerializingLongReceiver receiver = new SerializingLongReceiver(receiverGate, channels * partitionIds.length);

    receiver.start();
    return receiver;
}
Example #6
Source File: StreamTask.java From flink with Apache License 2.0

private void readRecoveredChannelState() throws IOException, InterruptedException {
    ChannelStateReader reader = getEnvironment().getTaskStateManager().getChannelStateReader();
    if (!reader.hasChannelStates()) {
        requestPartitions();
        return;
    }

    ResultPartitionWriter[] writers = getEnvironment().getAllWriters();
    if (writers != null) {
        for (ResultPartitionWriter writer : writers) {
            writer.readRecoveredState(reader);
        }
    }

    // Recovering the input side after the output side can be beneficial: it guarantees that
    // the output side can request floating buffers from the global pool first.
    InputGate[] inputGates = getEnvironment().getAllInputGates();
    if (inputGates != null && inputGates.length > 0) {
        CompletableFuture[] futures = new CompletableFuture[inputGates.length];
        for (int i = 0; i < inputGates.length; i++) {
            futures[i] = inputGates[i].readRecoveredState(channelIOExecutor, reader);
        }

        // Note that partitions must be requested only after all the single gates have finished recovery.
        CompletableFuture.allOf(futures).thenRun(() -> mainMailboxExecutor.execute(
            this::requestPartitions, "Input gates request partitions"));
    }
}
Example #7
Source File: InputProcessorUtil.java From flink with Apache License 2.0

/**
 * @return a pair of {@link CheckpointedInputGate} created for two corresponding
 *         {@link InputGate}s supplied as parameters.
 */
public static CheckpointedInputGate[] createCheckpointedInputGatePair(
        AbstractInvokable toNotifyOnCheckpoint,
        CheckpointingMode checkpointMode,
        IOManager ioManager,
        InputGate inputGate1,
        InputGate inputGate2,
        Configuration taskManagerConfig,
        String taskName) throws IOException {

    int pageSize = ConfigurationParserUtils.getPageSize(taskManagerConfig);

    BufferStorage mainBufferStorage1 = createBufferStorage(
        checkpointMode, ioManager, pageSize, taskManagerConfig, taskName);
    BufferStorage mainBufferStorage2 = createBufferStorage(
        checkpointMode, ioManager, pageSize, taskManagerConfig, taskName);
    checkState(mainBufferStorage1.getMaxBufferedBytes() == mainBufferStorage2.getMaxBufferedBytes());

    BufferStorage linkedBufferStorage1 = new LinkedBufferStorage(
        mainBufferStorage1,
        mainBufferStorage2,
        mainBufferStorage1.getMaxBufferedBytes());
    BufferStorage linkedBufferStorage2 = new LinkedBufferStorage(
        mainBufferStorage2,
        mainBufferStorage1,
        mainBufferStorage1.getMaxBufferedBytes());

    CheckpointBarrierHandler barrierHandler = createCheckpointBarrierHandler(
        checkpointMode,
        inputGate1.getNumberOfInputChannels() + inputGate2.getNumberOfInputChannels(),
        taskName,
        toNotifyOnCheckpoint);

    return new CheckpointedInputGate[] {
        new CheckpointedInputGate(inputGate1, linkedBufferStorage1, barrierHandler),
        new CheckpointedInputGate(inputGate2, linkedBufferStorage2, barrierHandler, inputGate1.getNumberOfInputChannels())
    };
}
Example #8
Source File: PartialConsumePipelinedResultTest.java From flink with Apache License 2.0

@Override
public void invoke() throws Exception {
    InputGate gate = getEnvironment().getInputGate(0);

    gate.requestPartitions();

    Buffer buffer = gate.getNext().orElseThrow(IllegalStateException::new).getBuffer();
    if (buffer != null) {
        buffer.recycleBuffer();
    }
}
Example #9
Source File: CheckpointBarrierUnaligner.java From flink with Apache License 2.0

ThreadSafeUnaligner(SubtaskCheckpointCoordinator checkpointCoordinator, CheckpointBarrierUnaligner handler, InputGate... inputGates) {
    storeNewBuffers = Arrays.stream(inputGates)
        .flatMap(gate -> gate.getChannelInfos().stream())
        .collect(Collectors.toMap(Function.identity(), info -> false));
    numOpenChannels = storeNewBuffers.size();
    this.checkpointCoordinator = checkpointCoordinator;
    this.handler = handler;
}
Example #10
Source File: SerializingLongReceiver.java From flink with Apache License 2.0

@SuppressWarnings("WeakerAccess")
public SerializingLongReceiver(InputGate inputGate, int expectedRepetitionsOfExpectedRecord) {
    super(expectedRepetitionsOfExpectedRecord);
    this.reader = new MutableRecordReader<>(
        inputGate,
        new String[]{
            EnvironmentInformation.getTemporaryFileDirectory()
        });
}
Example #11
Source File: CheckpointedInputGate.java From flink with Apache License 2.0

public CheckpointedInputGate(
        InputGate inputGate,
        BufferStorage bufferStorage,
        String taskName,
        @Nullable AbstractInvokable toNotifyOnCheckpoint) {
    this(
        inputGate,
        bufferStorage,
        new CheckpointBarrierAligner(
            inputGate.getNumberOfInputChannels(),
            taskName,
            toNotifyOnCheckpoint)
    );
}
Example #12
Source File: InputGateUtil.java From flink with Apache License 2.0

public static InputGate createInputGate(List<IndexedInputGate> inputGates) {
    if (inputGates.size() <= 0) {
        throw new RuntimeException("No such input gate.");
    }

    if (inputGates.size() == 1) {
        return inputGates.get(0);
    } else {
        return new UnionInputGate(inputGates.toArray(new IndexedInputGate[0]));
    }
}
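The same decision can be written out at a hypothetical call site, as in the sketch below (class and variable names are illustrative, and UnionInputGate constructor signatures differ between Flink versions; this follows the IndexedInputGate variant used above). Returning the single gate directly keeps the common one-input case free of the extra union wrapper.

import java.util.List;

import org.apache.flink.runtime.io.network.partition.consumer.IndexedInputGate;
import org.apache.flink.runtime.io.network.partition.consumer.InputGate;
import org.apache.flink.runtime.io.network.partition.consumer.UnionInputGate;

final class UnifyGatesSketch {

    // Collapses a task's gates into one logical InputGate, mirroring the helper above.
    static InputGate unify(List<IndexedInputGate> gates) {
        if (gates.isEmpty()) {
            throw new IllegalArgumentException("At least one input gate is required.");
        }
        return gates.size() == 1
            ? gates.get(0)
            : new UnionInputGate(gates.toArray(new IndexedInputGate[0]));
    }
}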
Example #13
Source File: BatchTask.java From Flink-CEPplus with Apache License 2.0

/**
 * Creates the record readers for the number of inputs as defined by {@link #getNumTaskInputs()}.
 *
 * This method requires that the task configuration, the driver, and the user-code class loader are set.
 */
protected void initInputReaders() throws Exception {
    final int numInputs = getNumTaskInputs();
    final MutableReader<?>[] inputReaders = new MutableReader<?>[numInputs];

    int currentReaderOffset = 0;

    for (int i = 0; i < numInputs; i++) {
        // ---------------- create the input readers ---------------------
        // in case where a logical input unions multiple physical inputs, create a union reader
        final int groupSize = this.config.getGroupSize(i);

        if (groupSize == 1) {
            // non-union case
            inputReaders[i] = new MutableRecordReader<IOReadableWritable>(
                    getEnvironment().getInputGate(currentReaderOffset),
                    getEnvironment().getTaskManagerInfo().getTmpDirectories());
        } else if (groupSize > 1) {
            // union case
            InputGate[] readers = new InputGate[groupSize];
            for (int j = 0; j < groupSize; ++j) {
                readers[j] = getEnvironment().getInputGate(currentReaderOffset + j);
            }
            inputReaders[i] = new MutableRecordReader<IOReadableWritable>(
                    new UnionInputGate(readers),
                    getEnvironment().getTaskManagerInfo().getTmpDirectories());
        } else {
            throw new Exception("Illegal input group size in task configuration: " + groupSize);
        }

        currentReaderOffset += groupSize;
    }
    this.inputReaders = inputReaders;

    // final sanity check
    if (currentReaderOffset != this.config.getNumInputs()) {
        throw new Exception("Illegal configuration: Number of input gates and group sizes are not consistent.");
    }
}
Example #14
Source File: AbstractRecordReader.java From Flink-CEPplus with Apache License 2.0

/**
 * Creates a new AbstractRecordReader that de-serializes records from the given input gate and
 * can spill partial records to disk, if they grow large.
 *
 * @param inputGate The input gate to read from.
 * @param tmpDirectories The temp directories. Used for spilling if the reader concurrently
 *                       reconstructs multiple large records.
 */
@SuppressWarnings("unchecked")
protected AbstractRecordReader(InputGate inputGate, String[] tmpDirectories) {
    super(inputGate);

    // Initialize one deserializer per input channel
    this.recordDeserializers = new SpillingAdaptiveSpanningRecordDeserializer[inputGate.getNumberOfInputChannels()];

    for (int i = 0; i < recordDeserializers.length; i++) {
        recordDeserializers[i] = new SpillingAdaptiveSpanningRecordDeserializer<T>(tmpDirectories);
    }
}
Example #15
Source File: CheckpointBarrierUnaligner.java From flink with Apache License 2.0

CheckpointBarrierUnaligner(
        SubtaskCheckpointCoordinator checkpointCoordinator,
        String taskName,
        AbstractInvokable toNotifyOnCheckpoint,
        InputGate... inputGates) {
    super(toNotifyOnCheckpoint);

    this.taskName = taskName;

    hasInflightBuffers = Arrays.stream(inputGates)
        .flatMap(gate -> gate.getChannelInfos().stream())
        .collect(Collectors.toMap(Function.identity(), info -> false));

    threadSafeUnaligner = new ThreadSafeUnaligner(checkNotNull(checkpointCoordinator), this, inputGates);
}
Example #16
Source File: StreamNetworkBenchmarkEnvironment.java From flink with Apache License 2.0

public SerializingLongReceiver createReceiver() throws Exception {
    TaskManagerLocation senderLocation = new TaskManagerLocation(
        ResourceID.generate(),
        LOCAL_ADDRESS,
        dataPort);

    InputGate receiverGate = createInputGate(senderLocation);

    SerializingLongReceiver receiver = new SerializingLongReceiver(receiverGate, channels * partitionIds.length);

    receiver.start();
    return receiver;
}
Example #17
Source File: AbstractRecordReader.java From flink with Apache License 2.0

/**
 * Creates a new AbstractRecordReader that de-serializes records from the given input gate and
 * can spill partial records to disk, if they grow large.
 *
 * @param inputGate The input gate to read from.
 * @param tmpDirectories The temp directories. Used for spilling if the reader concurrently
 *                       reconstructs multiple large records.
 */
@SuppressWarnings("unchecked")
protected AbstractRecordReader(InputGate inputGate, String[] tmpDirectories) {
    super(inputGate);

    // Initialize one deserializer per input channel
    recordDeserializers = inputGate.getChannelInfos().stream()
        .collect(Collectors.toMap(
            Function.identity(),
            channelInfo -> new SpillingAdaptiveSpanningRecordDeserializer<>(tmpDirectories)));
}
Example #18
Source File: NettyShuffleEnvironment.java From flink with Apache License 2.0

/**
 * Registers legacy network metric groups before shuffle service refactoring.
 *
 * <p>Registers legacy metric groups if the shuffle service implementation is the original default one.
 *
 * @deprecated should be removed in the future
 */
@SuppressWarnings("DeprecatedIsStillUsed")
@Deprecated
public void registerLegacyNetworkMetrics(
        MetricGroup metricGroup,
        ResultPartitionWriter[] producedPartitions,
        InputGate[] inputGates) {
    NettyShuffleMetricFactory.registerLegacyNetworkMetrics(
        config.isNetworkDetailedMetrics(),
        metricGroup,
        producedPartitions,
        inputGates);
}
Example #19
Source File: OneInputStreamTask.java From Flink-CEPplus with Apache License 2.0

@Override
public void init() throws Exception {
    StreamConfig configuration = getConfiguration();

    TypeSerializer<IN> inSerializer = configuration.getTypeSerializerIn1(getUserCodeClassLoader());
    int numberOfInputs = configuration.getNumberOfInputs();

    if (numberOfInputs > 0) {
        InputGate[] inputGates = getEnvironment().getAllInputGates();

        inputProcessor = new StreamInputProcessor<>(
                inputGates,
                inSerializer,
                this,
                configuration.getCheckpointMode(),
                getCheckpointLock(),
                getEnvironment().getIOManager(),
                getEnvironment().getTaskManagerInfo().getConfiguration(),
                getStreamStatusMaintainer(),
                this.headOperator,
                getEnvironment().getMetricGroup().getIOMetricGroup(),
                inputWatermarkGauge);
    }
    headOperator.getMetricGroup().gauge(MetricNames.IO_CURRENT_INPUT_WATERMARK, this.inputWatermarkGauge);
    // wrap watermark gauge since registered metrics must be unique
    getEnvironment().getMetricGroup().gauge(MetricNames.IO_CURRENT_INPUT_WATERMARK, this.inputWatermarkGauge::getValue);
}
Example #20
Source File: AlternatingCheckpointBarrierHandlerTest.java From flink with Apache License 2.0

private static AlternatingCheckpointBarrierHandler barrierHandler(SingleInputGate inputGate, AbstractInvokable target) {
    String taskName = "test";
    InputGate[] channelIndexToInputGate = new InputGate[inputGate.getNumberOfInputChannels()];
    Arrays.fill(channelIndexToInputGate, inputGate);
    return new AlternatingCheckpointBarrierHandler(
        new CheckpointBarrierAligner(taskName, target, inputGate),
        new CheckpointBarrierUnaligner(TestSubtaskCheckpointCoordinator.INSTANCE, taskName, target, inputGate),
        target);
}
Example #21
Source File: InputGateUtil.java From Flink-CEPplus with Apache License 2.0

public static InputGate createInputGate(InputGate[] inputGates) {
    if (inputGates.length <= 0) {
        throw new RuntimeException("No such input gate.");
    }

    if (inputGates.length < 2) {
        return inputGates[0];
    } else {
        return new UnionInputGate(inputGates);
    }
}
Example #22
Source File: StreamMockEnvironment.java From Flink-CEPplus with Apache License 2.0

public StreamMockEnvironment(
        JobID jobID,
        ExecutionAttemptID executionAttemptID,
        Configuration jobConfig,
        Configuration taskConfig,
        ExecutionConfig executionConfig,
        long memorySize,
        MockInputSplitProvider inputSplitProvider,
        int bufferSize,
        TaskStateManager taskStateManager) {

    this.jobID = jobID;
    this.executionAttemptID = executionAttemptID;

    int subtaskIndex = 0;
    this.taskInfo = new TaskInfo(
        "", /* task name */
        1, /* num key groups / max parallelism */
        subtaskIndex, /* index of this subtask */
        1, /* num subtasks */
        0 /* attempt number */);

    this.jobConfiguration = jobConfig;
    this.taskConfiguration = taskConfig;
    this.inputs = new LinkedList<InputGate>();
    this.outputs = new LinkedList<ResultPartitionWriter>();
    this.memManager = new MemoryManager(memorySize, 1);
    this.ioManager = new IOManagerAsync();
    this.taskStateManager = Preconditions.checkNotNull(taskStateManager);
    this.aggregateManager = new TestGlobalAggregateManager();
    this.inputSplitProvider = inputSplitProvider;
    this.bufferSize = bufferSize;

    this.executionConfig = executionConfig;
    this.accumulatorRegistry = new AccumulatorRegistry(jobID, getExecutionId());

    KvStateRegistry registry = new KvStateRegistry();
    this.kvStateRegistry = registry.createTaskRegistry(jobID, getJobVertexId());
}
Example #23
Source File: StreamMockEnvironment.java From flink with Apache License 2.0

public StreamMockEnvironment(
        JobID jobID,
        ExecutionAttemptID executionAttemptID,
        Configuration jobConfig,
        Configuration taskConfig,
        ExecutionConfig executionConfig,
        long memorySize,
        MockInputSplitProvider inputSplitProvider,
        int bufferSize,
        TaskStateManager taskStateManager) {

    this.jobID = jobID;
    this.executionAttemptID = executionAttemptID;

    int subtaskIndex = 0;
    this.taskInfo = new TaskInfo(
        "", /* task name */
        1, /* num key groups / max parallelism */
        subtaskIndex, /* index of this subtask */
        1, /* num subtasks */
        0 /* attempt number */);

    this.jobConfiguration = jobConfig;
    this.taskConfiguration = taskConfig;
    this.inputs = new LinkedList<InputGate>();
    this.outputs = new LinkedList<ResultPartitionWriter>();
    this.memManager = new MemoryManager(memorySize, 1);
    this.ioManager = new IOManagerAsync();
    this.taskStateManager = Preconditions.checkNotNull(taskStateManager);
    this.aggregateManager = new TestGlobalAggregateManager();
    this.inputSplitProvider = inputSplitProvider;
    this.bufferSize = bufferSize;

    this.executionConfig = executionConfig;
    this.accumulatorRegistry = new AccumulatorRegistry(jobID, getExecutionId());

    KvStateRegistry registry = new KvStateRegistry();
    this.kvStateRegistry = registry.createTaskRegistry(jobID, getJobVertexId());
}
Example #24
Source File: AbstractRecordReader.java From flink with Apache License 2.0

/**
 * Creates a new AbstractRecordReader that de-serializes records from the given input gate and
 * can spill partial records to disk, if they grow large.
 *
 * @param inputGate The input gate to read from.
 * @param tmpDirectories The temp directories. Used for spilling if the reader concurrently
 *                       reconstructs multiple large records.
 */
@SuppressWarnings("unchecked")
protected AbstractRecordReader(InputGate inputGate, String[] tmpDirectories) {
    super(inputGate);

    // Initialize one deserializer per input channel
    this.recordDeserializers = new SpillingAdaptiveSpanningRecordDeserializer[inputGate.getNumberOfInputChannels()];

    for (int i = 0; i < recordDeserializers.length; i++) {
        recordDeserializers[i] = new SpillingAdaptiveSpanningRecordDeserializer<T>(tmpDirectories);
    }
}
Example #25
Source File: StreamTask.java From flink with Apache License 2.0

private void requestPartitions() throws IOException {
    InputGate[] inputGates = getEnvironment().getAllInputGates();
    if (inputGates != null) {
        for (InputGate inputGate : inputGates) {
            inputGate.requestPartitions();
        }
    }
}
Example #26
Source File: NettyShuffleMetricFactory.java From flink with Apache License 2.0

/**
 * Registers legacy network metric groups before shuffle service refactoring.
 *
 * <p>Registers legacy metric groups if the shuffle service implementation is the original default one.
 *
 * @deprecated should be removed in the future
 */
@SuppressWarnings("DeprecatedIsStillUsed")
@Deprecated
public static void registerLegacyNetworkMetrics(
        boolean isDetailedMetrics,
        boolean isCreditBased,
        MetricGroup metricGroup,
        ResultPartitionWriter[] producedPartitions,
        InputGate[] inputGates) {
    checkNotNull(metricGroup);
    checkNotNull(producedPartitions);
    checkNotNull(inputGates);

    // add metrics for buffers
    final MetricGroup buffersGroup = metricGroup.addGroup(METRIC_GROUP_BUFFERS_DEPRECATED);

    // similar to MetricUtils.instantiateNetworkMetrics() but inside this IOMetricGroup (metricGroup)
    final MetricGroup networkGroup = metricGroup.addGroup(METRIC_GROUP_NETWORK_DEPRECATED);
    final MetricGroup outputGroup = networkGroup.addGroup(METRIC_GROUP_OUTPUT);
    final MetricGroup inputGroup = networkGroup.addGroup(METRIC_GROUP_INPUT);

    ResultPartition[] resultPartitions = Arrays.copyOf(producedPartitions, producedPartitions.length, ResultPartition[].class);
    registerOutputMetrics(isDetailedMetrics, outputGroup, buffersGroup, resultPartitions);

    SingleInputGate[] singleInputGates = Arrays.copyOf(inputGates, inputGates.length, SingleInputGate[].class);
    registerInputMetrics(isDetailedMetrics, isCreditBased, inputGroup, buffersGroup, singleInputGates);
}
Example #27
Source File: NettyShuffleEnvironment.java From flink with Apache License 2.0

/**
 * Registers legacy network metric groups before shuffle service refactoring.
 *
 * <p>Registers legacy metric groups if the shuffle service implementation is the original default one.
 *
 * @deprecated should be removed in the future
 */
@SuppressWarnings("DeprecatedIsStillUsed")
@Deprecated
public void registerLegacyNetworkMetrics(
        MetricGroup metricGroup,
        ResultPartitionWriter[] producedPartitions,
        InputGate[] inputGates) {
    NettyShuffleMetricFactory.registerLegacyNetworkMetrics(
        config.isNetworkDetailedMetrics(),
        config.isCreditBased(),
        metricGroup,
        producedPartitions,
        inputGates);
}
Example #28
Source File: CheckpointBarrierAligner.java From flink with Apache License 2.0

CheckpointBarrierAligner(
        String taskName,
        AbstractInvokable toNotifyOnCheckpoint,
        InputGate... inputGates) {
    super(toNotifyOnCheckpoint);

    this.taskName = taskName;
    this.inputGates = inputGates;

    blockedChannels = Arrays.stream(inputGates)
        .flatMap(gate -> gate.getChannelInfos().stream())
        .collect(Collectors.toMap(Function.identity(), info -> false));
    totalNumberOfInputChannels = blockedChannels.size();
}
Example #29
Source File: InputGateUtil.java From flink with Apache License 2.0

public static InputGate createInputGate(InputGate[] inputGates) {
    if (inputGates.length <= 0) {
        throw new RuntimeException("No such input gate.");
    }

    if (inputGates.length < 2) {
        return inputGates[0];
    } else {
        return new UnionInputGate(inputGates);
    }
}
Example #30
Source File: AbstractReader.java From flink with Apache License 2.0

protected AbstractReader(InputGate inputGate) {
    this.inputGate = inputGate;
}