org.apache.flink.runtime.io.network.api.reader.MutableRecordReader Java Examples
The following examples show how to use
org.apache.flink.runtime.io.network.api.reader.MutableRecordReader.
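Before the project examples, a minimal usage sketch may be helpful. A MutableRecordReader is constructed from an InputGate plus an array of spill directories, and records are read by repeatedly passing the same mutable record instance to next(). The surrounding task context below (getEnvironment(), gate index 0, the LongValue record type, and the process() helper) is assumed purely for illustration and is not prescribed by the reader's API.

// Minimal sketch, assuming this code runs inside a Flink task that exposes getEnvironment()
// and that gate 0 carries LongValue records; process(...) is a hypothetical per-record callback.
// next(...) may throw IOException or InterruptedException, so the enclosing method should declare them.
MutableRecordReader<LongValue> reader = new MutableRecordReader<>(
        getEnvironment().getInputGate(0),                             // gate to read from
        getEnvironment().getTaskManagerInfo().getTmpDirectories());   // spill directories for large records

LongValue reuse = new LongValue();
while (reader.next(reuse)) {       // next(...) refills the same mutable instance; false signals end of input
    process(reuse.getValue());     // hypothetical user logic
}

Reusing a single mutable record instance avoids allocating a new object per element, which is why next() takes a target to fill rather than returning a fresh record.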
Example #1
Source File: BatchTask.java From Flink-CEPplus with Apache License 2.0
/**
 * Creates the record readers for the number of inputs as defined by {@link #getNumTaskInputs()}.
 *
 * This method requires that the task configuration, the driver, and the user-code class loader are set.
 */
protected void initInputReaders() throws Exception {
    final int numInputs = getNumTaskInputs();
    final MutableReader<?>[] inputReaders = new MutableReader<?>[numInputs];

    int currentReaderOffset = 0;

    for (int i = 0; i < numInputs; i++) {
        // ---------------- create the input readers ---------------------
        // in case where a logical input unions multiple physical inputs, create a union reader
        final int groupSize = this.config.getGroupSize(i);

        if (groupSize == 1) {
            // non-union case
            inputReaders[i] = new MutableRecordReader<IOReadableWritable>(
                    getEnvironment().getInputGate(currentReaderOffset),
                    getEnvironment().getTaskManagerInfo().getTmpDirectories());
        } else if (groupSize > 1) {
            // union case
            InputGate[] readers = new InputGate[groupSize];
            for (int j = 0; j < groupSize; ++j) {
                readers[j] = getEnvironment().getInputGate(currentReaderOffset + j);
            }
            inputReaders[i] = new MutableRecordReader<IOReadableWritable>(
                    new UnionInputGate(readers),
                    getEnvironment().getTaskManagerInfo().getTmpDirectories());
        } else {
            throw new Exception("Illegal input group size in task configuration: " + groupSize);
        }

        currentReaderOffset += groupSize;
    }
    this.inputReaders = inputReaders;

    // final sanity check
    if (currentReaderOffset != this.config.getNumInputs()) {
        throw new Exception("Illegal configuration: Number of input gates and group sizes are not consistent.");
    }
}
Example #2
Source File: BatchTask.java From Flink-CEPplus with Apache License 2.0
/**
 * Creates the record readers for the extra broadcast inputs as configured by {@link TaskConfig#getNumBroadcastInputs()}.
 *
 * This method requires that the task configuration, the driver, and the user-code class loader are set.
 */
protected void initBroadcastInputReaders() throws Exception {
    final int numBroadcastInputs = this.config.getNumBroadcastInputs();
    final MutableReader<?>[] broadcastInputReaders = new MutableReader<?>[numBroadcastInputs];

    int currentReaderOffset = config.getNumInputs();

    for (int i = 0; i < this.config.getNumBroadcastInputs(); i++) {
        // ---------------- create the input readers ---------------------
        // in case where a logical input unions multiple physical inputs, create a union reader
        final int groupSize = this.config.getBroadcastGroupSize(i);

        if (groupSize == 1) {
            // non-union case
            broadcastInputReaders[i] = new MutableRecordReader<IOReadableWritable>(
                    getEnvironment().getInputGate(currentReaderOffset),
                    getEnvironment().getTaskManagerInfo().getTmpDirectories());
        } else if (groupSize > 1) {
            // union case
            InputGate[] readers = new InputGate[groupSize];
            for (int j = 0; j < groupSize; ++j) {
                readers[j] = getEnvironment().getInputGate(currentReaderOffset + j);
            }
            broadcastInputReaders[i] = new MutableRecordReader<IOReadableWritable>(
                    new UnionInputGate(readers),
                    getEnvironment().getTaskManagerInfo().getTmpDirectories());
        } else {
            throw new Exception("Illegal input group size in task configuration: " + groupSize);
        }

        currentReaderOffset += groupSize;
    }
    this.broadcastInputReaders = broadcastInputReaders;
}
Example #3
Source File: DataSinkTask.java From Flink-CEPplus with Apache License 2.0
/**
 * Initializes the input readers of the DataSinkTask.
 *
 * @throws RuntimeException
 *         Thrown in case of invalid task input configuration.
 */
@SuppressWarnings("unchecked")
private void initInputReaders() throws Exception {
    int numGates = 0;
    // ---------------- create the input readers ---------------------
    // in case where a logical input unions multiple physical inputs, create a union reader
    final int groupSize = this.config.getGroupSize(0);
    numGates += groupSize;
    if (groupSize == 1) {
        // non-union case
        inputReader = new MutableRecordReader<DeserializationDelegate<IT>>(
                getEnvironment().getInputGate(0),
                getEnvironment().getTaskManagerInfo().getTmpDirectories());
    } else if (groupSize > 1) {
        // union case
        inputReader = new MutableRecordReader<IOReadableWritable>(
                new UnionInputGate(getEnvironment().getAllInputGates()),
                getEnvironment().getTaskManagerInfo().getTmpDirectories());
    } else {
        throw new Exception("Illegal input group size in task configuration: " + groupSize);
    }

    this.inputTypeSerializerFactory = this.config.getInputSerializer(0, getUserCodeClassLoader());
    @SuppressWarnings({ "rawtypes" })
    final MutableObjectIterator<?> iter = new ReaderIterator(inputReader, this.inputTypeSerializerFactory.getSerializer());
    this.reader = (MutableObjectIterator<IT>) iter;

    // final sanity check
    if (numGates != this.config.getNumInputs()) {
        throw new Exception("Illegal configuration: Number of input gates and group sizes are not consistent.");
    }
}
Example #4
Source File: SerializingLongReceiver.java From Flink-CEPplus with Apache License 2.0
@SuppressWarnings("WeakerAccess")
public SerializingLongReceiver(InputGate inputGate, int expectedRepetitionsOfExpectedRecord) {
    super(expectedRepetitionsOfExpectedRecord);
    this.reader = new MutableRecordReader<>(
        inputGate,
        new String[]{ EnvironmentInformation.getTemporaryFileDirectory() });
}
Example #5
Source File: BatchTask.java From flink with Apache License 2.0
/**
 * Creates the record readers for the number of inputs as defined by {@link #getNumTaskInputs()}.
 *
 * This method requires that the task configuration, the driver, and the user-code class loader are set.
 */
protected void initInputReaders() throws Exception {
    final int numInputs = getNumTaskInputs();
    final MutableReader<?>[] inputReaders = new MutableReader<?>[numInputs];

    int currentReaderOffset = 0;

    for (int i = 0; i < numInputs; i++) {
        // ---------------- create the input readers ---------------------
        // in case where a logical input unions multiple physical inputs, create a union reader
        final int groupSize = this.config.getGroupSize(i);

        if (groupSize == 1) {
            // non-union case
            inputReaders[i] = new MutableRecordReader<IOReadableWritable>(
                    getEnvironment().getInputGate(currentReaderOffset),
                    getEnvironment().getTaskManagerInfo().getTmpDirectories());
        } else if (groupSize > 1) {
            // union case
            InputGate[] readers = new InputGate[groupSize];
            for (int j = 0; j < groupSize; ++j) {
                readers[j] = getEnvironment().getInputGate(currentReaderOffset + j);
            }
            inputReaders[i] = new MutableRecordReader<IOReadableWritable>(
                    new UnionInputGate(readers),
                    getEnvironment().getTaskManagerInfo().getTmpDirectories());
        } else {
            throw new Exception("Illegal input group size in task configuration: " + groupSize);
        }

        currentReaderOffset += groupSize;
    }
    this.inputReaders = inputReaders;

    // final sanity check
    if (currentReaderOffset != this.config.getNumInputs()) {
        throw new Exception("Illegal configuration: Number of input gates and group sizes are not consistent.");
    }
}
Example #6
Source File: BatchTask.java From flink with Apache License 2.0
/**
 * Creates the record readers for the extra broadcast inputs as configured by {@link TaskConfig#getNumBroadcastInputs()}.
 *
 * This method requires that the task configuration, the driver, and the user-code class loader are set.
 */
protected void initBroadcastInputReaders() throws Exception {
    final int numBroadcastInputs = this.config.getNumBroadcastInputs();
    final MutableReader<?>[] broadcastInputReaders = new MutableReader<?>[numBroadcastInputs];

    int currentReaderOffset = config.getNumInputs();

    for (int i = 0; i < this.config.getNumBroadcastInputs(); i++) {
        // ---------------- create the input readers ---------------------
        // in case where a logical input unions multiple physical inputs, create a union reader
        final int groupSize = this.config.getBroadcastGroupSize(i);

        if (groupSize == 1) {
            // non-union case
            broadcastInputReaders[i] = new MutableRecordReader<IOReadableWritable>(
                    getEnvironment().getInputGate(currentReaderOffset),
                    getEnvironment().getTaskManagerInfo().getTmpDirectories());
        } else if (groupSize > 1) {
            // union case
            InputGate[] readers = new InputGate[groupSize];
            for (int j = 0; j < groupSize; ++j) {
                readers[j] = getEnvironment().getInputGate(currentReaderOffset + j);
            }
            broadcastInputReaders[i] = new MutableRecordReader<IOReadableWritable>(
                    new UnionInputGate(readers),
                    getEnvironment().getTaskManagerInfo().getTmpDirectories());
        } else {
            throw new Exception("Illegal input group size in task configuration: " + groupSize);
        }

        currentReaderOffset += groupSize;
    }
    this.broadcastInputReaders = broadcastInputReaders;
}
Example #7
Source File: DataSinkTask.java From flink with Apache License 2.0
/**
 * Initializes the input readers of the DataSinkTask.
 *
 * @throws RuntimeException
 *         Thrown in case of invalid task input configuration.
 */
@SuppressWarnings("unchecked")
private void initInputReaders() throws Exception {
    int numGates = 0;
    // ---------------- create the input readers ---------------------
    // in case where a logical input unions multiple physical inputs, create a union reader
    final int groupSize = this.config.getGroupSize(0);
    numGates += groupSize;
    if (groupSize == 1) {
        // non-union case
        inputReader = new MutableRecordReader<DeserializationDelegate<IT>>(
                getEnvironment().getInputGate(0),
                getEnvironment().getTaskManagerInfo().getTmpDirectories());
    } else if (groupSize > 1) {
        // union case
        inputReader = new MutableRecordReader<IOReadableWritable>(
                new UnionInputGate(getEnvironment().getAllInputGates()),
                getEnvironment().getTaskManagerInfo().getTmpDirectories());
    } else {
        throw new Exception("Illegal input group size in task configuration: " + groupSize);
    }

    this.inputTypeSerializerFactory = this.config.getInputSerializer(0, getUserCodeClassLoader());
    @SuppressWarnings({ "rawtypes" })
    final MutableObjectIterator<?> iter = new ReaderIterator(inputReader, this.inputTypeSerializerFactory.getSerializer());
    this.reader = (MutableObjectIterator<IT>) iter;

    // final sanity check
    if (numGates != this.config.getNumInputs()) {
        throw new Exception("Illegal configuration: Number of input gates and group sizes are not consistent.");
    }
}
Example #8
Source File: SerializingLongReceiver.java From flink with Apache License 2.0
@SuppressWarnings("WeakerAccess")
public SerializingLongReceiver(InputGate inputGate, int expectedRepetitionsOfExpectedRecord) {
    super(expectedRepetitionsOfExpectedRecord);
    this.reader = new MutableRecordReader<>(
        inputGate,
        new String[]{ EnvironmentInformation.getTemporaryFileDirectory() });
}
Example #9
Source File: ShuffleCompressionITCase.java From flink with Apache License 2.0
@Override
public void invoke() throws Exception {
    MutableRecordReader<LongValue> reader = new MutableRecordReader<>(
        getEnvironment().getInputGate(0),
        new String[]{EnvironmentInformation.getTemporaryFileDirectory()});

    LongValue value = new LongValue();
    for (int i = 0; i < PARALLELISM * NUM_RECORDS_TO_SEND; ++i) {
        reader.next(value);
        assertEquals(RECORD_TO_SEND.getValue(), value.getValue());
    }
}
Example #10
Source File: BatchTask.java From flink with Apache License 2.0
/**
 * Creates the record readers for the number of inputs as defined by {@link #getNumTaskInputs()}.
 *
 * This method requires that the task configuration, the driver, and the user-code class loader are set.
 */
protected void initInputReaders() throws Exception {
    final int numInputs = getNumTaskInputs();
    final MutableReader<?>[] inputReaders = new MutableReader<?>[numInputs];

    int currentReaderOffset = 0;

    for (int i = 0; i < numInputs; i++) {
        // ---------------- create the input readers ---------------------
        // in case where a logical input unions multiple physical inputs, create a union reader
        final int groupSize = this.config.getGroupSize(i);

        if (groupSize == 1) {
            // non-union case
            inputReaders[i] = new MutableRecordReader<IOReadableWritable>(
                    getEnvironment().getInputGate(currentReaderOffset),
                    getEnvironment().getTaskManagerInfo().getTmpDirectories());
        } else if (groupSize > 1) {
            // union case
            IndexedInputGate[] readers = new IndexedInputGate[groupSize];
            for (int j = 0; j < groupSize; ++j) {
                readers[j] = getEnvironment().getInputGate(currentReaderOffset + j);
            }
            inputReaders[i] = new MutableRecordReader<IOReadableWritable>(
                    new UnionInputGate(readers),
                    getEnvironment().getTaskManagerInfo().getTmpDirectories());
        } else {
            throw new Exception("Illegal input group size in task configuration: " + groupSize);
        }

        currentReaderOffset += groupSize;
    }
    this.inputReaders = inputReaders;

    // final sanity check
    if (currentReaderOffset != this.config.getNumInputs()) {
        throw new Exception("Illegal configuration: Number of input gates and group sizes are not consistent.");
    }
}
Example #11
Source File: BatchTask.java From flink with Apache License 2.0
/**
 * Creates the record readers for the extra broadcast inputs as configured by {@link TaskConfig#getNumBroadcastInputs()}.
 *
 * This method requires that the task configuration, the driver, and the user-code class loader are set.
 */
protected void initBroadcastInputReaders() throws Exception {
    final int numBroadcastInputs = this.config.getNumBroadcastInputs();
    final MutableReader<?>[] broadcastInputReaders = new MutableReader<?>[numBroadcastInputs];

    int currentReaderOffset = config.getNumInputs();

    for (int i = 0; i < this.config.getNumBroadcastInputs(); i++) {
        // ---------------- create the input readers ---------------------
        // in case where a logical input unions multiple physical inputs, create a union reader
        final int groupSize = this.config.getBroadcastGroupSize(i);

        if (groupSize == 1) {
            // non-union case
            broadcastInputReaders[i] = new MutableRecordReader<IOReadableWritable>(
                    getEnvironment().getInputGate(currentReaderOffset),
                    getEnvironment().getTaskManagerInfo().getTmpDirectories());
        } else if (groupSize > 1) {
            // union case
            IndexedInputGate[] readers = new IndexedInputGate[groupSize];
            for (int j = 0; j < groupSize; ++j) {
                readers[j] = getEnvironment().getInputGate(currentReaderOffset + j);
            }
            broadcastInputReaders[i] = new MutableRecordReader<IOReadableWritable>(
                    new UnionInputGate(readers),
                    getEnvironment().getTaskManagerInfo().getTmpDirectories());
        } else {
            throw new Exception("Illegal input group size in task configuration: " + groupSize);
        }

        currentReaderOffset += groupSize;
    }
    this.broadcastInputReaders = broadcastInputReaders;
}
Example #12
Source File: DataSinkTask.java From flink with Apache License 2.0
/**
 * Initializes the input readers of the DataSinkTask.
 *
 * @throws RuntimeException
 *         Thrown in case of invalid task input configuration.
 */
@SuppressWarnings("unchecked")
private void initInputReaders() throws Exception {
    int numGates = 0;
    // ---------------- create the input readers ---------------------
    // in case where a logical input unions multiple physical inputs, create a union reader
    final int groupSize = this.config.getGroupSize(0);
    numGates += groupSize;
    if (groupSize == 1) {
        // non-union case
        inputReader = new MutableRecordReader<DeserializationDelegate<IT>>(
                getEnvironment().getInputGate(0),
                getEnvironment().getTaskManagerInfo().getTmpDirectories());
    } else if (groupSize > 1) {
        // union case
        inputReader = new MutableRecordReader<IOReadableWritable>(
                new UnionInputGate(getEnvironment().getAllInputGates()),
                getEnvironment().getTaskManagerInfo().getTmpDirectories());
    } else {
        throw new Exception("Illegal input group size in task configuration: " + groupSize);
    }

    this.inputTypeSerializerFactory = this.config.getInputSerializer(0, getUserCodeClassLoader());
    @SuppressWarnings({ "rawtypes" })
    final MutableObjectIterator<?> iter = new ReaderIterator(inputReader, this.inputTypeSerializerFactory.getSerializer());
    this.reader = (MutableObjectIterator<IT>) iter;

    // final sanity check
    if (numGates != this.config.getNumInputs()) {
        throw new Exception("Illegal configuration: Number of input gates and group sizes are not consistent.");
    }
}
Example #13
Source File: SerializingLongReceiver.java From flink with Apache License 2.0
@SuppressWarnings("WeakerAccess")
public SerializingLongReceiver(InputGate inputGate, int expectedRepetitionsOfExpectedRecord) {
    super(expectedRepetitionsOfExpectedRecord);
    this.reader = new MutableRecordReader<>(
        inputGate,
        new String[]{ EnvironmentInformation.getTemporaryFileDirectory() });
}
Example #14
Source File: IterationSynchronizationSinkTask.java From Flink-CEPplus with Apache License 2.0
@Override
public void invoke() throws Exception {
    this.headEventReader = new MutableRecordReader<>(
            getEnvironment().getInputGate(0),
            getEnvironment().getTaskManagerInfo().getTmpDirectories());

    TaskConfig taskConfig = new TaskConfig(getTaskConfiguration());

    // store all aggregators
    this.aggregators = new HashMap<>();
    for (AggregatorWithName<?> aggWithName : taskConfig.getIterationAggregators(getUserCodeClassLoader())) {
        aggregators.put(aggWithName.getName(), aggWithName.getAggregator());
    }

    // store the aggregator convergence criterion
    if (taskConfig.usesConvergenceCriterion()) {
        convergenceCriterion = taskConfig.getConvergenceCriterion(getUserCodeClassLoader());
        convergenceAggregatorName = taskConfig.getConvergenceCriterionAggregatorName();
        Preconditions.checkNotNull(convergenceAggregatorName);
    }

    // store the default aggregator convergence criterion
    if (taskConfig.usesImplicitConvergenceCriterion()) {
        implicitConvergenceCriterion = taskConfig.getImplicitConvergenceCriterion(getUserCodeClassLoader());
        implicitConvergenceAggregatorName = taskConfig.getImplicitConvergenceCriterionAggregatorName();
        Preconditions.checkNotNull(implicitConvergenceAggregatorName);
    }

    maxNumberOfIterations = taskConfig.getNumberOfIterations();

    // set up the event handler
    int numEventsTillEndOfSuperstep = taskConfig.getNumberOfEventsUntilInterruptInIterativeGate(0);
    eventHandler = new SyncEventHandler(numEventsTillEndOfSuperstep, aggregators, getEnvironment().getUserClassLoader());
    headEventReader.registerTaskEventListener(eventHandler, WorkerDoneEvent.class);

    IntValue dummy = new IntValue();

    while (!terminationRequested()) {

        if (log.isInfoEnabled()) {
            log.info(formatLogString("starting iteration [" + currentIteration + "]"));
        }

        // this call listens for events until the end-of-superstep is reached
        readHeadEventChannel(dummy);

        if (log.isInfoEnabled()) {
            log.info(formatLogString("finishing iteration [" + currentIteration + "]"));
        }

        if (checkForConvergence()) {
            if (log.isInfoEnabled()) {
                log.info(formatLogString("signaling that all workers are to terminate in iteration [" + currentIteration + "]"));
            }

            requestTermination();
            sendToAllWorkers(new TerminationEvent());
        } else {
            if (log.isInfoEnabled()) {
                log.info(formatLogString("signaling that all workers are done in iteration [" + currentIteration + "]"));
            }

            AllWorkersDoneEvent allWorkersDoneEvent = new AllWorkersDoneEvent(aggregators);
            sendToAllWorkers(allWorkersDoneEvent);

            // reset all aggregators
            for (Aggregator<?> agg : aggregators.values()) {
                agg.reset();
            }
            currentIteration++;
        }
    }
}
Example #15
Source File: IterationSynchronizationSinkTask.java From flink with Apache License 2.0
@Override
public void invoke() throws Exception {
    this.headEventReader = new MutableRecordReader<>(
            getEnvironment().getInputGate(0),
            getEnvironment().getTaskManagerInfo().getTmpDirectories());

    TaskConfig taskConfig = new TaskConfig(getTaskConfiguration());

    // store all aggregators
    this.aggregators = new HashMap<>();
    for (AggregatorWithName<?> aggWithName : taskConfig.getIterationAggregators(getUserCodeClassLoader())) {
        aggregators.put(aggWithName.getName(), aggWithName.getAggregator());
    }

    // store the aggregator convergence criterion
    if (taskConfig.usesConvergenceCriterion()) {
        convergenceCriterion = taskConfig.getConvergenceCriterion(getUserCodeClassLoader());
        convergenceAggregatorName = taskConfig.getConvergenceCriterionAggregatorName();
        Preconditions.checkNotNull(convergenceAggregatorName);
    }

    // store the default aggregator convergence criterion
    if (taskConfig.usesImplicitConvergenceCriterion()) {
        implicitConvergenceCriterion = taskConfig.getImplicitConvergenceCriterion(getUserCodeClassLoader());
        implicitConvergenceAggregatorName = taskConfig.getImplicitConvergenceCriterionAggregatorName();
        Preconditions.checkNotNull(implicitConvergenceAggregatorName);
    }

    maxNumberOfIterations = taskConfig.getNumberOfIterations();

    // set up the event handler
    int numEventsTillEndOfSuperstep = taskConfig.getNumberOfEventsUntilInterruptInIterativeGate(0);
    eventHandler = new SyncEventHandler(numEventsTillEndOfSuperstep, aggregators, getEnvironment().getUserClassLoader());
    headEventReader.registerTaskEventListener(eventHandler, WorkerDoneEvent.class);

    IntValue dummy = new IntValue();

    while (!terminationRequested()) {

        if (log.isInfoEnabled()) {
            log.info(formatLogString("starting iteration [" + currentIteration + "]"));
        }

        // this call listens for events until the end-of-superstep is reached
        readHeadEventChannel(dummy);

        if (log.isInfoEnabled()) {
            log.info(formatLogString("finishing iteration [" + currentIteration + "]"));
        }

        if (checkForConvergence()) {
            if (log.isInfoEnabled()) {
                log.info(formatLogString("signaling that all workers are to terminate in iteration [" + currentIteration + "]"));
            }

            requestTermination();
            sendToAllWorkers(new TerminationEvent());
        } else {
            if (log.isInfoEnabled()) {
                log.info(formatLogString("signaling that all workers are done in iteration [" + currentIteration + "]"));
            }

            AllWorkersDoneEvent allWorkersDoneEvent = new AllWorkersDoneEvent(aggregators);
            sendToAllWorkers(allWorkersDoneEvent);

            // reset all aggregators
            for (Aggregator<?> agg : aggregators.values()) {
                agg.reset();
            }
            currentIteration++;
        }
    }
}