org.apache.flink.runtime.io.disk.InputViewIterator Java Examples
The following examples show how to use org.apache.flink.runtime.io.disk.InputViewIterator. Each example notes the source file and the open-source project it was taken from.
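InputViewIterator pairs a DataInputView with a TypeSerializer and exposes the serialized records behind the view as a MutableObjectIterator, whose next() returns null once the view is exhausted. Before the examples, here is a minimal, self-contained sketch of that pattern; the in-memory round trip through DataOutputSerializer/DataInputDeserializer and the use of LongSerializer are illustrative assumptions, not taken from the examples below.

import org.apache.flink.api.common.typeutils.base.LongSerializer;
import org.apache.flink.core.memory.DataInputDeserializer;
import org.apache.flink.core.memory.DataOutputSerializer;
import org.apache.flink.runtime.io.disk.InputViewIterator;
import org.apache.flink.util.MutableObjectIterator;

public class InputViewIteratorSketch {

    public static void main(String[] args) throws Exception {
        LongSerializer serializer = LongSerializer.INSTANCE;

        // write a few records into an in-memory output view (illustrative setup)
        DataOutputSerializer out = new DataOutputSerializer(64);
        for (long i = 0; i < 3; i++) {
            serializer.serialize(i, out);
        }

        // replay the serialized bytes through an InputViewIterator
        DataInputDeserializer in = new DataInputDeserializer(out.getCopyOfBuffer());
        MutableObjectIterator<Long> iterator = new InputViewIterator<>(in, serializer);

        // next() deserializes one record per call and returns null at the end of the view
        Long record;
        while ((record = iterator.next()) != null) {
            System.out.println(record);
        }
    }
}

The same pattern appears in all of the examples below: something writes serialized records into a view (a memory buffer, a spill file, or a checkpoint stream), and an InputViewIterator reads them back.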
Example #1
Source File: TempBarrier.java From Flink-CEPplus with Apache License 2.0
/**
 * This method resets the input!
 *
 * @see org.apache.flink.runtime.operators.util.CloseableInputProvider#getIterator()
 */
@Override
public MutableObjectIterator<T> getIterator() throws InterruptedException, IOException {
    synchronized (this.lock) {
        while (this.exception == null && !this.writingDone) {
            this.lock.wait(5000);
        }
    }

    if (this.exception != null) {
        throw new RuntimeException("An error occurred creating the temp table.", this.exception);
    }
    else if (this.writingDone) {
        final DataInputView in = this.buffer.flip();
        return new InputViewIterator<T>(in, this.serializer);
    }
    else {
        return null;
    }
}
Example #2
Source File: TempBarrier.java From flink with Apache License 2.0
/**
 * This method resets the input!
 *
 * @see org.apache.flink.runtime.operators.util.CloseableInputProvider#getIterator()
 */
@Override
public MutableObjectIterator<T> getIterator() throws InterruptedException, IOException {
    synchronized (this.lock) {
        while (this.exception == null && !this.writingDone) {
            this.lock.wait(5000);
        }
    }

    if (this.exception != null) {
        throw new RuntimeException("An error occurred creating the temp table.", this.exception);
    }
    else if (this.writingDone) {
        final DataInputView in = this.buffer.flip();
        return new InputViewIterator<T>(in, this.serializer);
    }
    else {
        return null;
    }
}
Example #3
Source File: IterationHeadTask.java From Flink-CEPplus with Apache License 2.0
private void feedBackSuperstepResult(DataInputView superstepResult) {
    this.inputs[this.feedbackDataInput] =
        new InputViewIterator<Y>(superstepResult, this.feedbackTypeSerializer.getSerializer());
}
Example #4
Source File: LargeRecordHandler.java From Flink-CEPplus with Apache License 2.0
public MutableObjectIterator<T> finishWriteAndSortKeys(List<MemorySegment> memory) throws IOException {
    if (recordsOutFile == null || keysOutFile == null) {
        throw new IllegalStateException("The LargeRecordHandler has not spilled any records");
    }

    // close the writers and capture the number of bytes in the last block of each file
    final int lastBlockBytesKeys;
    final int lastBlockBytesRecords;

    recordsOutFile.close();
    keysOutFile.close();
    lastBlockBytesKeys = keysOutFile.getBytesInLatestSegment();
    lastBlockBytesRecords = recordsOutFile.getBytesInLatestSegment();
    recordsOutFile = null;
    keysOutFile = null;

    final int pagesForReaders = Math.max(3 * MIN_SEGMENTS_FOR_KEY_SPILLING,
        Math.min(2 * MAX_SEGMENTS_FOR_KEY_SPILLING, memory.size() / 50));
    final int pagesForKeyReader = Math.min(pagesForReaders - MIN_SEGMENTS_FOR_KEY_SPILLING,
        MAX_SEGMENTS_FOR_KEY_SPILLING);
    final int pagesForRecordReader = pagesForReaders - pagesForKeyReader;

    // grab memory for the record reader
    ArrayList<MemorySegment> memForRecordReader = new ArrayList<MemorySegment>();
    ArrayList<MemorySegment> memForKeysReader = new ArrayList<MemorySegment>();

    for (int i = 0; i < pagesForRecordReader; i++) {
        memForRecordReader.add(memory.remove(memory.size() - 1));
    }
    for (int i = 0; i < pagesForKeyReader; i++) {
        memForKeysReader.add(memory.remove(memory.size() - 1));
    }

    keysReader = new FileChannelInputView(ioManager.createBlockChannelReader(keysChannel),
        memManager, memForKeysReader, lastBlockBytesKeys);
    InputViewIterator<Tuple> keyIterator = new InputViewIterator<Tuple>(keysReader, keySerializer);

    keySorter = new UnilateralSortMerger<Tuple>(memManager, memory, ioManager, keyIterator,
        memoryOwner, keySerializerFactory, keyComparator, 1, maxFilehandles, 1.0f, false,
        this.executionConfig.isObjectReuseEnabled());

    // wait for the sorter to sort the keys
    MutableObjectIterator<Tuple> result;
    try {
        result = keySorter.getIterator();
    } catch (InterruptedException e) {
        throw new IOException(e);
    }

    recordsReader = new SeekableFileChannelInputView(ioManager, recordsChannel, memManager,
        memForRecordReader, lastBlockBytesRecords);

    return new FetchingIterator<T>(serializer, result, recordsReader, keySerializer, numKeyFields);
}
Example #5
Source File: GenericWriteAheadSink.java From Flink-CEPplus with Apache License 2.0
@Override
public void notifyCheckpointComplete(long checkpointId) throws Exception {
    super.notifyCheckpointComplete(checkpointId);

    synchronized (pendingCheckpoints) {
        Iterator<PendingCheckpoint> pendingCheckpointIt = pendingCheckpoints.iterator();
        while (pendingCheckpointIt.hasNext()) {
            PendingCheckpoint pendingCheckpoint = pendingCheckpointIt.next();

            long pastCheckpointId = pendingCheckpoint.checkpointId;
            int subtaskId = pendingCheckpoint.subtaskId;
            long timestamp = pendingCheckpoint.timestamp;
            StreamStateHandle streamHandle = pendingCheckpoint.stateHandle;

            if (pastCheckpointId <= checkpointId) {
                try {
                    if (!committer.isCheckpointCommitted(subtaskId, pastCheckpointId)) {
                        try (FSDataInputStream in = streamHandle.openInputStream()) {
                            boolean success = sendValues(
                                new ReusingMutableToRegularIteratorWrapper<>(
                                    new InputViewIterator<>(
                                        new DataInputViewStreamWrapper(in),
                                        serializer),
                                    serializer),
                                pastCheckpointId,
                                timestamp);
                            if (success) {
                                // in case the checkpoint was successfully committed,
                                // discard its state from the backend and mark it for removal
                                // in case it failed, we retry on the next checkpoint
                                committer.commitCheckpoint(subtaskId, pastCheckpointId);
                                streamHandle.discardState();
                                pendingCheckpointIt.remove();
                            }
                        }
                    } else {
                        streamHandle.discardState();
                        pendingCheckpointIt.remove();
                    }
                } catch (Exception e) {
                    // we have to break here to prevent a new (later) checkpoint
                    // from being committed before this one
                    LOG.error("Could not commit checkpoint.", e);
                    break;
                }
            }
        }
    }
}
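The noteworthy detail above is the adapter chain passed to sendValues(): the raw checkpoint stream is wrapped in a DataInputViewStreamWrapper, records are deserialized by an InputViewIterator, and ReusingMutableToRegularIteratorWrapper adapts the result to a plain java.util.Iterator. A stand-alone sketch of the same chain follows; the FileInputStream source and LongSerializer are hypothetical stand-ins for the checkpoint state stream and the sink's record serializer.

import java.io.FileInputStream;
import java.io.InputStream;

import org.apache.flink.api.common.typeutils.base.LongSerializer;
import org.apache.flink.core.memory.DataInputViewStreamWrapper;
import org.apache.flink.runtime.io.disk.InputViewIterator;
import org.apache.flink.runtime.util.ReusingMutableToRegularIteratorWrapper;

public class IteratorChainSketch {

    public static void main(String[] args) throws Exception {
        LongSerializer serializer = LongSerializer.INSTANCE;

        // hypothetical source; in GenericWriteAheadSink this is the stream
        // opened from the pending checkpoint's StreamStateHandle
        try (InputStream in = new FileInputStream("records.bin")) {
            // stream -> DataInputView -> MutableObjectIterator -> java.util.Iterator
            ReusingMutableToRegularIteratorWrapper<Long> records =
                new ReusingMutableToRegularIteratorWrapper<>(
                    new InputViewIterator<>(new DataInputViewStreamWrapper(in), serializer),
                    serializer);

            while (records.hasNext()) {
                // the wrapper reuses record objects; copy a record if it must outlive this iteration
                Long record = records.next();
                System.out.println(record);
            }
        }
    }
}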
Example #6
Source File: IterationHeadTask.java From flink with Apache License 2.0
private void feedBackSuperstepResult(DataInputView superstepResult) {
    this.inputs[this.feedbackDataInput] =
        new InputViewIterator<Y>(superstepResult, this.feedbackTypeSerializer.getSerializer());
}
Example #7
Source File: LargeRecordHandler.java From flink with Apache License 2.0
public MutableObjectIterator<T> finishWriteAndSortKeys(List<MemorySegment> memory) throws IOException {
    if (recordsOutFile == null || keysOutFile == null) {
        throw new IllegalStateException("The LargeRecordHandler has not spilled any records");
    }

    // close the writers and capture the number of bytes in the last block of each file
    final int lastBlockBytesKeys;
    final int lastBlockBytesRecords;

    recordsOutFile.close();
    keysOutFile.close();
    lastBlockBytesKeys = keysOutFile.getBytesInLatestSegment();
    lastBlockBytesRecords = recordsOutFile.getBytesInLatestSegment();
    recordsOutFile = null;
    keysOutFile = null;

    final int pagesForReaders = Math.max(3 * MIN_SEGMENTS_FOR_KEY_SPILLING,
        Math.min(2 * MAX_SEGMENTS_FOR_KEY_SPILLING, memory.size() / 50));
    final int pagesForKeyReader = Math.min(pagesForReaders - MIN_SEGMENTS_FOR_KEY_SPILLING,
        MAX_SEGMENTS_FOR_KEY_SPILLING);
    final int pagesForRecordReader = pagesForReaders - pagesForKeyReader;

    // grab memory for the record reader
    ArrayList<MemorySegment> memForRecordReader = new ArrayList<MemorySegment>();
    ArrayList<MemorySegment> memForKeysReader = new ArrayList<MemorySegment>();

    for (int i = 0; i < pagesForRecordReader; i++) {
        memForRecordReader.add(memory.remove(memory.size() - 1));
    }
    for (int i = 0; i < pagesForKeyReader; i++) {
        memForKeysReader.add(memory.remove(memory.size() - 1));
    }

    keysReader = new FileChannelInputView(ioManager.createBlockChannelReader(keysChannel),
        memManager, memForKeysReader, lastBlockBytesKeys);
    InputViewIterator<Tuple> keyIterator = new InputViewIterator<Tuple>(keysReader, keySerializer);

    keySorter = new UnilateralSortMerger<Tuple>(memManager, memory, ioManager, keyIterator,
        memoryOwner, keySerializerFactory, keyComparator, 1, maxFilehandles, 1.0f, false,
        this.executionConfig.isObjectReuseEnabled());

    // wait for the sorter to sort the keys
    MutableObjectIterator<Tuple> result;
    try {
        result = keySorter.getIterator();
    } catch (InterruptedException e) {
        throw new IOException(e);
    }

    recordsReader = new SeekableFileChannelInputView(ioManager, recordsChannel, memManager,
        memForRecordReader, lastBlockBytesRecords);

    return new FetchingIterator<T>(serializer, result, recordsReader, keySerializer, numKeyFields);
}
Example #8
Source File: GenericWriteAheadSink.java From flink with Apache License 2.0
@Override
public void notifyCheckpointComplete(long checkpointId) throws Exception {
    super.notifyCheckpointComplete(checkpointId);

    synchronized (pendingCheckpoints) {
        Iterator<PendingCheckpoint> pendingCheckpointIt = pendingCheckpoints.iterator();
        while (pendingCheckpointIt.hasNext()) {
            PendingCheckpoint pendingCheckpoint = pendingCheckpointIt.next();

            long pastCheckpointId = pendingCheckpoint.checkpointId;
            int subtaskId = pendingCheckpoint.subtaskId;
            long timestamp = pendingCheckpoint.timestamp;
            StreamStateHandle streamHandle = pendingCheckpoint.stateHandle;

            if (pastCheckpointId <= checkpointId) {
                try {
                    if (!committer.isCheckpointCommitted(subtaskId, pastCheckpointId)) {
                        try (FSDataInputStream in = streamHandle.openInputStream()) {
                            boolean success = sendValues(
                                new ReusingMutableToRegularIteratorWrapper<>(
                                    new InputViewIterator<>(
                                        new DataInputViewStreamWrapper(in),
                                        serializer),
                                    serializer),
                                pastCheckpointId,
                                timestamp);
                            if (success) {
                                // in case the checkpoint was successfully committed,
                                // discard its state from the backend and mark it for removal
                                // in case it failed, we retry on the next checkpoint
                                committer.commitCheckpoint(subtaskId, pastCheckpointId);
                                streamHandle.discardState();
                                pendingCheckpointIt.remove();
                            }
                        }
                    } else {
                        streamHandle.discardState();
                        pendingCheckpointIt.remove();
                    }
                } catch (Exception e) {
                    // we have to break here to prevent a new (later) checkpoint
                    // from being committed before this one
                    LOG.error("Could not commit checkpoint.", e);
                    break;
                }
            }
        }
    }
}