org.apache.flink.core.fs.RecoverableWriter.ResumeRecoverable Java Examples
The following examples show how to use
org.apache.flink.core.fs.RecoverableWriter.ResumeRecoverable.
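Before the examples, here is a minimal, self-contained sketch of where a ResumeRecoverable sits in the RecoverableWriter lifecycle: persist() produces a resume handle at the current durable position, and recover() later reopens the stream exactly there. The file path and payloads are illustrative assumptions; the API calls are the ones exercised by the examples below.

import org.apache.flink.core.fs.FileSystem;
import org.apache.flink.core.fs.Path;
import org.apache.flink.core.fs.RecoverableFsDataOutputStream;
import org.apache.flink.core.fs.RecoverableWriter;
import org.apache.flink.core.fs.RecoverableWriter.ResumeRecoverable;

import java.io.IOException;
import java.nio.charset.StandardCharsets;

public class ResumeRecoverableSketch {

	public static void main(String[] args) throws IOException {
		final Path path = new Path("file:///tmp/part-0");   // illustrative path
		final RecoverableWriter writer =
				FileSystem.get(path.toUri()).createRecoverableWriter();

		// write some data and take a resume handle at the current position
		RecoverableFsDataOutputStream out = writer.open(path);
		out.write("hello".getBytes(StandardCharsets.UTF_8));
		ResumeRecoverable handle = out.persist();   // data so far is durable

		// ... after a failure, reopen the stream at the persisted position
		if (writer.supportsResume()) {
			out = writer.recover(handle);
			out.write(" world".getBytes(StandardCharsets.UTF_8));
		}

		// finalize the file
		out.closeForCommit().commit();
	}
}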
Example #1
Source File: Bucket.java From Flink-CEPplus with Apache License 2.0
private void restoreInProgressFile(final BucketState&lt;BucketID&gt; state) throws IOException {
	if (!state.hasInProgressResumableFile()) {
		return;
	}

	// we try to resume the previous in-progress file
	final ResumeRecoverable resumable = state.getInProgressResumableFile();

	if (fsWriter.supportsResume()) {
		final RecoverableFsDataOutputStream stream = fsWriter.recover(resumable);
		inProgressPart = partFileFactory.resumeFrom(
				bucketId, stream, resumable, state.getInProgressFileCreationTime());
	} else {
		// if the writer does not support resume, then we close the
		// in-progress part and commit it, as done in the case of pending files.
		fsWriter.recoverForCommit(resumable).commitAfterRecovery();
	}

	if (fsWriter.requiresCleanupOfRecoverableState()) {
		fsWriter.cleanupRecoverableState(resumable);
	}
}
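Design note: when the underlying writer cannot resume a stream (supportsResume() returns false), the recovered data is not thrown away. Instead the in-progress part is committed as a finished file via commitAfterRecovery(), trading a smaller-than-usual part file for correctness of the restored data.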
Example #2
Source File: Bucket.java From Flink-CEPplus with Apache License 2.0
BucketState&lt;BucketID&gt; onReceptionOfCheckpoint(long checkpointId) throws IOException {
	prepareBucketForCheckpointing(checkpointId);

	ResumeRecoverable inProgressResumable = null;
	long inProgressFileCreationTime = Long.MAX_VALUE;

	if (inProgressPart != null) {
		inProgressResumable = inProgressPart.persist();
		inProgressFileCreationTime = inProgressPart.getCreationTime();

		// this is an optimization: writers that do not require cleanup do not
		// have to keep track of resumables and later iterate over the active
		// buckets (see onSuccessfulCompletionOfCheckpoint()).
		if (fsWriter.requiresCleanupOfRecoverableState()) {
			this.resumablesPerCheckpoint.put(checkpointId, inProgressResumable);
		}
	}

	return new BucketState&lt;&gt;(
			bucketId, bucketPath, inProgressFileCreationTime,
			inProgressResumable, pendingPartsPerCheckpoint);
}
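The ResumeRecoverable returned by persist() is what ends up in the checkpointed BucketState, so after a failure restoreInProgressFile() (Example #1) can hand it back to the writer. Recording it in resumablesPerCheckpoint only when the writer actually requires cleanup keeps bookkeeping out of the common case; the cleanup pass itself is shown in Example #6.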
Example #3
Source File: Bucket.java From flink with Apache License 2.0
private void restoreInProgressFile(final BucketState&lt;BucketID&gt; state) throws IOException {
	if (!state.hasInProgressResumableFile()) {
		return;
	}

	// we try to resume the previous in-progress file
	final ResumeRecoverable resumable = state.getInProgressResumableFile();

	if (fsWriter.supportsResume()) {
		final RecoverableFsDataOutputStream stream = fsWriter.recover(resumable);
		inProgressPart = partFileFactory.resumeFrom(
				bucketId, stream, resumable, state.getInProgressFileCreationTime());
	} else {
		// if the writer does not support resume, then we close the
		// in-progress part and commit it, as done in the case of pending files.
		fsWriter.recoverForCommit(resumable).commitAfterRecovery();
	}

	if (fsWriter.requiresCleanupOfRecoverableState()) {
		fsWriter.cleanupRecoverableState(resumable);
	}
}
Example #4
Source File: Bucket.java From flink with Apache License 2.0
BucketState&lt;BucketID&gt; onReceptionOfCheckpoint(long checkpointId) throws IOException {
	prepareBucketForCheckpointing(checkpointId);

	ResumeRecoverable inProgressResumable = null;
	long inProgressFileCreationTime = Long.MAX_VALUE;

	if (inProgressPart != null) {
		inProgressResumable = inProgressPart.persist();
		inProgressFileCreationTime = inProgressPart.getCreationTime();

		// this is an optimization: writers that do not require cleanup do not
		// have to keep track of resumables and later iterate over the active
		// buckets (see onSuccessfulCompletionOfCheckpoint()).
		if (fsWriter.requiresCleanupOfRecoverableState()) {
			this.resumablesPerCheckpoint.put(checkpointId, inProgressResumable);
		}
	}

	return new BucketState&lt;&gt;(
			bucketId, bucketPath, inProgressFileCreationTime,
			inProgressResumable, pendingPartsPerCheckpoint);
}
Example #5
Source File: LocalRecoverableFsDataOutputStream.java From Flink-CEPplus with Apache License 2.0
@Override
public ResumeRecoverable persist() throws IOException {
	// we call both flush and sync in order to ensure persistence on mounted
	// file systems, like NFS, EBS, EFS, ...
	flush();
	sync();

	return new LocalRecoverable(targetFile, tempFile, getPos());
}
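The two calls serve different layers: flush() pushes the stream's buffered bytes down to the file system, while sync() asks the operating system to force them to the storage device. On network-backed mounts such as NFS, EBS, or EFS, only the combination makes the position captured by getPos() a safe recovery point.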
Example #6
Source File: Bucket.java From Flink-CEPplus with Apache License 2.0
private void cleanupOutdatedResumables(long checkpointId) throws IOException {
	// resumables taken for checkpoints strictly earlier than checkpointId are
	// obsolete once that checkpoint completes, so delete their recoverable state
	Iterator&lt;Map.Entry&lt;Long, ResumeRecoverable&gt;&gt; it =
			resumablesPerCheckpoint.headMap(checkpointId, false)
					.entrySet().iterator();

	while (it.hasNext()) {
		final ResumeRecoverable recoverable = it.next().getValue();
		final boolean successfullyDeleted = fsWriter.cleanupRecoverableState(recoverable);
		it.remove();

		if (LOG.isDebugEnabled() && successfullyDeleted) {
			LOG.debug("Subtask {} successfully deleted incomplete part for bucket id={}.",
					subtaskIndex, bucketId);
		}
	}
}
Example #7
Source File: LocalRecoverableFsDataOutputStream.java From flink with Apache License 2.0
@Override
public ResumeRecoverable persist() throws IOException {
	// we call both flush and sync in order to ensure persistence on mounted
	// file systems, like NFS, EBS, EFS, ...
	flush();
	sync();

	return new LocalRecoverable(targetFile, tempFile, getPos());
}
Example #8
Source File: Bucket.java From flink with Apache License 2.0
private void cleanupOutdatedResumables(long checkpointId) throws IOException {
	// resumables taken for checkpoints strictly earlier than checkpointId are
	// obsolete once that checkpoint completes, so delete their recoverable state
	Iterator&lt;Map.Entry&lt;Long, ResumeRecoverable&gt;&gt; it =
			resumablesPerCheckpoint.headMap(checkpointId, false)
					.entrySet().iterator();

	while (it.hasNext()) {
		final ResumeRecoverable recoverable = it.next().getValue();
		final boolean successfullyDeleted = fsWriter.cleanupRecoverableState(recoverable);
		it.remove();

		if (LOG.isDebugEnabled() && successfullyDeleted) {
			LOG.debug("Subtask {} successfully deleted incomplete part for bucket id={}.",
					subtaskIndex, bucketId);
		}
	}
}
Example #9
Source File: HadoopRecoverableFsDataOutputStream.java From Flink-CEPplus with Apache License 2.0
@Override
public ResumeRecoverable persist() throws IOException {
	// sync() makes the data written so far durable before we
	// snapshot the current position in the recoverable
	sync();
	return new HadoopFsRecoverable(targetFile, tempFile, getPos());
}
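Compared with the local implementation in Example #5, there is no separate flush() here: this stream's sync() is expected to both flush and durably persist the data (on HDFS, presumably via the stream's hflush/hsync calls) before the current position is captured in the recoverable.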
Example #10
Source File: HadoopRecoverableFsDataOutputStream.java From flink with Apache License 2.0
@Override
public ResumeRecoverable persist() throws IOException {
	// sync() makes the data written so far durable before we
	// snapshot the current position in the recoverable
	sync();
	return new HadoopFsRecoverable(targetFile, tempFile, getPos());
}
Example #11
Source File: RecoverableFsDataOutputStream.java From Flink-CEPplus with Apache License 2.0
/**
 * Ensures all data so far is persistent (similar to {@link #sync()}) and returns
 * a handle to recover the stream at the current position.
 */
public abstract ResumeRecoverable persist() throws IOException;
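Since this is the contract the implementations above fulfil, a short sketch of what it means for callers may help. The writer, path, and byte arrays below are illustrative assumptions (imports as in the lifecycle sketch at the top of the page): bytes written before persist() are durable, while bytes written after it are not covered by the returned handle and must be re-written after recovery.

static void persistContract(RecoverableWriter writer, Path path) throws IOException {
	byte[] firstBatch = {1, 2, 3};    // illustrative payloads
	byte[] secondBatch = {4, 5, 6};

	RecoverableFsDataOutputStream out = writer.open(path);
	out.write(firstBatch);
	ResumeRecoverable handle = out.persist();   // firstBatch is now durable
	out.write(secondBatch);                     // NOT covered by 'handle'

	// if the process fails here, recovering from 'handle' reopens the stream
	// at the position right after firstBatch; secondBatch must be re-written
	RecoverableFsDataOutputStream restored = writer.recover(handle);
	restored.write(secondBatch);
	restored.closeForCommit().commit();
}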
Example #12
Source File: RecoverableFsDataOutputStream.java From flink with Apache License 2.0
/**
 * Ensures all data so far is persistent (similar to {@link #sync()}) and returns
 * a handle to recover the stream at the current position.
 */
public abstract ResumeRecoverable persist() throws IOException;