org.apache.flink.runtime.state.PlaceholderStreamStateHandle Java Examples
The following examples show how to use
org.apache.flink.runtime.state.PlaceholderStreamStateHandle.
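Before the examples, here is a minimal sketch, not taken from the Flink sources, of the idea they all share: when an incremental RocksDB snapshot encounters an sst file that an earlier checkpoint already uploaded, it registers a PlaceholderStreamStateHandle for that file instead of re-uploading it, and the shared state registry later swaps the placeholder for the previously registered handle. The class SstUploadPlannerSketch and the names planSstUploads, localSstFiles, previouslyUploadedSstFiles, and toUpload are hypothetical helpers, not Flink API; the no-argument PlaceholderStreamStateHandle constructor matches the Flink versions used in the examples on this page.

// Minimal sketch, not Flink source: choose between re-upload and placeholder.
// Assumes java.nio.file.Path for local files; helper and variable names are made up.
import org.apache.flink.runtime.state.PlaceholderStreamStateHandle;
import org.apache.flink.runtime.state.StateHandleID;
import org.apache.flink.runtime.state.StreamStateHandle;

import java.nio.file.Path;
import java.util.HashMap;
import java.util.Map;
import java.util.Set;

class SstUploadPlannerSketch {

    Map<StateHandleID, StreamStateHandle> planSstUploads(
            Map<StateHandleID, Path> localSstFiles,
            Set<StateHandleID> previouslyUploadedSstFiles,
            Map<StateHandleID, Path> toUpload) {

        Map<StateHandleID, StreamStateHandle> sstFiles = new HashMap<>();
        for (Map.Entry<StateHandleID, Path> entry : localSstFiles.entrySet()) {
            if (previouslyUploadedSstFiles.contains(entry.getKey())) {
                // Already uploaded by a previous checkpoint: reference it via a placeholder
                // that the shared state registry resolves to the original handle.
                sstFiles.put(entry.getKey(), new PlaceholderStreamStateHandle());
            } else {
                // New file: schedule a real upload; its handle is registered once the upload completes.
                toUpload.put(entry.getKey(), entry.getValue());
            }
        }
        return sstFiles;
    }
}

Keying the decision on StateHandleID (derived from the file name) is what lets consecutive checkpoints recognize the files they have in common.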
Example #1
Source File: RocksIncrementalSnapshotStrategy.java (from Flink-CEPplus, Apache License 2.0)
private void createUploadFilePaths(
    FileStatus[] fileStatuses,
    Map<StateHandleID, StreamStateHandle> sstFiles,
    Map<StateHandleID, Path> sstFilePaths,
    Map<StateHandleID, Path> miscFilePaths) {

    for (FileStatus fileStatus : fileStatuses) {
        final Path filePath = fileStatus.getPath();
        final String fileName = filePath.getName();
        final StateHandleID stateHandleID = new StateHandleID(fileName);

        if (fileName.endsWith(SST_FILE_SUFFIX)) {
            final boolean existsAlready =
                baseSstFiles != null && baseSstFiles.contains(stateHandleID);

            if (existsAlready) {
                // we introduce a placeholder state handle, that is replaced with the
                // original from the shared state registry (created from a previous checkpoint)
                sstFiles.put(stateHandleID, new PlaceholderStreamStateHandle());
            } else {
                sstFilePaths.put(stateHandleID, filePath);
            }
        } else {
            miscFilePaths.put(stateHandleID, filePath);
        }
    }
}
Example #2
Source File: RocksIncrementalSnapshotStrategy.java (from flink, Apache License 2.0)
private void createUploadFilePaths(
    FileStatus[] fileStatuses,
    Map<StateHandleID, StreamStateHandle> sstFiles,
    Map<StateHandleID, Path> sstFilePaths,
    Map<StateHandleID, Path> miscFilePaths) {

    for (FileStatus fileStatus : fileStatuses) {
        final Path filePath = fileStatus.getPath();
        final String fileName = filePath.getName();
        final StateHandleID stateHandleID = new StateHandleID(fileName);

        if (fileName.endsWith(SST_FILE_SUFFIX)) {
            final boolean existsAlready =
                baseSstFiles != null && baseSstFiles.contains(stateHandleID);

            if (existsAlready) {
                // we introduce a placeholder state handle, that is replaced with the
                // original from the shared state registry (created from a previous checkpoint)
                sstFiles.put(stateHandleID, new PlaceholderStreamStateHandle());
            } else {
                sstFilePaths.put(stateHandleID, filePath);
            }
        } else {
            miscFilePaths.put(stateHandleID, filePath);
        }
    }
}
Example #3
Source File: RocksIncrementalSnapshotStrategy.java (from flink, Apache License 2.0)
private void createUploadFilePaths(
    Path[] files,
    Map<StateHandleID, StreamStateHandle> sstFiles,
    Map<StateHandleID, Path> sstFilePaths,
    Map<StateHandleID, Path> miscFilePaths) {

    for (Path filePath : files) {
        final String fileName = filePath.getFileName().toString();
        final StateHandleID stateHandleID = new StateHandleID(fileName);

        if (fileName.endsWith(SST_FILE_SUFFIX)) {
            final boolean existsAlready =
                baseSstFiles != null && baseSstFiles.contains(stateHandleID);

            if (existsAlready) {
                // we introduce a placeholder state handle, that is replaced with the
                // original from the shared state registry (created from a previous checkpoint)
                sstFiles.put(stateHandleID, new PlaceholderStreamStateHandle());
            } else {
                sstFilePaths.put(stateHandleID, filePath);
            }
        } else {
            miscFilePaths.put(stateHandleID, filePath);
        }
    }
}
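Note the only difference between the snapshot-strategy variants above: Examples #1 and #2 walk the local RocksDB directory through Flink's FileStatus/Path abstraction (fileStatus.getPath().getName()), while Example #3 receives plain java.nio.file.Path objects and uses filePath.getFileName().toString(). The placeholder logic is identical in all three: a file whose StateHandleID already appears in baseSstFiles is registered as a PlaceholderStreamStateHandle rather than being scheduled for upload.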
Example #4
Source File: CheckpointCoordinatorTest.java (from Flink-CEPplus, Apache License 2.0)
private void performIncrementalCheckpoint(
    JobID jid,
    CheckpointCoordinator coord,
    ExecutionJobVertex jobVertex1,
    List<KeyGroupRange> keyGroupPartitions1,
    long timestamp,
    int cpSequenceNumber) throws Exception {

    // trigger the checkpoint
    coord.triggerCheckpoint(timestamp, false);

    assertTrue(coord.getPendingCheckpoints().keySet().size() == 1);
    long checkpointId = Iterables.getOnlyElement(coord.getPendingCheckpoints().keySet());

    for (int index = 0; index < jobVertex1.getParallelism(); index++) {
        KeyGroupRange keyGroupRange = keyGroupPartitions1.get(index);

        Map<StateHandleID, StreamStateHandle> privateState = new HashMap<>();
        privateState.put(
            new StateHandleID("private-1"),
            spy(new ByteStreamStateHandle("private-1", new byte[]{'p'})));

        Map<StateHandleID, StreamStateHandle> sharedState = new HashMap<>();

        // let all but the first CP overlap by one shared state.
        if (cpSequenceNumber > 0) {
            sharedState.put(
                new StateHandleID("shared-" + (cpSequenceNumber - 1)),
                spy(new PlaceholderStreamStateHandle()));
        }

        sharedState.put(
            new StateHandleID("shared-" + cpSequenceNumber),
            spy(new ByteStreamStateHandle("shared-" + cpSequenceNumber + "-" + keyGroupRange, new byte[]{'s'})));

        IncrementalRemoteKeyedStateHandle managedState = spy(new IncrementalRemoteKeyedStateHandle(
            new UUID(42L, 42L),
            keyGroupRange,
            checkpointId,
            sharedState,
            privateState,
            spy(new ByteStreamStateHandle("meta", new byte[]{'m'}))));

        OperatorSubtaskState operatorSubtaskState = spy(new OperatorSubtaskState(
            StateObjectCollection.empty(),
            StateObjectCollection.empty(),
            StateObjectCollection.singleton(managedState),
            StateObjectCollection.empty()));

        Map<OperatorID, OperatorSubtaskState> opStates = new HashMap<>();
        opStates.put(jobVertex1.getOperatorIDs().get(0), operatorSubtaskState);

        TaskStateSnapshot taskStateSnapshot = new TaskStateSnapshot(opStates);

        AcknowledgeCheckpoint acknowledgeCheckpoint = new AcknowledgeCheckpoint(
            jid,
            jobVertex1.getTaskVertices()[index].getCurrentExecutionAttempt().getAttemptId(),
            checkpointId,
            new CheckpointMetrics(),
            taskStateSnapshot);

        coord.receiveAcknowledgeMessage(acknowledgeCheckpoint);
    }
}
Example #5
Source File: CheckpointCoordinatorTest.java (from flink, Apache License 2.0)
private void performIncrementalCheckpoint(
    JobID jid,
    CheckpointCoordinator coord,
    ExecutionJobVertex jobVertex1,
    List<KeyGroupRange> keyGroupPartitions1,
    long timestamp,
    int cpSequenceNumber) throws Exception {

    // trigger the checkpoint
    coord.triggerCheckpoint(timestamp, false);

    assertTrue(coord.getPendingCheckpoints().keySet().size() == 1);
    long checkpointId = Iterables.getOnlyElement(coord.getPendingCheckpoints().keySet());

    for (int index = 0; index < jobVertex1.getParallelism(); index++) {
        KeyGroupRange keyGroupRange = keyGroupPartitions1.get(index);

        Map<StateHandleID, StreamStateHandle> privateState = new HashMap<>();
        privateState.put(
            new StateHandleID("private-1"),
            spy(new ByteStreamStateHandle("private-1", new byte[]{'p'})));

        Map<StateHandleID, StreamStateHandle> sharedState = new HashMap<>();

        // let all but the first CP overlap by one shared state.
        if (cpSequenceNumber > 0) {
            sharedState.put(
                new StateHandleID("shared-" + (cpSequenceNumber - 1)),
                spy(new PlaceholderStreamStateHandle()));
        }

        sharedState.put(
            new StateHandleID("shared-" + cpSequenceNumber),
            spy(new ByteStreamStateHandle("shared-" + cpSequenceNumber + "-" + keyGroupRange, new byte[]{'s'})));

        IncrementalRemoteKeyedStateHandle managedState = spy(new IncrementalRemoteKeyedStateHandle(
            new UUID(42L, 42L),
            keyGroupRange,
            checkpointId,
            sharedState,
            privateState,
            spy(new ByteStreamStateHandle("meta", new byte[]{'m'}))));

        OperatorSubtaskState operatorSubtaskState = spy(new OperatorSubtaskState(
            StateObjectCollection.empty(),
            StateObjectCollection.empty(),
            StateObjectCollection.singleton(managedState),
            StateObjectCollection.empty()));

        Map<OperatorID, OperatorSubtaskState> opStates = new HashMap<>();
        opStates.put(jobVertex1.getOperatorIDs().get(0), operatorSubtaskState);

        TaskStateSnapshot taskStateSnapshot = new TaskStateSnapshot(opStates);

        AcknowledgeCheckpoint acknowledgeCheckpoint = new AcknowledgeCheckpoint(
            jid,
            jobVertex1.getTaskVertices()[index].getCurrentExecutionAttempt().getAttemptId(),
            checkpointId,
            new CheckpointMetrics(),
            taskStateSnapshot);

        coord.receiveAcknowledgeMessage(acknowledgeCheckpoint, TASK_MANAGER_LOCATION_INFO);
    }
}
Example #6
Source File: CheckpointCoordinatorTest.java (from flink, Apache License 2.0)
private void performIncrementalCheckpoint(
    JobID jid,
    CheckpointCoordinator coord,
    ExecutionJobVertex jobVertex1,
    List<KeyGroupRange> keyGroupPartitions1,
    int cpSequenceNumber) throws Exception {

    // trigger the checkpoint
    coord.triggerCheckpoint(false);
    manuallyTriggeredScheduledExecutor.triggerAll();

    assertEquals(1, coord.getPendingCheckpoints().size());
    long checkpointId = Iterables.getOnlyElement(coord.getPendingCheckpoints().keySet());

    for (int index = 0; index < jobVertex1.getParallelism(); index++) {
        KeyGroupRange keyGroupRange = keyGroupPartitions1.get(index);

        Map<StateHandleID, StreamStateHandle> privateState = new HashMap<>();
        privateState.put(
            new StateHandleID("private-1"),
            spy(new ByteStreamStateHandle("private-1", new byte[]{'p'})));

        Map<StateHandleID, StreamStateHandle> sharedState = new HashMap<>();

        // let all but the first CP overlap by one shared state.
        if (cpSequenceNumber > 0) {
            sharedState.put(
                new StateHandleID("shared-" + (cpSequenceNumber - 1)),
                spy(new PlaceholderStreamStateHandle()));
        }

        sharedState.put(
            new StateHandleID("shared-" + cpSequenceNumber),
            spy(new ByteStreamStateHandle("shared-" + cpSequenceNumber + "-" + keyGroupRange, new byte[]{'s'})));

        IncrementalRemoteKeyedStateHandle managedState = spy(new IncrementalRemoteKeyedStateHandle(
            new UUID(42L, 42L),
            keyGroupRange,
            checkpointId,
            sharedState,
            privateState,
            spy(new ByteStreamStateHandle("meta", new byte[]{'m'}))));

        OperatorSubtaskState operatorSubtaskState = spy(new OperatorSubtaskState(
            StateObjectCollection.empty(),
            StateObjectCollection.empty(),
            StateObjectCollection.singleton(managedState),
            StateObjectCollection.empty()));

        Map<OperatorID, OperatorSubtaskState> opStates = new HashMap<>();
        opStates.put(jobVertex1.getOperatorIDs().get(0).getGeneratedOperatorID(), operatorSubtaskState);

        TaskStateSnapshot taskStateSnapshot = new TaskStateSnapshot(opStates);

        AcknowledgeCheckpoint acknowledgeCheckpoint = new AcknowledgeCheckpoint(
            jid,
            jobVertex1.getTaskVertices()[index].getCurrentExecutionAttempt().getAttemptId(),
            checkpointId,
            new CheckpointMetrics(),
            taskStateSnapshot);

        coord.receiveAcknowledgeMessage(acknowledgeCheckpoint, TASK_MANAGER_LOCATION_INFO);
    }
}
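The three test variants differ only in bookkeeping details (passing TASK_MANAGER_LOCATION_INFO to receiveAcknowledgeMessage, triggering via a manually triggered executor, resolving the operator ID through getGeneratedOperatorID()); the way they build shared state is the same. The sketch below distills that pattern: checkpoint N re-registers checkpoint N-1's shared entry as a PlaceholderStreamStateHandle and adds one genuinely new entry. The class and method names (SharedStateSketch, buildSharedState) are made up for illustration and are not part of Flink or of CheckpointCoordinatorTest.

// Distilled sketch of the tests' shared-state pattern; names are hypothetical.
import org.apache.flink.runtime.state.PlaceholderStreamStateHandle;
import org.apache.flink.runtime.state.StateHandleID;
import org.apache.flink.runtime.state.StreamStateHandle;
import org.apache.flink.runtime.state.memory.ByteStreamStateHandle;

import java.util.HashMap;
import java.util.Map;

class SharedStateSketch {

    static Map<StateHandleID, StreamStateHandle> buildSharedState(int cpSequenceNumber) {
        Map<StateHandleID, StreamStateHandle> sharedState = new HashMap<>();

        if (cpSequenceNumber > 0) {
            // This entry was uploaded by the previous checkpoint, so it is referenced
            // through a placeholder instead of being re-uploaded.
            sharedState.put(
                new StateHandleID("shared-" + (cpSequenceNumber - 1)),
                new PlaceholderStreamStateHandle());
        }

        // The entry that is new in this checkpoint carries actual bytes.
        sharedState.put(
            new StateHandleID("shared-" + cpSequenceNumber),
            new ByteStreamStateHandle("shared-" + cpSequenceNumber, new byte[]{'s'}));

        return sharedState;
    }
}

Calling buildSharedState with increasing cpSequenceNumber values mirrors the tests: every checkpoint after the first overlaps with its predecessor by exactly one shared entry, which is what exercises the placeholder resolution in the shared state registry.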