org.apache.flink.runtime.checkpoint.StateObjectCollection Java Examples
The following examples show how to use
org.apache.flink.runtime.checkpoint.StateObjectCollection.
Each example lists its original source file, the project it was taken from, and its license.
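Before the examples, the snippet below sketches the basic shape of the class as it appears throughout this page: a StateObjectCollection wraps a java.util.Collection of state objects and is typically created from an existing collection, via StateObjectCollection.empty(), or via StateObjectCollection.singleton(...). This is a minimal sketch and not taken from any of the projects below; the handle parameter is a placeholder for whatever OperatorStateHandle a snapshot or a test mock would provide.

import java.util.Arrays;
import java.util.List;

import org.apache.flink.runtime.checkpoint.StateObjectCollection;
import org.apache.flink.runtime.state.OperatorStateHandle;

public class StateObjectCollectionSketch {

    // 'handle' is a placeholder; obtain a real OperatorStateHandle from a snapshot or a test mock.
    static void sketch(OperatorStateHandle handle) {
        // Wrap an existing collection of handles.
        List<OperatorStateHandle> handles = Arrays.asList(handle);
        StateObjectCollection<OperatorStateHandle> fromList = new StateObjectCollection<>(handles);

        // Factory methods that appear in several of the examples below.
        StateObjectCollection<OperatorStateHandle> none = StateObjectCollection.empty();
        StateObjectCollection<OperatorStateHandle> single = StateObjectCollection.singleton(handle);

        // The class implements java.util.Collection, so the usual read operations apply.
        boolean empty = none.isEmpty();               // true
        int count = single.size();                    // 1
        OperatorStateHandle first = fromList.iterator().next();
    }
}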
Example #1
Source File: LocalStateForwardingTest.java From Flink-CEPplus with Apache License 2.0
// Checks that the job-manager-owned and the task-local parts of the snapshot result match the expected state handles.
private static <T extends StateObject> void performCheck(
        Future<SnapshotResult<T>> resultFuture,
        StateObjectCollection<T> jmState,
        StateObjectCollection<T> tmState) {

    SnapshotResult<T> snapshotResult;
    try {
        snapshotResult = resultFuture.get();
    } catch (Exception e) {
        throw new RuntimeException(e);
    }

    Assert.assertEquals(
        snapshotResult.getJobManagerOwnedSnapshot(),
        jmState.iterator().next());

    Assert.assertEquals(
        snapshotResult.getTaskLocalSnapshot(),
        tmState.iterator().next());
}
Example #2
Source File: MetadataV2V3SerializerBase.java From flink with Apache License 2.0
protected OperatorSubtaskState deserializeSubtaskState(
        DataInputStream dis,
        @Nullable DeserializationContext context) throws IOException {

    // Each optional operator state handle is preceded by an int flag indicating its presence.
    final boolean hasManagedOperatorState = dis.readInt() != 0;
    final OperatorStateHandle managedOperatorState = hasManagedOperatorState
        ? deserializeOperatorStateHandle(dis, context)
        : null;

    final boolean hasRawOperatorState = dis.readInt() != 0;
    final OperatorStateHandle rawOperatorState = hasRawOperatorState
        ? deserializeOperatorStateHandle(dis, context)
        : null;

    final KeyedStateHandle managedKeyedState = deserializeKeyedStateHandle(dis, context);
    final KeyedStateHandle rawKeyedState = deserializeKeyedStateHandle(dis, context);

    StateObjectCollection<InputChannelStateHandle> inputChannelState =
        deserializeInputChannelStateHandle(dis, context);
    StateObjectCollection<ResultSubpartitionStateHandle> resultSubpartitionState =
        deserializeResultSubpartitionStateHandle(dis, context);

    return new OperatorSubtaskState(
        managedOperatorState,
        rawOperatorState,
        managedKeyedState,
        rawKeyedState,
        inputChannelState,
        resultSubpartitionState);
}
Example #3
Source File: LocalStateForwardingTest.java From flink with Apache License 2.0
private static <T extends StateObject> void performCheck(
        Future<SnapshotResult<T>> resultFuture,
        StateObjectCollection<T> jmState,
        StateObjectCollection<T> tmState) {

    SnapshotResult<T> snapshotResult;
    try {
        snapshotResult = resultFuture.get();
    } catch (Exception e) {
        throw new RuntimeException(e);
    }

    Assert.assertEquals(
        snapshotResult.getJobManagerOwnedSnapshot(),
        jmState.iterator().next());

    Assert.assertEquals(
        snapshotResult.getTaskLocalSnapshot(),
        tmState.iterator().next());
}
Example #4
Source File: LocalStateForwardingTest.java From flink with Apache License 2.0
private static <T extends StateObject> void performCollectionCheck(
        Future<SnapshotResult<StateObjectCollection<T>>> resultFuture,
        StateObjectCollection<T> jmState,
        StateObjectCollection<T> tmState) {

    SnapshotResult<StateObjectCollection<T>> snapshotResult;
    try {
        snapshotResult = resultFuture.get();
    } catch (Exception e) {
        throw new RuntimeException(e);
    }

    Assert.assertEquals(snapshotResult.getJobManagerOwnedSnapshot(), jmState);
    Assert.assertEquals(snapshotResult.getTaskLocalSnapshot(), tmState);
}
Example #5
Source File: BackendRestorerProcedureTest.java From Flink-CEPplus with Apache License 2.0
/**
 * Tests if there is an exception if all restore attempts are exhausted and failed.
 */
@Test
public void testExceptionThrownIfAllRestoresFailed() throws Exception {

    CloseableRegistry closeableRegistry = new CloseableRegistry();

    OperatorStateHandle firstFailHandle = mock(OperatorStateHandle.class);
    OperatorStateHandle secondFailHandle = mock(OperatorStateHandle.class);
    OperatorStateHandle thirdFailHandle = mock(OperatorStateHandle.class);

    List<StateObjectCollection<OperatorStateHandle>> sortedRestoreOptions = Arrays.asList(
        new StateObjectCollection<>(Collections.singletonList(firstFailHandle)),
        new StateObjectCollection<>(Collections.singletonList(secondFailHandle)),
        new StateObjectCollection<>(Collections.singletonList(thirdFailHandle)));

    BackendRestorerProcedure<OperatorStateBackend, OperatorStateHandle> restorerProcedure =
        new BackendRestorerProcedure<>(backendSupplier, closeableRegistry, "test op state backend");

    try {
        restorerProcedure.createAndRestore(sortedRestoreOptions);
        Assert.fail();
    } catch (Exception ignore) {
    }

    verify(firstFailHandle).openInputStream();
    verify(secondFailHandle).openInputStream();
    verify(thirdFailHandle).openInputStream();
}
Example #6
Source File: BackendRestorerProcedureTest.java From flink with Apache License 2.0
/**
 * Test that the restore can be stopped via the provided closeable registry.
 */
@Test
public void testCanBeCanceledViaRegistry() throws Exception {

    CloseableRegistry closeableRegistry = new CloseableRegistry();
    OneShotLatch waitForBlock = new OneShotLatch();
    OneShotLatch unblock = new OneShotLatch();

    OperatorStateHandle blockingRestoreHandle = mock(OperatorStateHandle.class);
    when(blockingRestoreHandle.openInputStream()).thenReturn(new BlockingFSDataInputStream(waitForBlock, unblock));

    List<StateObjectCollection<OperatorStateHandle>> sortedRestoreOptions =
        Collections.singletonList(new StateObjectCollection<>(Collections.singletonList(blockingRestoreHandle)));

    BackendRestorerProcedure<OperatorStateBackend, OperatorStateHandle> restorerProcedure =
        new BackendRestorerProcedure<>(backendSupplier, closeableRegistry, "test op state backend");

    AtomicReference<Exception> exceptionReference = new AtomicReference<>(null);
    Thread restoreThread = new Thread(() -> {
        try {
            restorerProcedure.createAndRestore(sortedRestoreOptions);
        } catch (Exception e) {
            exceptionReference.set(e);
        }
    });

    restoreThread.start();
    waitForBlock.await();
    closeableRegistry.close();
    unblock.trigger();
    restoreThread.join();

    Exception exception = exceptionReference.get();
    Assert.assertTrue(exception instanceof FlinkException);
}
Example #7
Source File: OperatorSnapshotFinalizerTest.java From flink with Apache License 2.0
private void checkResult(Object expected, StateObjectCollection<?> actual) {
    if (expected == null) {
        assertTrue(actual == null || actual.isEmpty());
    } else {
        assertEquals(1, actual.size());
        assertEquals(expected, actual.iterator().next());
    }
}
Example #8
Source File: OperatorStateWriter.java From bravo with Apache License 2.0
// Restores an operator state backend from the base handles, applies the transformer, and snapshots the result into outDir.
private StateObjectCollection<OperatorStateHandle> transformSubtaskOpState(
        Path outDir,
        Integer subtaskId,
        StateObjectCollection<OperatorStateHandle> baseState) {

    if (transformer == null) {
        return baseState;
    }

    StateObjectCollection<OperatorStateHandle> opHandle = baseState;
    try (OperatorStateBackend opBackend = OperatorStateReader.restoreOperatorStateBackend(opHandle)) {

        transformer.accept(subtaskId, opBackend);

        OperatorStateHandle newSnapshot = opBackend
            .snapshot(checkpointId, System.currentTimeMillis(), new CheckpointStreamFactory() {
                @Override
                public CheckpointStateOutputStream createCheckpointStateOutputStream(
                        CheckpointedStateScope scope) throws IOException {
                    return new FileBasedStateOutputStream(
                        outDir.getFileSystem(),
                        new Path(outDir, String.valueOf(UUID.randomUUID())));
                }
            }, null)
            .get()
            .getJobManagerOwnedSnapshot();

        return new StateObjectCollection<>(Lists.newArrayList(newSnapshot));
    } catch (Exception e) {
        throw new RuntimeException(e);
    }
}
Example #9
Source File: OperatorStateReader.java From bravo with Apache License 2.0
public static OperatorStateBackend restoreOperatorStateBackend(
        StateObjectCollection<OperatorStateHandle> managedOpState) throws Exception {

    DefaultOperatorStateBackend stateBackend = new DefaultOperatorStateBackend(
        OperatorStateReader.class.getClassLoader(),
        new ExecutionConfig(),
        false);

    stateBackend.restore(managedOpState);
    return stateBackend;
}
Example #10
Source File: OperatorStateInputFormat.java From flink with Apache License 2.0
private OperatorStateInputSplit[] subPartitionSingleSplit(int minNumSplits, OperatorStateInputSplit[] splits) {
    if (splits.length == 0) {
        return splits;
    }

    // We only want to output a single instance of the union state so we only need
    // to transform a single input split. An arbitrary split is chosen and
    // sub-partitioned for better data distribution across the cluster.
    return CollectionUtil.mapWithIndex(
        CollectionUtil.partition(splits[0].getPrioritizedManagedOperatorState().get(0).asList(), minNumSplits),
        (state, index) -> new OperatorStateInputSplit(new StateObjectCollection<>(new ArrayList<>(state)), index)
    ).toArray(OperatorStateInputSplit[]::new);
}
Example #11
Source File: OperatorStateInputFormat.java From flink with Apache License 2.0
// Redistributes the managed operator state round-robin into minNumSplits groups and wraps each group in its own input split.
private OperatorStateInputSplit[] getOperatorStateInputSplits(int minNumSplits) {
    Map<OperatorInstanceID, List<OperatorStateHandle>> newManagedOperatorStates =
        reDistributePartitionableStates(
            singletonList(operatorState),
            minNumSplits,
            singletonList(OperatorIDPair.generatedIDOnly(operatorState.getOperatorID())),
            OperatorSubtaskState::getManagedOperatorState,
            RoundRobinOperatorStateRepartitioner.INSTANCE);

    return CollectionUtil.mapWithIndex(
        newManagedOperatorStates.values(),
        (handles, index) -> new OperatorStateInputSplit(new StateObjectCollection<>(handles), index)
    ).toArray(OperatorStateInputSplit[]::new);
}
Example #12
Source File: KeyGroupRangeInputSplit.java From flink with Apache License 2.0
public PrioritizedOperatorSubtaskState getPrioritizedOperatorSubtaskState() {
    return new PrioritizedOperatorSubtaskState.Builder(
        new OperatorSubtaskState(
            StateObjectCollection.empty(),
            StateObjectCollection.empty(),
            new StateObjectCollection<>(managedKeyedState),
            new StateObjectCollection<>(rawKeyedState),
            StateObjectCollection.empty(),
            StateObjectCollection.empty()
        ),
        Collections.emptyList()
    ).build();
}
Example #13
Source File: SubtaskCheckpointCoordinatorImpl.java From flink with Apache License 2.0
private OperatorSnapshotFutures buildOperatorSnapshotFutures(
        CheckpointMetaData checkpointMetaData,
        CheckpointOptions checkpointOptions,
        OperatorChain<?, ?> operatorChain,
        StreamOperator<?> op,
        Supplier<Boolean> isCanceled,
        ChannelStateWriteResult channelStateWriteResult,
        CheckpointStreamFactory storage) throws Exception {

    OperatorSnapshotFutures snapshotInProgress = checkpointStreamOperator(
        op,
        checkpointMetaData,
        checkpointOptions,
        storage,
        isCanceled);

    // Only the head operator snapshots input channel state and only the tail operator snapshots result subpartition state.
    if (op == operatorChain.getHeadOperator()) {
        snapshotInProgress.setInputChannelStateFuture(
            channelStateWriteResult
                .getInputChannelStateHandles()
                .thenApply(StateObjectCollection::new)
                .thenApply(SnapshotResult::of));
    }
    if (op == operatorChain.getTailOperator()) {
        snapshotInProgress.setResultSubpartitionStateFuture(
            channelStateWriteResult
                .getResultSubpartitionStateHandles()
                .thenApply(StateObjectCollection::new)
                .thenApply(SnapshotResult::of));
    }
    return snapshotInProgress;
}
Example #14
Source File: KeyGroupRangeInputSplit.java From flink with Apache License 2.0
public PrioritizedOperatorSubtaskState getPrioritizedOperatorSubtaskState() {
    return new PrioritizedOperatorSubtaskState.Builder(
        new OperatorSubtaskState(
            StateObjectCollection.empty(),
            StateObjectCollection.empty(),
            new StateObjectCollection<>(managedKeyedState),
            new StateObjectCollection<>(rawKeyedState)
        ),
        Collections.emptyList()
    ).build();
}
Example #15
Source File: AbstractStreamOperatorTestHarness.java From flink with Apache License 2.0
/**
 * Takes the different {@link OperatorSubtaskState} created by calling {@link #snapshot(long, long)}
 * on different instances of {@link AbstractStreamOperatorTestHarness} (each one representing one subtask)
 * and repacks them into a single {@link OperatorSubtaskState} so that the parallelism of the test
 * can change arbitrarily (i.e. be able to scale both up and down).
 *
 * <p>After repacking the partial states, remember to use
 * {@link #repartitionOperatorState(OperatorSubtaskState, int, int, int, int)} to reshape the state handles
 * to include only those key-group states in the local key-group range and the operator states that would
 * be assigned to the local subtask. Bear in mind that for parallelism greater than one, you
 * have to use the constructor {@link #AbstractStreamOperatorTestHarness(StreamOperator, int, int, int)}.
 *
 * <p><b>NOTE: </b> each of the {@code handles} in the argument list is assumed to be from a single task of a single
 * operator (i.e. chain length of one).
 *
 * <p>For an example of how to use it, have a look at
 * {@link AbstractStreamOperatorTest#testStateAndTimerStateShufflingScalingDown()}.
 *
 * @param handles the different states to be merged.
 * @return the resulting state, or {@code null} if no partial states are specified.
 */
public static OperatorSubtaskState repackageState(OperatorSubtaskState... handles) throws Exception {

    if (handles.length < 1) {
        return null;
    } else if (handles.length == 1) {
        return handles[0];
    }

    List<OperatorStateHandle> mergedManagedOperatorState = new ArrayList<>(handles.length);
    List<OperatorStateHandle> mergedRawOperatorState = new ArrayList<>(handles.length);

    List<KeyedStateHandle> mergedManagedKeyedState = new ArrayList<>(handles.length);
    List<KeyedStateHandle> mergedRawKeyedState = new ArrayList<>(handles.length);

    for (OperatorSubtaskState handle : handles) {

        Collection<OperatorStateHandle> managedOperatorState = handle.getManagedOperatorState();
        Collection<OperatorStateHandle> rawOperatorState = handle.getRawOperatorState();
        Collection<KeyedStateHandle> managedKeyedState = handle.getManagedKeyedState();
        Collection<KeyedStateHandle> rawKeyedState = handle.getRawKeyedState();

        mergedManagedOperatorState.addAll(managedOperatorState);
        mergedRawOperatorState.addAll(rawOperatorState);
        mergedManagedKeyedState.addAll(managedKeyedState);
        mergedRawKeyedState.addAll(rawKeyedState);
    }

    return new OperatorSubtaskState(
        new StateObjectCollection<>(mergedManagedOperatorState),
        new StateObjectCollection<>(mergedRawOperatorState),
        new StateObjectCollection<>(mergedManagedKeyedState),
        new StateObjectCollection<>(mergedRawKeyedState));
}
Example #16
Source File: MetadataV2V3SerializerBase.java From flink with Apache License 2.0
private static <T extends StateObject> void serializeSingleton(
        StateObjectCollection<T> stateObjectCollection,
        DataOutputStream dos,
        BiConsumerWithException<T, DataOutputStream, IOException> cons) throws IOException {

    final T state = extractSingleton(stateObjectCollection);
    if (state != null) {
        dos.writeInt(1);
        cons.accept(state, dos);
    } else {
        dos.writeInt(0);
    }
}
Example #17
Source File: MetadataV2V3SerializerBase.java From flink with Apache License 2.0
static <T extends StateObject> StateObjectCollection<T> deserializeCollection(
        DataInputStream dis,
        DeserializationContext context,
        BiFunctionWithException<DataInputStream, DeserializationContext, T, IOException> s) throws IOException {

    int size = dis.readInt();
    List<T> result = new ArrayList<>();
    for (int i = 0; i < size; i++) {
        result.add(s.apply(dis, context));
    }
    return new StateObjectCollection<>(result);
}
Example #18
Source File: MetadataV3Serializer.java From flink with Apache License 2.0
@VisibleForTesting
@Override
public StateObjectCollection<ResultSubpartitionStateHandle> deserializeResultSubpartitionStateHandle(
        DataInputStream dis,
        @Nullable DeserializationContext context) throws IOException {
    return deserializeCollection(dis, context, channelStateHandleSerializer::deserializeResultSubpartitionStateHandle);
}
Example #19
Source File: MetadataV3Serializer.java From flink with Apache License 2.0
@VisibleForTesting
@Override
public StateObjectCollection<InputChannelStateHandle> deserializeInputChannelStateHandle(
        DataInputStream dis,
        @Nullable DeserializationContext context) throws IOException {
    return deserializeCollection(dis, context, channelStateHandleSerializer::deserializeInputChannelStateHandle);
}
Example #20
Source File: MetadataV3Serializer.java From flink with Apache License 2.0
private <T extends StateObject> void serializeCollection(
        StateObjectCollection<T> stateObjectCollection,
        DataOutputStream dos,
        BiConsumerWithException<T, DataOutputStream, IOException> cons) throws IOException {

    if (stateObjectCollection == null) {
        dos.writeInt(0);
    } else {
        dos.writeInt(stateObjectCollection.size());
        for (T stateObject : stateObjectCollection) {
            cons.accept(stateObject, dos);
        }
    }
}
Example #21
Source File: OperatorSnapshotFutures.java From flink with Apache License 2.0
public OperatorSnapshotFutures(
        @Nonnull RunnableFuture<SnapshotResult<KeyedStateHandle>> keyedStateManagedFuture,
        @Nonnull RunnableFuture<SnapshotResult<KeyedStateHandle>> keyedStateRawFuture,
        @Nonnull RunnableFuture<SnapshotResult<OperatorStateHandle>> operatorStateManagedFuture,
        @Nonnull RunnableFuture<SnapshotResult<OperatorStateHandle>> operatorStateRawFuture,
        @Nonnull Future<SnapshotResult<StateObjectCollection<InputChannelStateHandle>>> inputChannelStateFuture,
        @Nonnull Future<SnapshotResult<StateObjectCollection<ResultSubpartitionStateHandle>>> resultSubpartitionStateFuture) {
    this.keyedStateManagedFuture = keyedStateManagedFuture;
    this.keyedStateRawFuture = keyedStateRawFuture;
    this.operatorStateManagedFuture = operatorStateManagedFuture;
    this.operatorStateRawFuture = operatorStateRawFuture;
    this.inputChannelStateFuture = inputChannelStateFuture;
    this.resultSubpartitionStateFuture = resultSubpartitionStateFuture;
}
Example #22
Source File: OperatorSnapshotFinalizer.java From flink with Apache License 2.0
// Runs the pending snapshot futures and splits each result into job-manager-owned and task-local state.
public OperatorSnapshotFinalizer(
        @Nonnull OperatorSnapshotFutures snapshotFutures) throws ExecutionException, InterruptedException {

    SnapshotResult<KeyedStateHandle> keyedManaged =
        FutureUtils.runIfNotDoneAndGet(snapshotFutures.getKeyedStateManagedFuture());
    SnapshotResult<KeyedStateHandle> keyedRaw =
        FutureUtils.runIfNotDoneAndGet(snapshotFutures.getKeyedStateRawFuture());
    SnapshotResult<OperatorStateHandle> operatorManaged =
        FutureUtils.runIfNotDoneAndGet(snapshotFutures.getOperatorStateManagedFuture());
    SnapshotResult<OperatorStateHandle> operatorRaw =
        FutureUtils.runIfNotDoneAndGet(snapshotFutures.getOperatorStateRawFuture());
    SnapshotResult<StateObjectCollection<InputChannelStateHandle>> inputChannel =
        snapshotFutures.getInputChannelStateFuture().get();
    SnapshotResult<StateObjectCollection<ResultSubpartitionStateHandle>> resultSubpartition =
        snapshotFutures.getResultSubpartitionStateFuture().get();

    jobManagerOwnedState = new OperatorSubtaskState(
        operatorManaged.getJobManagerOwnedSnapshot(),
        operatorRaw.getJobManagerOwnedSnapshot(),
        keyedManaged.getJobManagerOwnedSnapshot(),
        keyedRaw.getJobManagerOwnedSnapshot(),
        inputChannel.getJobManagerOwnedSnapshot(),
        resultSubpartition.getJobManagerOwnedSnapshot()
    );

    taskLocalState = new OperatorSubtaskState(
        operatorManaged.getTaskLocalSnapshot(),
        operatorRaw.getTaskLocalSnapshot(),
        keyedManaged.getTaskLocalSnapshot(),
        keyedRaw.getTaskLocalSnapshot(),
        inputChannel.getTaskLocalSnapshot(),
        resultSubpartition.getTaskLocalSnapshot()
    );
}
Example #23
Source File: ChannelStateReaderImplTest.java From flink with Apache License 2.0
private TaskStateSnapshot taskStateSnapshot(Collection<InputChannelStateHandle> inputChannelStateHandles) {
    return new TaskStateSnapshot(Collections.singletonMap(
        new OperatorID(),
        new OperatorSubtaskState(
            StateObjectCollection.empty(),
            StateObjectCollection.empty(),
            StateObjectCollection.empty(),
            StateObjectCollection.empty(),
            new StateObjectCollection<>(inputChannelStateHandles),
            StateObjectCollection.empty()
        )));
}
Example #24
Source File: StreamTaskStateInitializerImpl.java From flink with Apache License 2.0
protected CloseableIterable<KeyGroupStatePartitionStreamProvider> rawKeyedStateInputs(
        Iterator<StateObjectCollection<KeyedStateHandle>> restoreStateAlternatives) {

    if (restoreStateAlternatives.hasNext()) {

        Collection<KeyedStateHandle> rawKeyedState = restoreStateAlternatives.next();

        // TODO currently this does not support local state recovery, so we expect there is only one handle.
        Preconditions.checkState(
            !restoreStateAlternatives.hasNext(),
            "Local recovery is currently not implemented for raw keyed state, but found state alternative.");

        if (rawKeyedState != null) {
            Collection<KeyGroupsStateHandle> keyGroupsStateHandles = transform(rawKeyedState);
            final CloseableRegistry closeableRegistry = new CloseableRegistry();

            return new CloseableIterable<KeyGroupStatePartitionStreamProvider>() {
                @Override
                public void close() throws IOException {
                    closeableRegistry.close();
                }

                @Override
                public Iterator<KeyGroupStatePartitionStreamProvider> iterator() {
                    return new KeyGroupStreamIterator(keyGroupsStateHandles.iterator(), closeableRegistry);
                }
            };
        }
    }

    return CloseableIterable.empty();
}
Example #25
Source File: ChannelPersistenceITCase.java From flink with Apache License 2.0
private TaskStateSnapshot toTaskStateSnapshot(ChannelStateWriteResult t) throws Exception {
    return new TaskStateSnapshot(singletonMap(new OperatorID(),
        new OperatorSubtaskState(
            StateObjectCollection.empty(),
            StateObjectCollection.empty(),
            StateObjectCollection.empty(),
            StateObjectCollection.empty(),
            new StateObjectCollection<>(t.getInputChannelStateHandles().get()),
            new StateObjectCollection<>(t.getResultSubpartitionStateHandles().get())
        )
    ));
}
Example #26
Source File: StreamTaskStateInitializerImpl.java From flink with Apache License 2.0
protected CloseableIterable<StatePartitionStreamProvider> rawOperatorStateInputs(
        Iterator<StateObjectCollection<OperatorStateHandle>> restoreStateAlternatives) {

    if (restoreStateAlternatives.hasNext()) {

        Collection<OperatorStateHandle> rawOperatorState = restoreStateAlternatives.next();

        // TODO currently this does not support local state recovery, so we expect there is only one handle.
        Preconditions.checkState(
            !restoreStateAlternatives.hasNext(),
            "Local recovery is currently not implemented for raw operator state, but found state alternative.");

        if (rawOperatorState != null) {

            return new CloseableIterable<StatePartitionStreamProvider>() {

                final CloseableRegistry closeableRegistry = new CloseableRegistry();

                @Override
                public void close() throws IOException {
                    closeableRegistry.close();
                }

                @Nonnull
                @Override
                public Iterator<StatePartitionStreamProvider> iterator() {
                    return new OperatorStateStreamIterator(
                        DefaultOperatorStateBackend.DEFAULT_OPERATOR_STATE_NAME,
                        rawOperatorState.iterator(),
                        closeableRegistry);
                }
            };
        }
    }

    return CloseableIterable.empty();
}
Example #27
Source File: CoordinatorEventsExactlyOnceITCase.java From flink with Apache License 2.0
// Wraps the given stream handle in a managed operator state handle and registers it as the snapshot of a single operator.
static TaskStateSnapshot createSnapshot(StreamStateHandle handle, OperatorID operatorId) {
    final OperatorStateHandle.StateMetaInfo metaInfo =
        new OperatorStateHandle.StateMetaInfo(new long[]{0}, OperatorStateHandle.Mode.SPLIT_DISTRIBUTE);

    final OperatorStateHandle state =
        new OperatorStreamStateHandle(Collections.singletonMap("état_et_moi_:_ça_fait_deux", metaInfo), handle);

    final OperatorSubtaskState oss = new OperatorSubtaskState(
        StateObjectCollection.singleton(state),
        StateObjectCollection.empty(),
        StateObjectCollection.empty(),
        StateObjectCollection.empty());

    return new TaskStateSnapshot(Collections.singletonMap(operatorId, oss));
}
Example #28
Source File: CoordinatorEventsExactlyOnceITCase.java From flink with Apache License 2.0
@Nullable
static StreamStateHandle readSnapshot(TaskStateManager stateManager, OperatorID operatorId) {
    final PrioritizedOperatorSubtaskState poss = stateManager.prioritizedOperatorState(operatorId);
    if (!poss.isRestored()) {
        return null;
    }

    final StateObjectCollection<OperatorStateHandle> opState =
        stateManager.prioritizedOperatorState(operatorId).getPrioritizedManagedOperatorState().get(0);
    final OperatorStateHandle handle = Iterators.getOnlyElement(opState.iterator());
    return handle.getDelegateStateHandle();
}