Java Code Examples for org.apache.flink.api.common.state.ListState#get()
The following examples show how to use org.apache.flink.api.common.state.ListState#get(). They are drawn from open-source projects; the source file, originating project, and license are noted above each example.
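Before the examples, here is a minimal sketch of the typical pattern: ListState#get() returns an Iterable over the elements currently in the state, which may be empty, and on some backends/versions may be null, so restore code usually iterates over it defensively. This sketch is illustrative only; the class name CounterFunction and the state name "restored-counts" are invented for the example and do not come from the projects below.

import java.util.ArrayList;
import java.util.List;

import org.apache.flink.api.common.state.ListState;
import org.apache.flink.api.common.state.ListStateDescriptor;
import org.apache.flink.runtime.state.FunctionInitializationContext;
import org.apache.flink.runtime.state.FunctionSnapshotContext;
import org.apache.flink.streaming.api.checkpoint.CheckpointedFunction;

/** Illustrative only: restores a list of counts from operator state via ListState#get(). */
public class CounterFunction implements CheckpointedFunction {

    private transient ListState<Long> checkpointedCounts; // hypothetical state handle
    private final List<Long> localCounts = new ArrayList<>();

    @Override
    public void initializeState(FunctionInitializationContext context) throws Exception {
        ListStateDescriptor<Long> descriptor =
                new ListStateDescriptor<>("restored-counts", Long.class); // hypothetical state name
        checkpointedCounts = context.getOperatorStateStore().getListState(descriptor);

        if (context.isRestored()) {
            // ListState#get() returns an Iterable over the restored elements.
            // Guard against null, as several of the examples below do.
            Iterable<Long> restored = checkpointedCounts.get();
            if (restored != null) {
                for (Long count : restored) {
                    localCounts.add(count);
                }
            }
        }
    }

    @Override
    public void snapshotState(FunctionSnapshotContext context) throws Exception {
        checkpointedCounts.clear();
        for (Long count : localCounts) {
            checkpointedCounts.add(count);
        }
    }
}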
Example 1
Source File: MergingWindowSet.java From Flink-CEPplus with Apache License 2.0

/**
 * Restores a {@link MergingWindowSet} from the given state.
 */
public MergingWindowSet(MergingWindowAssigner<?, W> windowAssigner, ListState<Tuple2<W, W>> state) throws Exception {
    this.windowAssigner = windowAssigner;

    mapping = new HashMap<>();

    Iterable<Tuple2<W, W>> windowState = state.get();
    if (windowState != null) {
        for (Tuple2<W, W> window: windowState) {
            mapping.put(window.f0, window.f1);
        }
    }

    this.state = state;

    initialMapping = new HashMap<>();
    initialMapping.putAll(mapping);
}
Example 2
Source File: MergingWindowSet.java From flink with Apache License 2.0

/**
 * Restores a {@link MergingWindowSet} from the given state.
 */
public MergingWindowSet(MergingWindowAssigner<?, W> windowAssigner, ListState<Tuple2<W, W>> state) throws Exception {
    this.windowAssigner = windowAssigner;

    mapping = new HashMap<>();

    Iterable<Tuple2<W, W>> windowState = state.get();
    if (windowState != null) {
        for (Tuple2<W, W> window: windowState) {
            mapping.put(window.f0, window.f1);
        }
    }

    this.state = state;

    initialMapping = new HashMap<>();
    initialMapping.putAll(mapping);
}
Example 3
Source File: Buckets.java From flink with Apache License 2.0

private void initializeActiveBuckets(final ListState<byte[]> bucketStates) throws Exception {
    for (byte[] serializedRecoveredState : bucketStates.get()) {
        final BucketState<BucketID> recoveredState =
                SimpleVersionedSerialization.readVersionAndDeSerialize(
                        bucketStateSerializer, serializedRecoveredState);
        handleRestoredBucketState(recoveredState);
    }
}
Example 4
Source File: StreamingRuntimeContextTest.java From flink with Apache License 2.0

@Test
public void testListStateReturnsEmptyListByDefault() throws Exception {

    StreamingRuntimeContext context = createRuntimeContext();

    ListStateDescriptor<String> descr = new ListStateDescriptor<>("name", String.class);
    ListState<String> state = context.getListState(descr);

    Iterable<String> value = state.get();
    assertNotNull(value);
    assertFalse(value.iterator().hasNext());
}
Example 5
Source File: FlinkBroadcastStateInternals.java From beam with Apache License 2.0

Map<String, T> getMapFromBroadcastState() throws Exception {
    ListState<Map<String, T>> state = flinkStateBackend.getUnionListState(flinkStateDescriptor);
    Iterable<Map<String, T>> iterable = state.get();
    Map<String, T> ret = null;
    if (iterable != null) {
        // just use index 0
        Iterator<Map<String, T>> iterator = iterable.iterator();
        if (iterator.hasNext()) {
            ret = iterator.next();
        }
    }
    return ret;
}
Example 6
Source File: FlinkStateInternals.java From beam with Apache License 2.0

@Override
@Nonnull
public Iterable<T> read() {
    try {
        ListState<T> partitionedState =
                flinkStateBackend.getPartitionedState(
                        namespace.stringKey(), StringSerializer.INSTANCE, flinkStateDescriptor);
        Iterable<T> result = partitionedState.get();
        if (storesVoidValues) {
            return () -> {
                final Iterator underlying = result.iterator();
                return new Iterator<T>() {
                    @Override
                    public boolean hasNext() {
                        return underlying.hasNext();
                    }

                    @Override
                    public T next() {
                        // Simply move the iterator forward but ignore the value.
                        // The value can be the structural null value or NULL itself,
                        // if this has been restored from serialized state.
                        underlying.next();
                        return null;
                    }
                };
            };
        }
        return result != null ? ImmutableList.copyOf(result) : Collections.emptyList();
    } catch (Exception e) {
        throw new RuntimeException("Error reading state.", e);
    }
}
Example 7
Source File: OperatorStateReader.java From bravo with Apache License 2.0

/**
 * Read the serializableListState stored in the checkpoint for the given
 * operator subtask
 */
public List<Serializable> getSerializableListState(int subtask) throws Exception {
    OperatorStateBackend backend = createOperatorStateBackendFromSnapshot(subtask);
    @SuppressWarnings("deprecation")
    ListState<Serializable> listState = backend
            .getSerializableListState(DefaultOperatorStateBackend.DEFAULT_OPERATOR_STATE_NAME);

    List<Serializable> list = new ArrayList<>();

    for (Serializable serializable : listState.get()) {
        list.add(serializable);
    }

    return list;
}
Example 8
Source File: StreamingRuntimeContextTest.java From flink with Apache License 2.0

@Test
public void testListStateReturnsEmptyListByDefault() throws Exception {

    StreamingRuntimeContext context = new StreamingRuntimeContext(
            createListPlainMockOp(),
            createMockEnvironment(),
            Collections.<String, Accumulator<?, ?>>emptyMap());

    ListStateDescriptor<String> descr = new ListStateDescriptor<>("name", String.class);
    ListState<String> state = context.getListState(descr);

    Iterable<String> value = state.get();
    assertNotNull(value);
    assertFalse(value.iterator().hasNext());
}
Example 9
Source File: StreamingFunctionUtils.java From flink with Apache License 2.0

private static boolean tryRestoreFunction(
        StateInitializationContext context,
        Function userFunction) throws Exception {

    if (userFunction instanceof CheckpointedFunction) {
        ((CheckpointedFunction) userFunction).initializeState(context);

        return true;
    }

    if (context.isRestored() && userFunction instanceof ListCheckpointed) {
        @SuppressWarnings("unchecked")
        ListCheckpointed<Serializable> listCheckpointedFun = (ListCheckpointed<Serializable>) userFunction;

        ListState<Serializable> listState = context.getOperatorStateStore().
                getSerializableListState(DefaultOperatorStateBackend.DEFAULT_OPERATOR_STATE_NAME);

        List<Serializable> list = new ArrayList<>();

        for (Serializable serializable : listState.get()) {
            list.add(serializable);
        }

        try {
            listCheckpointedFun.restoreState(list);
        } catch (Exception e) {
            throw new Exception("Failed to restore state to function: " + e.getMessage(), e);
        }

        return true;
    }

    return false;
}
Example 10
Source File: Buckets.java From flink with Apache License 2.0

private void initializeActiveBuckets(final ListState<byte[]> bucketStates) throws Exception {
    for (byte[] serializedRecoveredState : bucketStates.get()) {
        final BucketState<BucketID> recoveredState =
                SimpleVersionedSerialization.readVersionAndDeSerialize(
                        bucketStateSerializer, serializedRecoveredState);
        handleRestoredBucketState(recoveredState);
    }
}
Example 11
Source File: Buckets.java From flink with Apache License 2.0

private void initializePartCounter(final ListState<Long> partCounterState) throws Exception {
    long maxCounter = 0L;
    for (long partCounter: partCounterState.get()) {
        maxCounter = Math.max(partCounter, maxCounter);
    }
    maxPartCounter = maxCounter;
}
Example 12
Source File: StreamingFunctionUtils.java From flink with Apache License 2.0

private static boolean tryRestoreFunction(
        StateInitializationContext context,
        Function userFunction) throws Exception {

    if (userFunction instanceof CheckpointedFunction) {
        ((CheckpointedFunction) userFunction).initializeState(context);

        return true;
    }

    if (context.isRestored() && userFunction instanceof ListCheckpointed) {
        @SuppressWarnings("unchecked")
        ListCheckpointed<Serializable> listCheckpointedFun = (ListCheckpointed<Serializable>) userFunction;

        // We are using JavaSerializer from the flink-runtime module here. This is very naughty and
        // we shouldn't be doing it because ideally nothing in the API modules/connector depends
        // directly on flink-runtime. We are doing it here because we need to maintain backwards
        // compatibility with old state and because we will have to rework/remove this code soon.
        ListStateDescriptor<Serializable> listStateDescriptor = new ListStateDescriptor<>(
                DefaultOperatorStateBackend.DEFAULT_OPERATOR_STATE_NAME, new JavaSerializer<>());
        ListState<Serializable> listState = context.getOperatorStateStore().getListState(listStateDescriptor);

        List<Serializable> list = new ArrayList<>();

        for (Serializable serializable : listState.get()) {
            list.add(serializable);
        }

        try {
            listCheckpointedFun.restoreState(list);
        } catch (Exception e) {
            throw new Exception("Failed to restore state to function: " + e.getMessage(), e);
        }

        return true;
    }

    return false;
}
Example 13
Source File: StreamingFunctionUtils.java From Flink-CEPplus with Apache License 2.0

private static boolean tryRestoreFunction(
        StateInitializationContext context,
        Function userFunction) throws Exception {

    if (userFunction instanceof CheckpointedFunction) {
        ((CheckpointedFunction) userFunction).initializeState(context);

        return true;
    }

    if (context.isRestored() && userFunction instanceof ListCheckpointed) {
        @SuppressWarnings("unchecked")
        ListCheckpointed<Serializable> listCheckpointedFun = (ListCheckpointed<Serializable>) userFunction;

        ListState<Serializable> listState = context.getOperatorStateStore().
                getSerializableListState(DefaultOperatorStateBackend.DEFAULT_OPERATOR_STATE_NAME);

        List<Serializable> list = new ArrayList<>();

        for (Serializable serializable : listState.get()) {
            list.add(serializable);
        }

        try {
            listCheckpointedFun.restoreState(list);
        } catch (Exception e) {
            throw new Exception("Failed to restore state to function: " + e.getMessage(), e);
        }

        return true;
    }

    return false;
}
Example 14
Source File: Buckets.java From Flink-CEPplus with Apache License 2.0

private void initializeActiveBuckets(final ListState<byte[]> bucketStates) throws Exception {
    for (byte[] serializedRecoveredState : bucketStates.get()) {
        final BucketState<BucketID> recoveredState =
                SimpleVersionedSerialization.readVersionAndDeSerialize(
                        bucketStateSerializer, serializedRecoveredState);
        handleRestoredBucketState(recoveredState);
    }
}
Example 15
Source File: Buckets.java From Flink-CEPplus with Apache License 2.0

private void initializePartCounter(final ListState<Long> partCounterState) throws Exception {
    long maxCounter = 0L;
    for (long partCounter: partCounterState.get()) {
        maxCounter = Math.max(partCounter, maxCounter);
    }
    maxPartCounter = maxCounter;
}
Example 16
Source File: StateBackendTestBase.java From flink with Apache License 2.0

@Test
@SuppressWarnings("unchecked")
public void testListStateRestoreWithWrongSerializers() throws Exception {
    CheckpointStreamFactory streamFactory = createStreamFactory();
    SharedStateRegistry sharedStateRegistry = new SharedStateRegistry();
    AbstractKeyedStateBackend<Integer> backend = createKeyedBackend(IntSerializer.INSTANCE);

    try {
        ListStateDescriptor<String> kvId = new ListStateDescriptor<>("id", String.class);
        ListState<String> state =
                backend.getPartitionedState(VoidNamespace.INSTANCE, VoidNamespaceSerializer.INSTANCE, kvId);

        backend.setCurrentKey(1);
        state.add("1");
        backend.setCurrentKey(2);
        state.add("2");

        // draw a snapshot
        KeyedStateHandle snapshot1 = runSnapshot(
                backend.snapshot(682375462378L, 2, streamFactory, CheckpointOptions.forCheckpointWithDefaultLocation()),
                sharedStateRegistry);

        backend.dispose();
        // restore the first snapshot and validate it
        backend = restoreKeyedBackend(IntSerializer.INSTANCE, snapshot1);
        snapshot1.discardState();

        @SuppressWarnings("unchecked")
        TypeSerializer<String> fakeStringSerializer =
                (TypeSerializer<String>) (TypeSerializer<?>) FloatSerializer.INSTANCE;

        try {
            kvId = new ListStateDescriptor<>("id", fakeStringSerializer);

            state = backend.getPartitionedState(VoidNamespace.INSTANCE, VoidNamespaceSerializer.INSTANCE, kvId);

            state.get();

            fail("should recognize wrong serializers");
        } catch (StateMigrationException ignored) {
            // expected
        }
    } finally {
        backend.dispose();
    }
}
Example 17
Source File: BucketingSinkMigrationTest.java From flink with Apache License 2.0

/**
 * The actual paths in this depend on the binary checkpoint, so if you update this the paths
 * here have to be updated as well.
 */
@Override
public void initializeState(FunctionInitializationContext context) throws Exception {
    OperatorStateStore stateStore = context.getOperatorStateStore();

    ListState<State<T>> restoredBucketStates = stateStore.getSerializableListState("bucket-states");

    if (context.isRestored()) {

        for (State<T> states : restoredBucketStates.get()) {
            for (String bucketPath : states.bucketStates.keySet()) {
                BucketState state = states.getBucketState(new Path(bucketPath));
                String current = state.currentFile;
                long validLength = state.currentFileValidLength;

                Assert.assertEquals(expectedBucketFilesPrefix + "4", current);
                Assert.assertEquals(6, validLength);

                List<String> pendingFiles = state.pendingFiles;
                assertTrue(pendingFiles.isEmpty());

                final Map<Long, List<String>> pendingFilesPerCheckpoint = state.pendingFilesPerCheckpoint;
                Assert.assertEquals(1, pendingFilesPerCheckpoint.size());

                for (Map.Entry<Long, List<String>> entry: pendingFilesPerCheckpoint.entrySet()) {
                    long checkpoint = entry.getKey();
                    List<String> files = entry.getValue();

                    Assert.assertEquals(0L, checkpoint);
                    Assert.assertEquals(4, files.size());

                    for (int i = 0; i < 4; i++) {
                        Assert.assertEquals(
                                expectedBucketFilesPrefix + i,
                                files.get(i));
                    }
                }
            }
        }
    }

    initializeCalled = true;
    super.initializeState(context);
}
Example 18
Source File: BucketingSinkMigrationTest.java From flink with Apache License 2.0

/**
 * The actual paths in this depend on the binary checkpoint, so if you update this the paths
 * here have to be updated as well.
 */
@Override
public void initializeState(FunctionInitializationContext context) throws Exception {
    OperatorStateStore stateStore = context.getOperatorStateStore();

    // We are using JavaSerializer from the flink-runtime module here. This is very naughty and
    // we shouldn't be doing it because ideally nothing in the API modules/connector depends
    // directly on flink-runtime. We are doing it here because we need to maintain backwards
    // compatibility with old state and because we will have to rework/remove this code soon.
    ListState<State<T>> restoredBucketStates =
            stateStore.getListState(new ListStateDescriptor<>("bucket-states", new JavaSerializer<>()));

    if (context.isRestored()) {

        for (State<T> states : restoredBucketStates.get()) {
            for (String bucketPath : states.bucketStates.keySet()) {
                BucketState state = states.getBucketState(new Path(bucketPath));
                String current = state.currentFile;
                long validLength = state.currentFileValidLength;

                Assert.assertEquals(expectedBucketFilesPrefix + "4", current);
                Assert.assertEquals(6, validLength);

                List<String> pendingFiles = state.pendingFiles;
                assertTrue(pendingFiles.isEmpty());

                final Map<Long, List<String>> pendingFilesPerCheckpoint = state.pendingFilesPerCheckpoint;
                Assert.assertEquals(1, pendingFilesPerCheckpoint.size());

                for (Map.Entry<Long, List<String>> entry: pendingFilesPerCheckpoint.entrySet()) {
                    long checkpoint = entry.getKey();
                    List<String> files = entry.getValue();

                    Assert.assertEquals(0L, checkpoint);
                    Assert.assertEquals(4, files.size());

                    for (int i = 0; i < 4; i++) {
                        Assert.assertEquals(
                                expectedBucketFilesPrefix + i,
                                files.get(i));
                    }
                }
            }
        }
    }

    initializeCalled = true;
    super.initializeState(context);
}
Example 19
Source File: StateBackendTestBase.java From flink with Apache License 2.0

@Test
@SuppressWarnings("unchecked")
public void testListStateRestoreWithWrongSerializers() throws Exception {
    CheckpointStreamFactory streamFactory = createStreamFactory();
    SharedStateRegistry sharedStateRegistry = new SharedStateRegistry();
    AbstractKeyedStateBackend<Integer> backend = createKeyedBackend(IntSerializer.INSTANCE);

    try {
        ListStateDescriptor<String> kvId = new ListStateDescriptor<>("id", String.class);
        ListState<String> state =
                backend.getPartitionedState(VoidNamespace.INSTANCE, VoidNamespaceSerializer.INSTANCE, kvId);

        backend.setCurrentKey(1);
        state.add("1");
        backend.setCurrentKey(2);
        state.add("2");

        // draw a snapshot
        KeyedStateHandle snapshot1 = runSnapshot(
                backend.snapshot(682375462378L, 2, streamFactory, CheckpointOptions.forCheckpointWithDefaultLocation()),
                sharedStateRegistry);

        backend.dispose();
        // restore the first snapshot and validate it
        backend = restoreKeyedBackend(IntSerializer.INSTANCE, snapshot1);
        snapshot1.discardState();

        @SuppressWarnings("unchecked")
        TypeSerializer<String> fakeStringSerializer =
                (TypeSerializer<String>) (TypeSerializer<?>) FloatSerializer.INSTANCE;

        try {
            kvId = new ListStateDescriptor<>("id", fakeStringSerializer);

            state = backend.getPartitionedState(VoidNamespace.INSTANCE, VoidNamespaceSerializer.INSTANCE, kvId);

            state.get();

            fail("should recognize wrong serializers");
        } catch (StateMigrationException ignored) {
            // expected
        }
    } finally {
        backend.dispose();
    }
}
Example 20
Source File: StateBackendTestBase.java From Flink-CEPplus with Apache License 2.0

@Test
@SuppressWarnings("unchecked")
public void testListStateRestoreWithWrongSerializers() throws Exception {
    CheckpointStreamFactory streamFactory = createStreamFactory();
    SharedStateRegistry sharedStateRegistry = new SharedStateRegistry();
    AbstractKeyedStateBackend<Integer> backend = createKeyedBackend(IntSerializer.INSTANCE);

    try {
        ListStateDescriptor<String> kvId = new ListStateDescriptor<>("id", String.class);
        ListState<String> state =
                backend.getPartitionedState(VoidNamespace.INSTANCE, VoidNamespaceSerializer.INSTANCE, kvId);

        backend.setCurrentKey(1);
        state.add("1");
        backend.setCurrentKey(2);
        state.add("2");

        // draw a snapshot
        KeyedStateHandle snapshot1 = runSnapshot(
                backend.snapshot(682375462378L, 2, streamFactory, CheckpointOptions.forCheckpointWithDefaultLocation()),
                sharedStateRegistry);

        backend.dispose();
        // restore the first snapshot and validate it
        backend = restoreKeyedBackend(IntSerializer.INSTANCE, snapshot1);
        snapshot1.discardState();

        @SuppressWarnings("unchecked")
        TypeSerializer<String> fakeStringSerializer =
                (TypeSerializer<String>) (TypeSerializer<?>) FloatSerializer.INSTANCE;

        try {
            kvId = new ListStateDescriptor<>("id", fakeStringSerializer);

            state = backend.getPartitionedState(VoidNamespace.INSTANCE, VoidNamespaceSerializer.INSTANCE, kvId);

            state.get();

            fail("should recognize wrong serializers");
        } catch (StateMigrationException ignored) {
            // expected
        }
    } finally {
        backend.dispose();
    }
}