Java Code Examples for org.apache.flink.api.common.state.OperatorStateStore#getListState()
The following examples show how to use
org.apache.flink.api.common.state.OperatorStateStore#getListState() .
You can vote up the examples you like or vote down the ones you don't like,
and you can navigate to the original project or source file by following the links above each example. You may also check out the related API usage examples in the sidebar.
Example 1
Source File: PartitionTimeCommitTigger.java From flink with Apache License 2.0 | 6 votes |
/**
 * Creates the partition-time commit trigger, registering its operator state and,
 * on restore, re-populating the pending partitions and per-task watermarks.
 *
 * @param isRestored    whether this is a restore from a checkpoint/savepoint
 * @param stateStore    operator state store used to register list states
 * @param conf          configuration holding commit delay and extractor options
 * @param cl            classloader used to instantiate a custom time extractor
 * @param partitionKeys the partition key names of the sink table
 * @throws Exception if state registration or extractor creation fails
 */
public PartitionTimeCommitTigger(
		boolean isRestored,
		OperatorStateStore stateStore,
		Configuration conf,
		ClassLoader cl,
		List<String> partitionKeys) throws Exception {
	this.partitionKeys = partitionKeys;
	this.commitDelay = conf.get(SINK_PARTITION_COMMIT_DELAY).toMillis();
	this.extractor = PartitionTimeExtractor.create(
			cl,
			conf.get(PARTITION_TIME_EXTRACTOR_KIND),
			conf.get(PARTITION_TIME_EXTRACTOR_CLASS),
			conf.get(PARTITION_TIME_EXTRACTOR_TIMESTAMP_PATTERN));

	this.pendingPartitionsState = stateStore.getListState(PENDING_PARTITIONS_STATE_DESC);
	this.pendingPartitions = new HashSet<>();
	if (isRestored) {
		// Fold in every restored entry instead of `iterator().next()`: plain list
		// state is redistributed on rescaling, so a subtask may receive zero or
		// several entries; `next()` on an empty iterator would throw
		// NoSuchElementException and extra entries would be silently dropped.
		pendingPartitionsState.get().forEach(pendingPartitions::addAll);
	}

	this.watermarksState = stateStore.getListState(WATERMARKS_STATE_DESC);
	this.watermarks = new TreeMap<>();
	if (isRestored) {
		// Same empty/multi-entry hardening as above for the watermark maps.
		watermarksState.get().forEach(watermarks::putAll);
	}
}
Example 2
Source File: StreamingFileSinkHelper.java From flink with Apache License 2.0 | 6 votes |
/**
 * Creates the helper that wires a {@link Buckets} instance to operator state
 * and schedules the periodic bucket check timer.
 *
 * @param buckets             the buckets to manage
 * @param isRestored          whether state is being restored from a checkpoint
 * @param stateStore          operator state store for bucket/part-counter state
 * @param procTimeService     processing-time service used for the check timer
 * @param bucketCheckInterval interval (ms) between bucket inspections
 * @throws Exception if state registration or restore fails
 */
public StreamingFileSinkHelper(
		Buckets<IN, ?> buckets,
		boolean isRestored,
		OperatorStateStore stateStore,
		ProcessingTimeService procTimeService,
		long bucketCheckInterval) throws Exception {
	this.buckets = buckets;
	this.procTimeService = procTimeService;
	this.bucketCheckInterval = bucketCheckInterval;

	// Bucket state is regular list state; the max part counter uses union list
	// state so that restored entries are broadcast to every subtask.
	this.bucketStates = stateStore.getListState(BUCKET_STATE_DESC);
	this.maxPartCountersState = stateStore.getUnionListState(MAX_PART_COUNTER_STATE_DESC);

	if (isRestored) {
		buckets.initializeState(bucketStates, maxPartCountersState);
	}

	// Kick off the first periodic bucket inspection one interval from now.
	procTimeService.registerTimer(
			procTimeService.getCurrentProcessingTime() + bucketCheckInterval, this);
}
Example 3
Source File: StreamingFileSink.java From Flink-CEPplus with Apache License 2.0 | 5 votes |
/**
 * Creates this subtask's buckets and registers the sink's operator state,
 * restoring the buckets when resuming from a checkpoint.
 *
 * @param context the initialization context supplying state and restore info
 * @throws Exception if bucket creation, state registration, or restore fails
 */
@Override
public void initializeState(FunctionInitializationContext context) throws Exception {
	this.buckets = bucketsBuilder.createBuckets(getRuntimeContext().getIndexOfThisSubtask());

	final OperatorStateStore stateStore = context.getOperatorStateStore();
	// Union list state for the part counter so all subtasks see restored values.
	bucketStates = stateStore.getListState(BUCKET_STATE_DESC);
	maxPartCountersState = stateStore.getUnionListState(MAX_PART_COUNTER_STATE_DESC);

	if (context.isRestored()) {
		buckets.initializeState(bucketStates, maxPartCountersState);
	}
}
Example 4
Source File: StreamingFileSink.java From flink with Apache License 2.0 | 5 votes |
/**
 * Initializes per-subtask buckets and registers bucket/part-counter state,
 * handing both to the buckets on restore.
 *
 * @param context the initialization context supplying state and restore info
 * @throws Exception if bucket creation, state registration, or restore fails
 */
@Override
public void initializeState(FunctionInitializationContext context) throws Exception {
	final int taskIndex = getRuntimeContext().getIndexOfThisSubtask();
	this.buckets = bucketsBuilder.createBuckets(taskIndex);

	bucketStates = context.getOperatorStateStore().getListState(BUCKET_STATE_DESC);
	// The max part counter is union-redistributed so every subtask observes it.
	maxPartCountersState =
			context.getOperatorStateStore().getUnionListState(MAX_PART_COUNTER_STATE_DESC);

	if (context.isRestored()) {
		buckets.initializeState(bucketStates, maxPartCountersState);
	}
}
Example 5
Source File: BucketingSink.java From flink with Apache License 2.0 | 5 votes |
/**
 * Initializes the sink's file system, truncate reflection handle, and bucket
 * state, replaying any restored bucket states.
 *
 * @param context the initialization context supplying state and restore info
 * @throws Exception if state access fails; file-system errors are rethrown
 *         wrapped in a {@link RuntimeException}
 */
@Override
public void initializeState(FunctionInitializationContext context) throws Exception {
	// Guard against double initialization of the operator.
	Preconditions.checkArgument(this.restoredBucketStates == null, "The operator has already been initialized.");

	try {
		initFileSystem();
	} catch (IOException e) {
		// Translate the checked IOException into an unchecked failure so the
		// task fails fast; the cause is preserved on the RuntimeException.
		LOG.error("Error while creating FileSystem when initializing the state of the BucketingSink.", e);
		throw new RuntimeException("Error while creating FileSystem when initializing the state of the BucketingSink.", e);
	}

	// Lazily resolve the reflective truncate handle (only once per operator).
	if (this.refTruncate == null) {
		this.refTruncate = reflectTruncate(fs);
	}

	// We are using JavaSerializer from the flink-runtime module here. This is very naughty and
	// we shouldn't be doing it because ideally nothing in the API modules/connector depends
	// directly on flink-runtime. We are doing it here because we need to maintain backwards
	// compatibility with old state and because we will have to rework/remove this code soon.
	OperatorStateStore stateStore = context.getOperatorStateStore();
	this.restoredBucketStates = stateStore.getListState(new ListStateDescriptor<>("bucket-states", new JavaSerializer<>()));

	int subtaskIndex = getRuntimeContext().getIndexOfThisSubtask();
	if (context.isRestored()) {
		LOG.info("Restoring state for the {} (taskIdx={}).", getClass().getSimpleName(), subtaskIndex);

		// Replay every restored state entry; after rescaling a subtask may
		// receive several entries from other subtasks.
		for (State<T> recoveredState : restoredBucketStates.get()) {
			handleRestoredBucketState(recoveredState);
			if (LOG.isDebugEnabled()) {
				LOG.debug("{} idx {} restored {}", getClass().getSimpleName(), subtaskIndex, recoveredState);
			}
		}
	} else {
		LOG.info("No state to restore for the {} (taskIdx={}).", getClass().getSimpleName(), subtaskIndex);
	}
}
Example 6
Source File: ProcTimeCommitTigger.java From flink with Apache License 2.0 | 5 votes |
/**
 * Creates the processing-time commit trigger, registering its pending-partition
 * state and, on restore, re-populating the pending partition map.
 *
 * @param isRestored      whether this is a restore from a checkpoint/savepoint
 * @param stateStore      operator state store used to register list state
 * @param conf            configuration holding the commit delay
 * @param procTimeService processing-time service used for commit timing
 * @throws Exception if state registration fails
 */
public ProcTimeCommitTigger(
		boolean isRestored,
		OperatorStateStore stateStore,
		Configuration conf,
		ProcessingTimeService procTimeService) throws Exception {
	this.procTimeService = procTimeService;
	this.commitDelay = conf.get(SINK_PARTITION_COMMIT_DELAY).toMillis();

	this.pendingPartitionsState = stateStore.getListState(PENDING_PARTITIONS_STATE_DESC);
	this.pendingPartitions = new HashMap<>();
	if (isRestored) {
		// Fold in every restored entry instead of `iterator().next()`: plain list
		// state is redistributed on rescaling, so a subtask may receive zero or
		// several entries; `next()` on an empty iterator would throw
		// NoSuchElementException and extra entries would be silently dropped.
		pendingPartitionsState.get().forEach(pendingPartitions::putAll);
	}
}
Example 7
Source File: BucketingSinkMigrationTest.java From flink with Apache License 2.0 | 4 votes |
/**
 * The actual paths in this depend on the binary checkpoint so it you update this the paths
 * here have to be updated as well.
 */
@Override
public void initializeState(FunctionInitializationContext context) throws Exception {
	OperatorStateStore stateStore = context.getOperatorStateStore();

	// We are using JavaSerializer from the flink-runtime module here. This is very naughty and
	// we shouldn't be doing it because ideally nothing in the API modules/connector depends
	// directly on flink-runtime. We are doing it here because we need to maintain backwards
	// compatibility with old state and because we will have to rework/remove this code soon.
	ListState<State<T>> restoredBucketStates = stateStore.getListState(new ListStateDescriptor<>("bucket-states", new JavaSerializer<>()));

	if (context.isRestored()) {

		// Walk every restored bucket and verify its contents match the values
		// baked into the binary checkpoint this migration test restores from.
		for (State<T> states : restoredBucketStates.get()) {
			for (String bucketPath : states.bucketStates.keySet()) {
				BucketState state = states.getBucketState(new Path(bucketPath));
				String current = state.currentFile;
				long validLength = state.currentFileValidLength;

				// The in-progress file and its valid length are fixed by the
				// checkpoint fixture (prefix + "4", 6 bytes valid).
				Assert.assertEquals(expectedBucketFilesPrefix + "4", current);
				Assert.assertEquals(6, validLength);

				List<String> pendingFiles = state.pendingFiles;
				assertTrue(pendingFiles.isEmpty());

				// Exactly one checkpoint (id 0) with four pending files,
				// named prefix + 0..3, is expected from the fixture.
				final Map<Long, List<String>> pendingFilesPerCheckpoint = state.pendingFilesPerCheckpoint;
				Assert.assertEquals(1, pendingFilesPerCheckpoint.size());

				for (Map.Entry<Long, List<String>> entry: pendingFilesPerCheckpoint.entrySet()) {
					long checkpoint = entry.getKey();
					List<String> files = entry.getValue();

					Assert.assertEquals(0L, checkpoint);
					Assert.assertEquals(4, files.size());

					for (int i = 0; i < 4; i++) {
						Assert.assertEquals(
							expectedBucketFilesPrefix + i,
							files.get(i));
					}
				}
			}
		}
	}

	// Record that restore ran before delegating to the production logic.
	initializeCalled = true;
	super.initializeState(context);
}