Java Code Examples for org.apache.flink.api.common.state.StateTtlConfig#isEnabled()
The following examples show how to use org.apache.flink.api.common.state.StateTtlConfig#isEnabled(). The examples are taken from open source projects; the originating project and source file are noted above each example.
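Before the individual examples, here is a minimal, self-contained sketch (not taken from any of the projects below) of the pattern they all share: build a StateTtlConfig, then guard enableTimeToLive(...) with isEnabled(). The class name and the state name "myState" are illustrative only.

import org.apache.flink.api.common.state.StateTtlConfig;
import org.apache.flink.api.common.state.ValueStateDescriptor;
import org.apache.flink.api.common.time.Time;
import org.apache.flink.api.common.typeinfo.Types;

public class StateTtlConfigIsEnabledSketch {
    public static void main(String[] args) {
        // A config built via the builder has an update type other than Disabled,
        // so isEnabled() returns true.
        StateTtlConfig ttlConfig = StateTtlConfig
                .newBuilder(Time.minutes(10))
                .setUpdateType(StateTtlConfig.UpdateType.OnCreateAndWrite)
                .setStateVisibility(StateTtlConfig.StateVisibility.NeverReturnExpired)
                .build();

        // "myState" is just an illustrative state name.
        ValueStateDescriptor<String> stateDesc =
                new ValueStateDescriptor<>("myState", Types.STRING);

        // The guard used throughout the examples below: only attach TTL when it is enabled.
        if (ttlConfig.isEnabled()) {
            stateDesc.enableTimeToLive(ttlConfig);
        }

        // StateTtlConfig.DISABLED is the counterpart for which isEnabled() returns false.
        System.out.println(StateTtlConfig.DISABLED.isEnabled()); // false
        System.out.println(ttlConfig.isEnabled());               // true
    }
}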
Example 1
Source File: OuterJoinRecordStateViews.java From flink with Apache License 2.0 | 6 votes |
private InputSideHasUniqueKey(
        RuntimeContext ctx,
        String stateName,
        RowDataTypeInfo recordType,
        RowDataTypeInfo uniqueKeyType,
        KeySelector<RowData, RowData> uniqueKeySelector,
        StateTtlConfig ttlConfig) {
    checkNotNull(uniqueKeyType);
    checkNotNull(uniqueKeySelector);
    TupleTypeInfo<Tuple2<RowData, Integer>> valueTypeInfo = new TupleTypeInfo<>(recordType, Types.INT);
    MapStateDescriptor<RowData, Tuple2<RowData, Integer>> recordStateDesc = new MapStateDescriptor<>(
            stateName,
            uniqueKeyType,
            valueTypeInfo);
    if (ttlConfig.isEnabled()) {
        recordStateDesc.enableTimeToLive(ttlConfig);
    }
    this.recordState = ctx.getMapState(recordStateDesc);
    this.uniqueKeySelector = uniqueKeySelector;
}
Example 2
Source File: JoinRecordStateViews.java From flink with Apache License 2.0 | 6 votes |
private InputSideHasUniqueKey(
        RuntimeContext ctx,
        String stateName,
        RowDataTypeInfo recordType,
        RowDataTypeInfo uniqueKeyType,
        KeySelector<RowData, RowData> uniqueKeySelector,
        StateTtlConfig ttlConfig) {
    checkNotNull(uniqueKeyType);
    checkNotNull(uniqueKeySelector);
    MapStateDescriptor<RowData, RowData> recordStateDesc = new MapStateDescriptor<>(
            stateName,
            uniqueKeyType,
            recordType);
    if (ttlConfig.isEnabled()) {
        recordStateDesc.enableTimeToLive(ttlConfig);
    }
    this.recordState = ctx.getMapState(recordStateDesc);
    this.uniqueKeySelector = uniqueKeySelector;
}
Example 3
Source File: RocksDbTtlCompactFiltersManager.java From Flink-CEPplus with Apache License 2.0 | 5 votes |
public void configCompactFilter(
        @Nonnull StateDescriptor<?, ?> stateDesc,
        TypeSerializer<?> stateSerializer) {
    StateTtlConfig ttlConfig = stateDesc.getTtlConfig();
    if (ttlConfig.isEnabled() && ttlConfig.getCleanupStrategies().inRocksdbCompactFilter()) {
        if (!enableTtlCompactionFilter) {
            LOG.warn("Cannot configure RocksDB TTL compaction filter for state <{}>: " +
                    "feature is disabled for the state backend.", stateDesc.getName());
            return;
        }
        FlinkCompactionFilterFactory compactionFilterFactory =
                compactionFilterFactories.get(stateDesc.getName());
        Preconditions.checkNotNull(compactionFilterFactory);

        long ttl = ttlConfig.getTtl().toMilliseconds();
        StateTtlConfig.RocksdbCompactFilterCleanupStrategy rocksdbCompactFilterCleanupStrategy =
                ttlConfig.getCleanupStrategies().getRocksdbCompactFilterCleanupStrategy();
        Preconditions.checkNotNull(rocksdbCompactFilterCleanupStrategy);
        long queryTimeAfterNumEntries =
                rocksdbCompactFilterCleanupStrategy.getQueryTimeAfterNumEntries();

        FlinkCompactionFilter.Config config;
        if (stateDesc instanceof ListStateDescriptor) {
            TypeSerializer<?> elemSerializer = ((ListSerializer<?>) stateSerializer).getElementSerializer();
            int len = elemSerializer.getLength();
            if (len > 0) {
                config = FlinkCompactionFilter.Config.createForFixedElementList(
                        ttl, queryTimeAfterNumEntries, len + 1); // plus one byte for list element delimiter
            } else {
                config = FlinkCompactionFilter.Config.createForList(
                        ttl, queryTimeAfterNumEntries,
                        new ListElementFilterFactory<>(elemSerializer.duplicate()));
            }
        } else if (stateDesc instanceof MapStateDescriptor) {
            config = FlinkCompactionFilter.Config.createForMap(ttl, queryTimeAfterNumEntries);
        } else {
            config = FlinkCompactionFilter.Config.createForValue(ttl, queryTimeAfterNumEntries);
        }
        compactionFilterFactory.configure(config);
    }
}
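For the compaction-filter branch above to run, the TTL config must have the RocksDB compaction-filter cleanup strategy enabled, so that both isEnabled() and inRocksdbCompactFilter() return true. The following is a minimal sketch of how such a config might be built on the caller side; it assumes a Flink version whose builder exposes cleanupInRocksdbCompactFilter(queryTimeAfterNumEntries) (older releases only offer a no-argument variant).

import org.apache.flink.api.common.state.StateTtlConfig;
import org.apache.flink.api.common.time.Time;

public class RocksdbCompactFilterTtlSketch {
    public static void main(String[] args) {
        StateTtlConfig ttlConfig = StateTtlConfig
                .newBuilder(Time.days(1))
                .cleanupInRocksdbCompactFilter(1000L) // re-read current time every 1000 processed entries
                .build();

        // Both conditions checked by configCompactFilter(...) hold for this config.
        System.out.println(ttlConfig.isEnabled());                                    // true
        System.out.println(ttlConfig.getCleanupStrategies().inRocksdbCompactFilter()); // true
    }
}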
Example 4
Source File: RocksDbTtlCompactFiltersManager.java From flink with Apache License 2.0 | 5 votes |
public void configCompactFilter(
        @Nonnull StateDescriptor<?, ?> stateDesc,
        TypeSerializer<?> stateSerializer) {
    StateTtlConfig ttlConfig = stateDesc.getTtlConfig();
    if (ttlConfig.isEnabled() && ttlConfig.getCleanupStrategies().inRocksdbCompactFilter()) {
        if (!enableTtlCompactionFilter) {
            LOG.warn("Cannot configure RocksDB TTL compaction filter for state <{}>: " +
                    "feature is disabled for the state backend.", stateDesc.getName());
            return;
        }
        FlinkCompactionFilterFactory compactionFilterFactory =
                compactionFilterFactories.get(stateDesc.getName());
        Preconditions.checkNotNull(compactionFilterFactory);

        long ttl = ttlConfig.getTtl().toMilliseconds();
        StateTtlConfig.RocksdbCompactFilterCleanupStrategy rocksdbCompactFilterCleanupStrategy =
                ttlConfig.getCleanupStrategies().getRocksdbCompactFilterCleanupStrategy();
        Preconditions.checkNotNull(rocksdbCompactFilterCleanupStrategy);
        long queryTimeAfterNumEntries =
                rocksdbCompactFilterCleanupStrategy.getQueryTimeAfterNumEntries();

        FlinkCompactionFilter.Config config;
        if (stateDesc instanceof ListStateDescriptor) {
            TypeSerializer<?> elemSerializer = ((ListSerializer<?>) stateSerializer).getElementSerializer();
            int len = elemSerializer.getLength();
            if (len > 0) {
                config = FlinkCompactionFilter.Config.createForFixedElementList(
                        ttl, queryTimeAfterNumEntries, len + 1); // plus one byte for list element delimiter
            } else {
                config = FlinkCompactionFilter.Config.createForList(
                        ttl, queryTimeAfterNumEntries,
                        new ListElementFilterFactory<>(elemSerializer.duplicate()));
            }
        } else if (stateDesc instanceof MapStateDescriptor) {
            config = FlinkCompactionFilter.Config.createForMap(ttl, queryTimeAfterNumEntries);
        } else {
            config = FlinkCompactionFilter.Config.createForValue(ttl, queryTimeAfterNumEntries);
        }
        compactionFilterFactory.configure(config);
    }
}
Example 5
Source File: OuterJoinRecordStateViews.java From flink with Apache License 2.0 | 5 votes |
private JoinKeyContainsUniqueKey(
        RuntimeContext ctx,
        String stateName,
        RowDataTypeInfo recordType,
        StateTtlConfig ttlConfig) {
    TupleTypeInfo<Tuple2<RowData, Integer>> valueTypeInfo = new TupleTypeInfo<>(recordType, Types.INT);
    ValueStateDescriptor<Tuple2<RowData, Integer>> recordStateDesc = new ValueStateDescriptor<>(
            stateName,
            valueTypeInfo);
    if (ttlConfig.isEnabled()) {
        recordStateDesc.enableTimeToLive(ttlConfig);
    }
    this.recordState = ctx.getState(recordStateDesc);
    // the result records always not more than 1
    this.reusedRecordList = new ArrayList<>(1);
    this.reusedTupleList = new ArrayList<>(1);
}
Example 6
Source File: OuterJoinRecordStateViews.java From flink with Apache License 2.0 | 5 votes |
private InputSideHasNoUniqueKey(
        RuntimeContext ctx,
        String stateName,
        RowDataTypeInfo recordType,
        StateTtlConfig ttlConfig) {
    TupleTypeInfo<Tuple2<Integer, Integer>> tupleTypeInfo = new TupleTypeInfo<>(Types.INT, Types.INT);
    MapStateDescriptor<RowData, Tuple2<Integer, Integer>> recordStateDesc = new MapStateDescriptor<>(
            stateName,
            recordType,
            tupleTypeInfo);
    if (ttlConfig.isEnabled()) {
        recordStateDesc.enableTimeToLive(ttlConfig);
    }
    this.recordState = ctx.getMapState(recordStateDesc);
}
Example 7
Source File: JoinRecordStateViews.java From flink with Apache License 2.0 | 5 votes |
private JoinKeyContainsUniqueKey(
        RuntimeContext ctx,
        String stateName,
        RowDataTypeInfo recordType,
        StateTtlConfig ttlConfig) {
    ValueStateDescriptor<RowData> recordStateDesc = new ValueStateDescriptor<>(
            stateName,
            recordType);
    if (ttlConfig.isEnabled()) {
        recordStateDesc.enableTimeToLive(ttlConfig);
    }
    this.recordState = ctx.getState(recordStateDesc);
    // the result records always not more than 1
    this.reusedList = new ArrayList<>(1);
}
Example 8
Source File: JoinRecordStateViews.java From flink with Apache License 2.0 | 5 votes |
private InputSideHasNoUniqueKey(
        RuntimeContext ctx,
        String stateName,
        RowDataTypeInfo recordType,
        StateTtlConfig ttlConfig) {
    MapStateDescriptor<RowData, Integer> recordStateDesc = new MapStateDescriptor<>(
            stateName,
            recordType,
            Types.INT);
    if (ttlConfig.isEnabled()) {
        recordStateDesc.enableTimeToLive(ttlConfig);
    }
    this.recordState = ctx.getMapState(recordStateDesc);
}
Example 9
Source File: MiniBatchDeduplicateKeepFirstRowFunction.java From flink with Apache License 2.0 | 5 votes |
@Override
public void open(ExecutionContext ctx) throws Exception {
    super.open(ctx);
    ValueStateDescriptor<Boolean> stateDesc = new ValueStateDescriptor<>("existsState", Types.BOOLEAN);
    StateTtlConfig ttlConfig = createTtlConfig(minRetentionTime);
    if (ttlConfig.isEnabled()) {
        stateDesc.enableTimeToLive(ttlConfig);
    }
    state = ctx.getRuntimeContext().getState(stateDesc);
}
Example 10
Source File: DeduplicateKeepFirstRowFunction.java From flink with Apache License 2.0 | 5 votes |
@Override
public void open(Configuration configure) throws Exception {
    super.open(configure);
    ValueStateDescriptor<Boolean> stateDesc = new ValueStateDescriptor<>("existsState", Types.BOOLEAN);
    StateTtlConfig ttlConfig = createTtlConfig(minRetentionTime);
    if (ttlConfig.isEnabled()) {
        stateDesc.enableTimeToLive(ttlConfig);
    }
    state = getRuntimeContext().getState(stateDesc);
}
Example 11
Source File: DeduplicateKeepLastRowFunction.java From flink with Apache License 2.0 | 5 votes |
@Override
public void open(Configuration configure) throws Exception {
    super.open(configure);
    ValueStateDescriptor<RowData> stateDesc = new ValueStateDescriptor<>("preRowState", rowTypeInfo);
    StateTtlConfig ttlConfig = createTtlConfig(minRetentionTime);
    if (ttlConfig.isEnabled()) {
        stateDesc.enableTimeToLive(ttlConfig);
    }
    state = getRuntimeContext().getState(stateDesc);
}
Example 12
Source File: MiniBatchDeduplicateKeepLastRowFunction.java From flink with Apache License 2.0 | 5 votes |
@Override
public void open(ExecutionContext ctx) throws Exception {
    super.open(ctx);
    ValueStateDescriptor<RowData> stateDesc = new ValueStateDescriptor<>("preRowState", rowTypeInfo);
    StateTtlConfig ttlConfig = createTtlConfig(minRetentionTime);
    if (ttlConfig.isEnabled()) {
        stateDesc.enableTimeToLive(ttlConfig);
    }
    state = ctx.getRuntimeContext().getState(stateDesc);
}
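Examples 9 through 12 all call a createTtlConfig(minRetentionTime) helper whose source is not shown on this page. The sketch below is a hypothetical reconstruction of what such a helper typically does: return StateTtlConfig.DISABLED when no positive retention time is configured, so the isEnabled() guard leaves the state descriptor untouched. The class and method names here are assumptions for illustration, not the actual Flink helper.

import org.apache.flink.api.common.state.StateTtlConfig;
import org.apache.flink.api.common.time.Time;

public class CreateTtlConfigSketch {

    // Hypothetical stand-in for the createTtlConfig(minRetentionTime) helper used above.
    static StateTtlConfig createTtlConfig(long minRetentionTime) {
        if (minRetentionTime > 0) {
            return StateTtlConfig
                    .newBuilder(Time.milliseconds(minRetentionTime))
                    .setUpdateType(StateTtlConfig.UpdateType.OnCreateAndWrite)
                    .setStateVisibility(StateTtlConfig.StateVisibility.NeverReturnExpired)
                    .build();
        } else {
            return StateTtlConfig.DISABLED; // isEnabled() returns false, so TTL is not attached
        }
    }

    public static void main(String[] args) {
        System.out.println(createTtlConfig(0L).isEnabled());      // false: TTL skipped
        System.out.println(createTtlConfig(60_000L).isEnabled()); // true: TTL applied
    }
}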
Example 13
Source File: RocksDbTtlCompactFiltersManager.java From flink with Apache License 2.0 | 5 votes |
public void configCompactFilter(
        @Nonnull StateDescriptor<?, ?> stateDesc,
        TypeSerializer<?> stateSerializer) {
    StateTtlConfig ttlConfig = stateDesc.getTtlConfig();
    if (ttlConfig.isEnabled() && ttlConfig.getCleanupStrategies().inRocksdbCompactFilter()) {
        FlinkCompactionFilterFactory compactionFilterFactory =
                compactionFilterFactories.get(stateDesc.getName());
        Preconditions.checkNotNull(compactionFilterFactory);

        long ttl = ttlConfig.getTtl().toMilliseconds();
        StateTtlConfig.RocksdbCompactFilterCleanupStrategy rocksdbCompactFilterCleanupStrategy =
                ttlConfig.getCleanupStrategies().getRocksdbCompactFilterCleanupStrategy();
        Preconditions.checkNotNull(rocksdbCompactFilterCleanupStrategy);
        long queryTimeAfterNumEntries =
                rocksdbCompactFilterCleanupStrategy.getQueryTimeAfterNumEntries();

        FlinkCompactionFilter.Config config;
        if (stateDesc instanceof ListStateDescriptor) {
            TypeSerializer<?> elemSerializer = ((ListSerializer<?>) stateSerializer).getElementSerializer();
            int len = elemSerializer.getLength();
            if (len > 0) {
                config = FlinkCompactionFilter.Config.createForFixedElementList(
                        ttl, queryTimeAfterNumEntries, len + 1); // plus one byte for list element delimiter
            } else {
                config = FlinkCompactionFilter.Config.createForList(
                        ttl, queryTimeAfterNumEntries,
                        new ListElementFilterFactory<>(elemSerializer.duplicate()));
            }
        } else if (stateDesc instanceof MapStateDescriptor) {
            config = FlinkCompactionFilter.Config.createForMap(ttl, queryTimeAfterNumEntries);
        } else {
            config = FlinkCompactionFilter.Config.createForValue(ttl, queryTimeAfterNumEntries);
        }
        compactionFilterFactory.configure(config);
    }
}