Java Code Examples for org.apache.flink.runtime.state.StateBackendLoader#ROCKSDB_STATE_BACKEND_NAME
The following examples show how to use org.apache.flink.runtime.state.StateBackendLoader#ROCKSDB_STATE_BACKEND_NAME. Each example is extracted from an open source project; the source file, originating project, and license are noted above each snippet.
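StateBackendLoader defines canonical string names for Flink's built-in state backends, and ROCKSDB_STATE_BACKEND_NAME is the name under which the RocksDB backend is registered. Test code typically switches on these constants to construct the matching backend, as every example on this page does. The following minimal sketch distills that shared pattern; the class and method names here are hypothetical, not part of Flink's API:

import org.apache.flink.contrib.streaming.state.RocksDBStateBackend;
import org.apache.flink.runtime.state.StateBackend;
import org.apache.flink.runtime.state.StateBackendLoader;
import org.apache.flink.runtime.state.memory.MemoryStateBackend;

public final class BackendSelection {

    // Hypothetical helper mirroring the switch used throughout the examples:
    // map a backend name constant to a concrete backend instance.
    static StateBackend createBackendFor(String backendType) {
        switch (backendType) {
            case StateBackendLoader.ROCKSDB_STATE_BACKEND_NAME:
                // RocksDB holds working state; checkpoint streams go through
                // the wrapped MemoryStateBackend, as in the tests below.
                return new RocksDBStateBackend(new MemoryStateBackend());
            case StateBackendLoader.MEMORY_STATE_BACKEND_NAME:
                return new MemoryStateBackend();
            default:
                throw new UnsupportedOperationException("Unknown backend: " + backendType);
        }
    }
}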
Example 1
Source File: LegacyStatefulJobSavepointMigrationITCase.java From flink with Apache License 2.0
private String getSavepointPath(MigrationVersion savepointVersion, String backendType) {
    switch (backendType) {
        case StateBackendLoader.ROCKSDB_STATE_BACKEND_NAME:
            return "stateful-udf-migration-itcase-flink" + savepointVersion + "-rocksdb-savepoint";
        case StateBackendLoader.MEMORY_STATE_BACKEND_NAME:
            return "stateful-udf-migration-itcase-flink" + savepointVersion + "-savepoint";
        default:
            throw new UnsupportedOperationException();
    }
}
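Given a RocksDB backend, the method above appends a -rocksdb-savepoint suffix to the version-specific resource name. A hedged usage sketch (the MigrationVersion constant below is an assumption about the test's parameter set, not taken from this file):

// Illustrative only: assumes MigrationVersion.v1_8 exists and renders as "1.8",
// following the convention of Flink's test utilities.
String path = getSavepointPath(MigrationVersion.v1_8, StateBackendLoader.ROCKSDB_STATE_BACKEND_NAME);
// path would be "stateful-udf-migration-itcase-flink1.8-rocksdb-savepoint"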
Example 2
Source File: StatefulJobWBroadcastStateMigrationITCase.java From flink with Apache License 2.0
private String getBroadcastSavepointPath(MigrationVersion savepointVersion, String backendType) {
    switch (backendType) {
        case StateBackendLoader.ROCKSDB_STATE_BACKEND_NAME:
            return "new-stateful-broadcast-udf-migration-itcase-flink" + savepointVersion + "-rocksdb-savepoint";
        case StateBackendLoader.MEMORY_STATE_BACKEND_NAME:
            return "new-stateful-broadcast-udf-migration-itcase-flink" + savepointVersion + "-savepoint";
        default:
            throw new UnsupportedOperationException();
    }
}
Example 3
Source File: StatefulJobSavepointMigrationITCase.java From flink with Apache License 2.0
private String getSavepointPath(MigrationVersion savepointVersion, String backendType) {
    switch (backendType) {
        case StateBackendLoader.ROCKSDB_STATE_BACKEND_NAME:
            return "new-stateful-udf-migration-itcase-flink" + savepointVersion + "-rocksdb-savepoint";
        case StateBackendLoader.MEMORY_STATE_BACKEND_NAME:
            return "new-stateful-udf-migration-itcase-flink" + savepointVersion + "-savepoint";
        default:
            throw new UnsupportedOperationException();
    }
}
Example 4
Source File: StatefulJobWBroadcastStateMigrationITCase.java From Flink-CEPplus with Apache License 2.0
private String getBroadcastSavepointPath(MigrationVersion savepointVersion, String backendType) {
    switch (backendType) {
        case StateBackendLoader.ROCKSDB_STATE_BACKEND_NAME:
            return "new-stateful-broadcast-udf-migration-itcase-flink" + savepointVersion + "-rocksdb-savepoint";
        case StateBackendLoader.MEMORY_STATE_BACKEND_NAME:
            return "new-stateful-broadcast-udf-migration-itcase-flink" + savepointVersion + "-savepoint";
        default:
            throw new UnsupportedOperationException();
    }
}
Example 5
Source File: StatefulJobSavepointMigrationITCase.java From Flink-CEPplus with Apache License 2.0
private String getSavepointPath(MigrationVersion savepointVersion, String backendType) {
    switch (backendType) {
        case StateBackendLoader.ROCKSDB_STATE_BACKEND_NAME:
            return "new-stateful-udf-migration-itcase-flink" + savepointVersion + "-rocksdb-savepoint";
        case StateBackendLoader.MEMORY_STATE_BACKEND_NAME:
            return "new-stateful-udf-migration-itcase-flink" + savepointVersion + "-savepoint";
        default:
            throw new UnsupportedOperationException();
    }
}
Example 6
Source File: LegacyStatefulJobSavepointMigrationITCase.java From Flink-CEPplus with Apache License 2.0
private String getSavepointPath(MigrationVersion savepointVersion, String backendType) {
    switch (backendType) {
        case StateBackendLoader.ROCKSDB_STATE_BACKEND_NAME:
            return "stateful-udf-migration-itcase-flink" + savepointVersion + "-rocksdb-savepoint";
        case StateBackendLoader.MEMORY_STATE_BACKEND_NAME:
            return "stateful-udf-migration-itcase-flink" + savepointVersion + "-savepoint";
        default:
            throw new UnsupportedOperationException();
    }
}
Example 7
Source File: TypeSerializerSnapshotMigrationITCase.java From flink with Apache License 2.0
@Test
public void testSavepoint() throws Exception {
    final int parallelism = 1;

    final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    env.setRestartStrategy(RestartStrategies.noRestart());
    env.setStreamTimeCharacteristic(TimeCharacteristic.EventTime);

    // Select the state backend under test by its registered name.
    switch (testStateBackend) {
        case StateBackendLoader.ROCKSDB_STATE_BACKEND_NAME:
            env.setStateBackend(new RocksDBStateBackend(new MemoryStateBackend()));
            break;
        case StateBackendLoader.MEMORY_STATE_BACKEND_NAME:
            env.setStateBackend(new MemoryStateBackend());
            break;
        default:
            throw new UnsupportedOperationException();
    }

    env.enableCheckpointing(500);
    env.setParallelism(parallelism);
    env.setMaxParallelism(parallelism);

    SourceFunction<Tuple2<Long, Long>> nonParallelSource =
        new MigrationTestUtils.CheckpointingNonParallelSourceWithListState(NUM_SOURCE_ELEMENTS);

    env.addSource(nonParallelSource)
        .keyBy(0)
        .map(new TestMapFunction())
        .addSink(new MigrationTestUtils.AccumulatorCountingSink<>());

    if (executionMode == ExecutionMode.PERFORM_SAVEPOINT) {
        executeAndSavepoint(
            env,
            "src/test/resources/" + getSavepointPath(testMigrateVersion, testStateBackend),
            new Tuple2<>(MigrationTestUtils.AccumulatorCountingSink.NUM_ELEMENTS_ACCUMULATOR, NUM_SOURCE_ELEMENTS));
    } else {
        restoreAndExecute(
            env,
            getResourceFilename(getSavepointPath(testMigrateVersion, testStateBackend)),
            new Tuple2<>(MigrationTestUtils.AccumulatorCountingSink.NUM_ELEMENTS_ACCUMULATOR, NUM_SOURCE_ELEMENTS));
    }
}
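Note that new RocksDBStateBackend(new MemoryStateBackend()) keeps working state in RocksDB while delegating checkpoint streams to the wrapped in-memory backend, which keeps the test self-contained. Outside of tests the checkpoint target is usually a filesystem URI instead; a minimal sketch, assuming a placeholder local path (this RocksDBStateBackend constructor declares throws IOException):

// Sketch only: RocksDB working state with checkpoints written to a filesystem
// path. The URI is a placeholder, not taken from the original test.
env.setStateBackend(new RocksDBStateBackend("file:///tmp/flink-checkpoints"));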
Example 8
Source File: LegacyStatefulJobSavepointMigrationITCase.java From flink with Apache License 2.0
/**
 * Manually run this to write binary snapshot data.
 */
@Test
@Ignore
public void writeSavepoint() throws Exception {
    final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    env.setStreamTimeCharacteristic(TimeCharacteristic.EventTime);

    switch (flinkGenerateSavepointBackendType) {
        case StateBackendLoader.ROCKSDB_STATE_BACKEND_NAME:
            env.setStateBackend(new RocksDBStateBackend(new MemoryStateBackend()));
            break;
        case StateBackendLoader.MEMORY_STATE_BACKEND_NAME:
            env.setStateBackend(new MemoryStateBackend());
            break;
        default:
            throw new UnsupportedOperationException();
    }

    env.enableCheckpointing(500);
    env.setParallelism(4);
    env.setMaxParallelism(4);

    env
        .addSource(new LegacyCheckpointedSource(NUM_SOURCE_ELEMENTS)).setMaxParallelism(1).uid("LegacyCheckpointedSource")
        .flatMap(new LegacyCheckpointedFlatMap()).startNewChain().uid("LegacyCheckpointedFlatMap")
        .keyBy(0)
        .flatMap(new LegacyCheckpointedFlatMapWithKeyedState()).startNewChain().uid("LegacyCheckpointedFlatMapWithKeyedState")
        .keyBy(0)
        .flatMap(new KeyedStateSettingFlatMap()).startNewChain().uid("KeyedStateSettingFlatMap")
        .keyBy(0)
        .transform(
            "custom_operator",
            new TypeHint<Tuple2<Long, Long>>() {}.getTypeInfo(),
            new CheckpointedUdfOperator(new LegacyCheckpointedFlatMapWithKeyedState())).uid("LegacyCheckpointedOperator")
        .keyBy(0)
        .transform(
            "timely_stateful_operator",
            new TypeHint<Tuple2<Long, Long>>() {}.getTypeInfo(),
            new TimelyStatefulOperator()).uid("TimelyStatefulOperator")
        .addSink(new AccumulatorCountingSink<Tuple2<Long, Long>>());

    executeAndSavepoint(
        env,
        "src/test/resources/" + getSavepointPath(flinkGenerateSavepointVersion, flinkGenerateSavepointBackendType),
        new Tuple2<>(AccumulatorCountingSink.NUM_ELEMENTS_ACCUMULATOR, NUM_SOURCE_ELEMENTS));
}
Example 9
Source File: LegacyStatefulJobSavepointMigrationITCase.java From flink with Apache License 2.0
@Test
public void testSavepointRestore() throws Exception {
    final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    env.setRestartStrategy(RestartStrategies.noRestart());
    env.setStreamTimeCharacteristic(TimeCharacteristic.EventTime);

    // Select the state backend under test by its registered name.
    switch (testStateBackend) {
        case StateBackendLoader.ROCKSDB_STATE_BACKEND_NAME:
            env.setStateBackend(new RocksDBStateBackend(new MemoryStateBackend()));
            break;
        case StateBackendLoader.MEMORY_STATE_BACKEND_NAME:
            env.setStateBackend(new MemoryStateBackend());
            break;
        default:
            throw new UnsupportedOperationException();
    }

    env.enableCheckpointing(500);
    env.setParallelism(4);
    env.setMaxParallelism(4);

    env
        .addSource(new CheckingRestoringSource(NUM_SOURCE_ELEMENTS)).setMaxParallelism(1).uid("LegacyCheckpointedSource")
        .flatMap(new CheckingRestoringFlatMap()).startNewChain().uid("LegacyCheckpointedFlatMap")
        .keyBy(0)
        .flatMap(new CheckingRestoringFlatMapWithKeyedState()).startNewChain().uid("LegacyCheckpointedFlatMapWithKeyedState")
        .keyBy(0)
        .flatMap(new CheckingKeyedStateFlatMap()).startNewChain().uid("KeyedStateSettingFlatMap")
        .keyBy(0)
        .transform(
            "custom_operator",
            new TypeHint<Tuple2<Long, Long>>() {}.getTypeInfo(),
            new CheckingRestoringUdfOperator(new CheckingRestoringFlatMapWithKeyedStateInOperator())).uid("LegacyCheckpointedOperator")
        .keyBy(0)
        .transform(
            "timely_stateful_operator",
            new TypeHint<Tuple2<Long, Long>>() {}.getTypeInfo(),
            new CheckingTimelyStatefulOperator()).uid("TimelyStatefulOperator")
        .addSink(new AccumulatorCountingSink<Tuple2<Long, Long>>());

    restoreAndExecute(
        env,
        getResourceFilename(getSavepointPath(testMigrateVersion, testStateBackend)),
        new Tuple2<>(CheckingRestoringSource.SUCCESSFUL_RESTORE_CHECK_ACCUMULATOR, 1),
        new Tuple2<>(CheckingRestoringFlatMap.SUCCESSFUL_RESTORE_CHECK_ACCUMULATOR, NUM_SOURCE_ELEMENTS),
        new Tuple2<>(CheckingRestoringFlatMapWithKeyedState.SUCCESSFUL_RESTORE_CHECK_ACCUMULATOR, NUM_SOURCE_ELEMENTS),
        new Tuple2<>(CheckingKeyedStateFlatMap.SUCCESSFUL_RESTORE_CHECK_ACCUMULATOR, NUM_SOURCE_ELEMENTS),
        new Tuple2<>(CheckingRestoringUdfOperator.SUCCESSFUL_RESTORE_CHECK_ACCUMULATOR, NUM_SOURCE_ELEMENTS),
        new Tuple2<>(CheckingRestoringFlatMapWithKeyedStateInOperator.SUCCESSFUL_RESTORE_CHECK_ACCUMULATOR, NUM_SOURCE_ELEMENTS),
        new Tuple2<>(CheckingTimelyStatefulOperator.SUCCESSFUL_PROCESS_CHECK_ACCUMULATOR, NUM_SOURCE_ELEMENTS),
        new Tuple2<>(CheckingTimelyStatefulOperator.SUCCESSFUL_EVENT_TIME_CHECK_ACCUMULATOR, NUM_SOURCE_ELEMENTS),
        new Tuple2<>(CheckingTimelyStatefulOperator.SUCCESSFUL_PROCESSING_TIME_CHECK_ACCUMULATOR, NUM_SOURCE_ELEMENTS),
        new Tuple2<>(AccumulatorCountingSink.NUM_ELEMENTS_ACCUMULATOR, NUM_SOURCE_ELEMENTS));
}
Example 10
Source File: TypeSerializerSnapshotMigrationITCase.java From Flink-CEPplus with Apache License 2.0
@Test
public void testSavepoint() throws Exception {
    final int parallelism = 1;

    final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    env.setRestartStrategy(RestartStrategies.noRestart());
    env.setStreamTimeCharacteristic(TimeCharacteristic.EventTime);

    // Select the state backend under test by its registered name.
    switch (testStateBackend) {
        case StateBackendLoader.ROCKSDB_STATE_BACKEND_NAME:
            env.setStateBackend(new RocksDBStateBackend(new MemoryStateBackend()));
            break;
        case StateBackendLoader.MEMORY_STATE_BACKEND_NAME:
            env.setStateBackend(new MemoryStateBackend());
            break;
        default:
            throw new UnsupportedOperationException();
    }

    env.enableCheckpointing(500);
    env.setParallelism(parallelism);
    env.setMaxParallelism(parallelism);

    SourceFunction<Tuple2<Long, Long>> nonParallelSource =
        new MigrationTestUtils.CheckpointingNonParallelSourceWithListState(NUM_SOURCE_ELEMENTS);

    env.addSource(nonParallelSource)
        .keyBy(0)
        .map(new TestMapFunction())
        .addSink(new MigrationTestUtils.AccumulatorCountingSink<>());

    if (executionMode == ExecutionMode.PERFORM_SAVEPOINT) {
        executeAndSavepoint(
            env,
            "src/test/resources/" + getSavepointPath(testMigrateVersion, testStateBackend),
            new Tuple2<>(MigrationTestUtils.AccumulatorCountingSink.NUM_ELEMENTS_ACCUMULATOR, NUM_SOURCE_ELEMENTS));
    } else {
        restoreAndExecute(
            env,
            getResourceFilename(getSavepointPath(testMigrateVersion, testStateBackend)),
            new Tuple2<>(MigrationTestUtils.AccumulatorCountingSink.NUM_ELEMENTS_ACCUMULATOR, NUM_SOURCE_ELEMENTS));
    }
}
Example 11
Source File: StatefulJobSavepointMigrationITCase.java From flink with Apache License 2.0
@Test
public void testSavepoint() throws Exception {
    final int parallelism = 4;

    final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    env.setRestartStrategy(RestartStrategies.noRestart());
    env.setStreamTimeCharacteristic(TimeCharacteristic.EventTime);

    // Select the state backend under test by its registered name.
    switch (testStateBackend) {
        case StateBackendLoader.ROCKSDB_STATE_BACKEND_NAME:
            env.setStateBackend(new RocksDBStateBackend(new MemoryStateBackend()));
            break;
        case StateBackendLoader.MEMORY_STATE_BACKEND_NAME:
            env.setStateBackend(new MemoryStateBackend());
            break;
        default:
            throw new UnsupportedOperationException();
    }

    env.enableCheckpointing(500);
    env.setParallelism(parallelism);
    env.setMaxParallelism(parallelism);

    SourceFunction<Tuple2<Long, Long>> nonParallelSource;
    SourceFunction<Tuple2<Long, Long>> parallelSource;
    RichFlatMapFunction<Tuple2<Long, Long>, Tuple2<Long, Long>> flatMap;
    OneInputStreamOperator<Tuple2<Long, Long>, Tuple2<Long, Long>> timelyOperator;

    if (executionMode == ExecutionMode.PERFORM_SAVEPOINT) {
        nonParallelSource = new MigrationTestUtils.CheckpointingNonParallelSourceWithListState(NUM_SOURCE_ELEMENTS);
        parallelSource = new MigrationTestUtils.CheckpointingParallelSourceWithUnionListState(NUM_SOURCE_ELEMENTS);
        flatMap = new CheckpointingKeyedStateFlatMap();
        timelyOperator = new CheckpointingTimelyStatefulOperator();
    } else if (executionMode == ExecutionMode.VERIFY_SAVEPOINT) {
        nonParallelSource = new MigrationTestUtils.CheckingNonParallelSourceWithListState(NUM_SOURCE_ELEMENTS);
        parallelSource = new MigrationTestUtils.CheckingParallelSourceWithUnionListState(NUM_SOURCE_ELEMENTS);
        flatMap = new CheckingKeyedStateFlatMap();
        timelyOperator = new CheckingTimelyStatefulOperator();
    } else {
        throw new IllegalStateException("Unknown ExecutionMode " + executionMode);
    }

    env
        .addSource(nonParallelSource).uid("CheckpointingSource1")
        .keyBy(0)
        .flatMap(flatMap).startNewChain().uid("CheckpointingKeyedStateFlatMap1")
        .keyBy(0)
        .transform(
            "timely_stateful_operator",
            new TypeHint<Tuple2<Long, Long>>() {}.getTypeInfo(),
            timelyOperator).uid("CheckpointingTimelyStatefulOperator1")
        .addSink(new MigrationTestUtils.AccumulatorCountingSink<>());

    env
        .addSource(parallelSource).uid("CheckpointingSource2")
        .keyBy(0)
        .flatMap(flatMap).startNewChain().uid("CheckpointingKeyedStateFlatMap2")
        .keyBy(0)
        .transform(
            "timely_stateful_operator",
            new TypeHint<Tuple2<Long, Long>>() {}.getTypeInfo(),
            timelyOperator).uid("CheckpointingTimelyStatefulOperator2")
        .addSink(new MigrationTestUtils.AccumulatorCountingSink<>());

    if (executionMode == ExecutionMode.PERFORM_SAVEPOINT) {
        executeAndSavepoint(
            env,
            "src/test/resources/" + getSavepointPath(testMigrateVersion, testStateBackend),
            new Tuple2<>(MigrationTestUtils.AccumulatorCountingSink.NUM_ELEMENTS_ACCUMULATOR, NUM_SOURCE_ELEMENTS * 2));
    } else {
        restoreAndExecute(
            env,
            getResourceFilename(getSavepointPath(testMigrateVersion, testStateBackend)),
            new Tuple2<>(MigrationTestUtils.CheckingNonParallelSourceWithListState.SUCCESSFUL_RESTORE_CHECK_ACCUMULATOR, 1),
            new Tuple2<>(MigrationTestUtils.CheckingParallelSourceWithUnionListState.SUCCESSFUL_RESTORE_CHECK_ACCUMULATOR, parallelism),
            new Tuple2<>(CheckingKeyedStateFlatMap.SUCCESSFUL_RESTORE_CHECK_ACCUMULATOR, NUM_SOURCE_ELEMENTS * 2),
            new Tuple2<>(CheckingTimelyStatefulOperator.SUCCESSFUL_PROCESS_CHECK_ACCUMULATOR, NUM_SOURCE_ELEMENTS * 2),
            new Tuple2<>(CheckingTimelyStatefulOperator.SUCCESSFUL_EVENT_TIME_CHECK_ACCUMULATOR, NUM_SOURCE_ELEMENTS * 2),
            new Tuple2<>(CheckingTimelyStatefulOperator.SUCCESSFUL_PROCESSING_TIME_CHECK_ACCUMULATOR, NUM_SOURCE_ELEMENTS * 2),
            new Tuple2<>(MigrationTestUtils.AccumulatorCountingSink.NUM_ELEMENTS_ACCUMULATOR, NUM_SOURCE_ELEMENTS * 2));
    }
}
Example 12
Source File: StatefulJobSavepointMigrationITCase.java From Flink-CEPplus with Apache License 2.0
@Test
public void testSavepoint() throws Exception {
    final int parallelism = 4;

    final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    env.setRestartStrategy(RestartStrategies.noRestart());
    env.setStreamTimeCharacteristic(TimeCharacteristic.EventTime);

    // Select the state backend under test by its registered name.
    switch (testStateBackend) {
        case StateBackendLoader.ROCKSDB_STATE_BACKEND_NAME:
            env.setStateBackend(new RocksDBStateBackend(new MemoryStateBackend()));
            break;
        case StateBackendLoader.MEMORY_STATE_BACKEND_NAME:
            env.setStateBackend(new MemoryStateBackend());
            break;
        default:
            throw new UnsupportedOperationException();
    }

    env.enableCheckpointing(500);
    env.setParallelism(parallelism);
    env.setMaxParallelism(parallelism);

    SourceFunction<Tuple2<Long, Long>> nonParallelSource;
    SourceFunction<Tuple2<Long, Long>> parallelSource;
    RichFlatMapFunction<Tuple2<Long, Long>, Tuple2<Long, Long>> flatMap;
    OneInputStreamOperator<Tuple2<Long, Long>, Tuple2<Long, Long>> timelyOperator;

    if (executionMode == ExecutionMode.PERFORM_SAVEPOINT) {
        nonParallelSource = new MigrationTestUtils.CheckpointingNonParallelSourceWithListState(NUM_SOURCE_ELEMENTS);
        parallelSource = new MigrationTestUtils.CheckpointingParallelSourceWithUnionListState(NUM_SOURCE_ELEMENTS);
        flatMap = new CheckpointingKeyedStateFlatMap();
        timelyOperator = new CheckpointingTimelyStatefulOperator();
    } else if (executionMode == ExecutionMode.VERIFY_SAVEPOINT) {
        nonParallelSource = new MigrationTestUtils.CheckingNonParallelSourceWithListState(NUM_SOURCE_ELEMENTS);
        parallelSource = new MigrationTestUtils.CheckingParallelSourceWithUnionListState(NUM_SOURCE_ELEMENTS);
        flatMap = new CheckingKeyedStateFlatMap();
        timelyOperator = new CheckingTimelyStatefulOperator();
    } else {
        throw new IllegalStateException("Unknown ExecutionMode " + executionMode);
    }

    env
        .addSource(nonParallelSource).uid("CheckpointingSource1")
        .keyBy(0)
        .flatMap(flatMap).startNewChain().uid("CheckpointingKeyedStateFlatMap1")
        .keyBy(0)
        .transform(
            "timely_stateful_operator",
            new TypeHint<Tuple2<Long, Long>>() {}.getTypeInfo(),
            timelyOperator).uid("CheckpointingTimelyStatefulOperator1")
        .addSink(new MigrationTestUtils.AccumulatorCountingSink<>());

    env
        .addSource(parallelSource).uid("CheckpointingSource2")
        .keyBy(0)
        .flatMap(flatMap).startNewChain().uid("CheckpointingKeyedStateFlatMap2")
        .keyBy(0)
        .transform(
            "timely_stateful_operator",
            new TypeHint<Tuple2<Long, Long>>() {}.getTypeInfo(),
            timelyOperator).uid("CheckpointingTimelyStatefulOperator2")
        .addSink(new MigrationTestUtils.AccumulatorCountingSink<>());

    if (executionMode == ExecutionMode.PERFORM_SAVEPOINT) {
        executeAndSavepoint(
            env,
            "src/test/resources/" + getSavepointPath(testMigrateVersion, testStateBackend),
            new Tuple2<>(MigrationTestUtils.AccumulatorCountingSink.NUM_ELEMENTS_ACCUMULATOR, NUM_SOURCE_ELEMENTS * 2));
    } else {
        restoreAndExecute(
            env,
            getResourceFilename(getSavepointPath(testMigrateVersion, testStateBackend)),
            new Tuple2<>(MigrationTestUtils.CheckingNonParallelSourceWithListState.SUCCESSFUL_RESTORE_CHECK_ACCUMULATOR, 1),
            new Tuple2<>(MigrationTestUtils.CheckingParallelSourceWithUnionListState.SUCCESSFUL_RESTORE_CHECK_ACCUMULATOR, parallelism),
            new Tuple2<>(CheckingKeyedStateFlatMap.SUCCESSFUL_RESTORE_CHECK_ACCUMULATOR, NUM_SOURCE_ELEMENTS * 2),
            new Tuple2<>(CheckingTimelyStatefulOperator.SUCCESSFUL_PROCESS_CHECK_ACCUMULATOR, NUM_SOURCE_ELEMENTS * 2),
            new Tuple2<>(CheckingTimelyStatefulOperator.SUCCESSFUL_EVENT_TIME_CHECK_ACCUMULATOR, NUM_SOURCE_ELEMENTS * 2),
            new Tuple2<>(CheckingTimelyStatefulOperator.SUCCESSFUL_PROCESSING_TIME_CHECK_ACCUMULATOR, NUM_SOURCE_ELEMENTS * 2),
            new Tuple2<>(MigrationTestUtils.AccumulatorCountingSink.NUM_ELEMENTS_ACCUMULATOR, NUM_SOURCE_ELEMENTS * 2));
    }
}
Example 13
Source File: LegacyStatefulJobSavepointMigrationITCase.java From Flink-CEPplus with Apache License 2.0
@Test
public void testSavepointRestore() throws Exception {
    final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    env.setRestartStrategy(RestartStrategies.noRestart());
    env.setStreamTimeCharacteristic(TimeCharacteristic.EventTime);

    // Select the state backend under test by its registered name.
    switch (testStateBackend) {
        case StateBackendLoader.ROCKSDB_STATE_BACKEND_NAME:
            env.setStateBackend(new RocksDBStateBackend(new MemoryStateBackend()));
            break;
        case StateBackendLoader.MEMORY_STATE_BACKEND_NAME:
            env.setStateBackend(new MemoryStateBackend());
            break;
        default:
            throw new UnsupportedOperationException();
    }

    env.enableCheckpointing(500);
    env.setParallelism(4);
    env.setMaxParallelism(4);

    env
        .addSource(new CheckingRestoringSource(NUM_SOURCE_ELEMENTS)).setMaxParallelism(1).uid("LegacyCheckpointedSource")
        .flatMap(new CheckingRestoringFlatMap()).startNewChain().uid("LegacyCheckpointedFlatMap")
        .keyBy(0)
        .flatMap(new CheckingRestoringFlatMapWithKeyedState()).startNewChain().uid("LegacyCheckpointedFlatMapWithKeyedState")
        .keyBy(0)
        .flatMap(new CheckingKeyedStateFlatMap()).startNewChain().uid("KeyedStateSettingFlatMap")
        .keyBy(0)
        .transform(
            "custom_operator",
            new TypeHint<Tuple2<Long, Long>>() {}.getTypeInfo(),
            new CheckingRestoringUdfOperator(new CheckingRestoringFlatMapWithKeyedStateInOperator())).uid("LegacyCheckpointedOperator")
        .keyBy(0)
        .transform(
            "timely_stateful_operator",
            new TypeHint<Tuple2<Long, Long>>() {}.getTypeInfo(),
            new CheckingTimelyStatefulOperator()).uid("TimelyStatefulOperator")
        .addSink(new AccumulatorCountingSink<Tuple2<Long, Long>>());

    restoreAndExecute(
        env,
        getResourceFilename(getSavepointPath(testMigrateVersion, testStateBackend)),
        new Tuple2<>(CheckingRestoringSource.SUCCESSFUL_RESTORE_CHECK_ACCUMULATOR, 1),
        new Tuple2<>(CheckingRestoringFlatMap.SUCCESSFUL_RESTORE_CHECK_ACCUMULATOR, NUM_SOURCE_ELEMENTS),
        new Tuple2<>(CheckingRestoringFlatMapWithKeyedState.SUCCESSFUL_RESTORE_CHECK_ACCUMULATOR, NUM_SOURCE_ELEMENTS),
        new Tuple2<>(CheckingKeyedStateFlatMap.SUCCESSFUL_RESTORE_CHECK_ACCUMULATOR, NUM_SOURCE_ELEMENTS),
        new Tuple2<>(CheckingRestoringUdfOperator.SUCCESSFUL_RESTORE_CHECK_ACCUMULATOR, NUM_SOURCE_ELEMENTS),
        new Tuple2<>(CheckingRestoringFlatMapWithKeyedStateInOperator.SUCCESSFUL_RESTORE_CHECK_ACCUMULATOR, NUM_SOURCE_ELEMENTS),
        new Tuple2<>(CheckingTimelyStatefulOperator.SUCCESSFUL_PROCESS_CHECK_ACCUMULATOR, NUM_SOURCE_ELEMENTS),
        new Tuple2<>(CheckingTimelyStatefulOperator.SUCCESSFUL_EVENT_TIME_CHECK_ACCUMULATOR, NUM_SOURCE_ELEMENTS),
        new Tuple2<>(CheckingTimelyStatefulOperator.SUCCESSFUL_PROCESSING_TIME_CHECK_ACCUMULATOR, NUM_SOURCE_ELEMENTS),
        new Tuple2<>(AccumulatorCountingSink.NUM_ELEMENTS_ACCUMULATOR, NUM_SOURCE_ELEMENTS));
}
Example 14
Source File: LegacyStatefulJobSavepointMigrationITCase.java From Flink-CEPplus with Apache License 2.0
/**
 * Manually run this to write binary snapshot data.
 */
@Test
@Ignore
public void writeSavepoint() throws Exception {
    final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    env.setStreamTimeCharacteristic(TimeCharacteristic.EventTime);

    switch (flinkGenerateSavepointBackendType) {
        case StateBackendLoader.ROCKSDB_STATE_BACKEND_NAME:
            env.setStateBackend(new RocksDBStateBackend(new MemoryStateBackend()));
            break;
        case StateBackendLoader.MEMORY_STATE_BACKEND_NAME:
            env.setStateBackend(new MemoryStateBackend());
            break;
        default:
            throw new UnsupportedOperationException();
    }

    env.enableCheckpointing(500);
    env.setParallelism(4);
    env.setMaxParallelism(4);

    env
        .addSource(new LegacyCheckpointedSource(NUM_SOURCE_ELEMENTS)).setMaxParallelism(1).uid("LegacyCheckpointedSource")
        .flatMap(new LegacyCheckpointedFlatMap()).startNewChain().uid("LegacyCheckpointedFlatMap")
        .keyBy(0)
        .flatMap(new LegacyCheckpointedFlatMapWithKeyedState()).startNewChain().uid("LegacyCheckpointedFlatMapWithKeyedState")
        .keyBy(0)
        .flatMap(new KeyedStateSettingFlatMap()).startNewChain().uid("KeyedStateSettingFlatMap")
        .keyBy(0)
        .transform(
            "custom_operator",
            new TypeHint<Tuple2<Long, Long>>() {}.getTypeInfo(),
            new CheckpointedUdfOperator(new LegacyCheckpointedFlatMapWithKeyedState())).uid("LegacyCheckpointedOperator")
        .keyBy(0)
        .transform(
            "timely_stateful_operator",
            new TypeHint<Tuple2<Long, Long>>() {}.getTypeInfo(),
            new TimelyStatefulOperator()).uid("TimelyStatefulOperator")
        .addSink(new AccumulatorCountingSink<Tuple2<Long, Long>>());

    executeAndSavepoint(
        env,
        "src/test/resources/" + getSavepointPath(flinkGenerateSavepointVersion, flinkGenerateSavepointBackendType),
        new Tuple2<>(AccumulatorCountingSink.NUM_ELEMENTS_ACCUMULATOR, NUM_SOURCE_ELEMENTS));
}