Java Code Examples for org.apache.flink.contrib.streaming.state.RocksDBStateBackend#setDbStoragePath()
The following examples show how to use org.apache.flink.contrib.streaming.state.RocksDBStateBackend#setDbStoragePath(). Each example notes the original project and source file it was taken from.
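Before the project examples, here is a minimal, self-contained sketch of the typical call pattern: create a RocksDBStateBackend that writes checkpoints to a file-system backend, point its local RocksDB working directory at fast local disk with setDbStoragePath(), and register the backend on the execution environment. The class name and the two directory paths below are illustrative placeholders, not taken from any of the examples that follow.

import org.apache.flink.contrib.streaming.state.RocksDBStateBackend;
import org.apache.flink.runtime.state.filesystem.FsStateBackend;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;

public class RocksDbStoragePathExample {

    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

        // Checkpoints are written through the wrapped FsStateBackend to a (durable) URI;
        // the second argument enables incremental checkpointing.
        RocksDBStateBackend backend =
            new RocksDBStateBackend(new FsStateBackend("file:///tmp/flink-checkpoints"), true);

        // Local directory where RocksDB keeps its working files.
        // Placeholder path for illustration only.
        backend.setDbStoragePath("/tmp/flink-rocksdb");

        env.setStateBackend(backend);

        // ... define sources, transformations, and sinks here, then execute the job.
    }
}

If no storage path is set, the backend falls back by default to the TaskManager's temporary directories for its local working files; pointing it at fast local disk is the usual reason to call setDbStoragePath() explicitly.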
Example 1
Source File: EventTimeWindowCheckpointingITCase.java From flink with Apache License 2.0
private void setupRocksDB(Configuration config, int fileSizeThreshold, boolean incrementalCheckpoints) throws IOException {
    // Configure the managed memory size as 64MB per slot for rocksDB state backend.
    config.set(TaskManagerOptions.MANAGED_MEMORY_SIZE, MemorySize.ofMebiBytes(PARALLELISM / NUM_OF_TASK_MANAGERS * 64));

    String rocksDb = tempFolder.newFolder().getAbsolutePath();
    String backups = tempFolder.newFolder().getAbsolutePath();
    // we use the fs backend with small threshold here to test the behaviour with file
    // references, not self contained byte handles
    RocksDBStateBackend rdb =
        new RocksDBStateBackend(
            new FsStateBackend(
                new Path("file://" + backups).toUri(), fileSizeThreshold),
            incrementalCheckpoints);
    rdb.setDbStoragePath(rocksDb);
    this.stateBackend = rdb;
}
Example 2
Source File: RocksDBTtlStateTestBase.java From Flink-CEPplus with Apache License 2.0
StateBackend createStateBackend(TernaryBoolean enableIncrementalCheckpointing) {
    String dbPath;
    String checkpointPath;
    try {
        dbPath = tempFolder.newFolder().getAbsolutePath();
        checkpointPath = tempFolder.newFolder().toURI().toString();
    } catch (IOException e) {
        throw new FlinkRuntimeException("Failed to init rocksdb test state backend");
    }
    RocksDBStateBackend backend =
        new RocksDBStateBackend(new FsStateBackend(checkpointPath), enableIncrementalCheckpointing);
    Configuration config = new Configuration();
    config.setBoolean(TTL_COMPACT_FILTER_ENABLED, true);
    backend = backend.configure(config, Thread.currentThread().getContextClassLoader());
    backend.setDbStoragePath(dbPath);
    return backend;
}
Example 3
Source File: RocksDBTtlStateTestBase.java From flink with Apache License 2.0
StateBackend createStateBackend(TernaryBoolean enableIncrementalCheckpointing) {
    String dbPath;
    String checkpointPath;
    try {
        dbPath = tempFolder.newFolder().getAbsolutePath();
        checkpointPath = tempFolder.newFolder().toURI().toString();
    } catch (IOException e) {
        throw new FlinkRuntimeException("Failed to init rocksdb test state backend");
    }
    RocksDBStateBackend backend =
        new RocksDBStateBackend(new FsStateBackend(checkpointPath), enableIncrementalCheckpointing);
    Configuration config = new Configuration();
    config.setBoolean(TTL_COMPACT_FILTER_ENABLED, true);
    backend = backend.configure(config, Thread.currentThread().getContextClassLoader());
    backend.setDbStoragePath(dbPath);
    return backend;
}
Example 4
Source File: KeyedStateCheckpointingITCase.java From Flink-CEPplus with Apache License 2.0
@Test
public void testWithRocksDbBackendIncremental() throws Exception {
    RocksDBStateBackend incRocksDbBackend =
        new RocksDBStateBackend(new MemoryStateBackend(MAX_MEM_STATE_SIZE), true);
    incRocksDbBackend.setDbStoragePath(tmpFolder.newFolder().getAbsolutePath());

    testProgramWithBackend(incRocksDbBackend);
}
Example 5
Source File: RocksDBTtlStateTestBase.java From flink with Apache License 2.0
StateBackend createStateBackend(TernaryBoolean enableIncrementalCheckpointing) {
    String dbPath;
    String checkpointPath;
    try {
        dbPath = tempFolder.newFolder().getAbsolutePath();
        checkpointPath = tempFolder.newFolder().toURI().toString();
    } catch (IOException e) {
        throw new FlinkRuntimeException("Failed to init rocksdb test state backend");
    }
    RocksDBStateBackend backend =
        new RocksDBStateBackend(new FsStateBackend(checkpointPath), enableIncrementalCheckpointing);
    Configuration config = new Configuration();
    backend = backend.configure(config, Thread.currentThread().getContextClassLoader());
    backend.setDbStoragePath(dbPath);
    return backend;
}
Example 6
Source File: KeyedStateCheckpointingITCase.java From flink with Apache License 2.0
@Test
public void testWithRocksDbBackendIncremental() throws Exception {
    RocksDBStateBackend incRocksDbBackend =
        new RocksDBStateBackend(new MemoryStateBackend(MAX_MEM_STATE_SIZE), true);
    incRocksDbBackend.setDbStoragePath(tmpFolder.newFolder().getAbsolutePath());

    testProgramWithBackend(incRocksDbBackend);
}
Example 7
Source File: KeyedStateCheckpointingITCase.java From flink with Apache License 2.0
@Test
public void testWithRocksDbBackendFull() throws Exception {
    RocksDBStateBackend fullRocksDbBackend =
        new RocksDBStateBackend(new MemoryStateBackend(MAX_MEM_STATE_SIZE), false);
    fullRocksDbBackend.setDbStoragePath(tmpFolder.newFolder().getAbsolutePath());

    testProgramWithBackend(fullRocksDbBackend);
}
Example 8
Source File: EventTimeWindowCheckpointingITCase.java From flink with Apache License 2.0
private void setupRocksDB(int fileSizeThreshold, boolean incrementalCheckpoints) throws IOException {
    String rocksDb = tempFolder.newFolder().getAbsolutePath();
    String backups = tempFolder.newFolder().getAbsolutePath();
    // we use the fs backend with small threshold here to test the behaviour with file
    // references, not self contained byte handles
    RocksDBStateBackend rdb =
        new RocksDBStateBackend(
            new FsStateBackend(
                new Path("file://" + backups).toUri(), fileSizeThreshold),
            incrementalCheckpoints);
    rdb.setDbStoragePath(rocksDb);
    this.stateBackend = rdb;
}
Example 9
Source File: KeyedStateCheckpointingITCase.java From flink with Apache License 2.0
@Test
public void testWithRocksDbBackendIncremental() throws Exception {
    RocksDBStateBackend incRocksDbBackend =
        new RocksDBStateBackend(new MemoryStateBackend(MAX_MEM_STATE_SIZE), true);
    incRocksDbBackend.setDbStoragePath(tmpFolder.newFolder().getAbsolutePath());

    testProgramWithBackend(incRocksDbBackend);
}
Example 10
Source File: KeyedStateCheckpointingITCase.java From flink with Apache License 2.0
@Test
public void testWithRocksDbBackendFull() throws Exception {
    RocksDBStateBackend fullRocksDbBackend =
        new RocksDBStateBackend(new MemoryStateBackend(MAX_MEM_STATE_SIZE), false);
    fullRocksDbBackend.setDbStoragePath(tmpFolder.newFolder().getAbsolutePath());

    testProgramWithBackend(fullRocksDbBackend);
}
Example 11
Source File: EventTimeWindowCheckpointingITCase.java From Flink-CEPplus with Apache License 2.0
private void setupRocksDB(int fileSizeThreshold, boolean incrementalCheckpoints) throws IOException {
    String rocksDb = tempFolder.newFolder().getAbsolutePath();
    String backups = tempFolder.newFolder().getAbsolutePath();
    // we use the fs backend with small threshold here to test the behaviour with file
    // references, not self contained byte handles
    RocksDBStateBackend rdb =
        new RocksDBStateBackend(
            new FsStateBackend(
                new Path("file://" + backups).toUri(), fileSizeThreshold),
            incrementalCheckpoints);
    rdb.setDbStoragePath(rocksDb);
    this.stateBackend = rdb;
}
Example 12
Source File: KeyedStateCheckpointingITCase.java From Flink-CEPplus with Apache License 2.0
@Test
public void testWithRocksDbBackendFull() throws Exception {
    RocksDBStateBackend fullRocksDbBackend =
        new RocksDBStateBackend(new MemoryStateBackend(MAX_MEM_STATE_SIZE), false);
    fullRocksDbBackend.setDbStoragePath(tmpFolder.newFolder().getAbsolutePath());

    testProgramWithBackend(fullRocksDbBackend);
}
Example 13
Source File: CEPOperatorTest.java From flink with Apache License 2.0
/**
 * Tests that the internal time of a CEP operator advances only given watermarks. See FLINK-5033.
 */
@Test
public void testKeyedAdvancingTimeWithoutElements() throws Exception {
    final Event startEvent = new Event(42, "start", 1.0);
    final long watermarkTimestamp1 = 5L;
    final long watermarkTimestamp2 = 13L;

    final Map<String, List<Event>> expectedSequence = new HashMap<>(2);
    expectedSequence.put("start", Collections.<Event>singletonList(startEvent));

    final OutputTag<Tuple2<Map<String, List<Event>>, Long>> timedOut =
        new OutputTag<Tuple2<Map<String, List<Event>>, Long>>("timedOut") {};
    final KeyedOneInputStreamOperatorTestHarness<Integer, Event, Map<String, List<Event>>> harness =
        new KeyedOneInputStreamOperatorTestHarness<>(
            new CepOperator<>(
                Event.createTypeSerializer(),
                false,
                new NFAFactory(true),
                null,
                null,
                new TimedOutProcessFunction(timedOut),
                null),
            new KeySelector<Event, Integer>() {
                private static final long serialVersionUID = 7219185117566268366L;

                @Override
                public Integer getKey(Event value) throws Exception {
                    return value.getId();
                }
            },
            BasicTypeInfo.INT_TYPE_INFO);

    try {
        String rocksDbPath = tempFolder.newFolder().getAbsolutePath();
        RocksDBStateBackend rocksDBStateBackend = new RocksDBStateBackend(new MemoryStateBackend());
        rocksDBStateBackend.setDbStoragePath(rocksDbPath);

        harness.setStateBackend(rocksDBStateBackend);
        harness.setup(
            new KryoSerializer<>(
                (Class<Map<String, List<Event>>>) (Object) Map.class,
                new ExecutionConfig()));
        harness.open();

        harness.processElement(new StreamRecord<>(startEvent, 3L));
        harness.processWatermark(new Watermark(watermarkTimestamp1));
        harness.processWatermark(new Watermark(watermarkTimestamp2));

        Queue<Object> result = harness.getOutput();
        Queue<StreamRecord<Tuple2<Map<String, List<Event>>, Long>>> sideOutput = harness.getSideOutput(timedOut);

        assertEquals(2L, result.size());
        assertEquals(1L, sideOutput.size());

        Object watermark1 = result.poll();
        assertTrue(watermark1 instanceof Watermark);
        assertEquals(watermarkTimestamp1, ((Watermark) watermark1).getTimestamp());

        Tuple2<Map<String, List<Event>>, Long> leftResult = sideOutput.poll().getValue();
        assertEquals(watermarkTimestamp2, (long) leftResult.f1);
        assertEquals(expectedSequence, leftResult.f0);

        Object watermark2 = result.poll();
        assertTrue(watermark2 instanceof Watermark);
        assertEquals(watermarkTimestamp2, ((Watermark) watermark2).getTimestamp());
    } finally {
        harness.close();
    }
}
Example 14
Source File: CEPOperatorTest.java From flink with Apache License 2.0
@Test
public void testKeyedCEPOperatorNFAUpdateTimesWithRocksDB() throws Exception {
    String rocksDbPath = tempFolder.newFolder().getAbsolutePath();
    RocksDBStateBackend rocksDBStateBackend = new RocksDBStateBackend(new MemoryStateBackend());
    rocksDBStateBackend.setDbStoragePath(rocksDbPath);

    CepOperator<Event, Integer, Map<String, List<Event>>> operator =
        CepOperatorTestUtilities.getKeyedCepOpearator(true, new SimpleNFAFactory());
    OneInputStreamOperatorTestHarness<Event, Map<String, List<Event>>> harness =
        CepOperatorTestUtilities.getCepTestHarness(operator);

    try {
        harness.setStateBackend(rocksDBStateBackend);
        harness.open();

        final ValueState nfaOperatorState =
            (ValueState) Whitebox.<ValueState>getInternalState(operator, "computationStates");
        final ValueState nfaOperatorStateSpy = Mockito.spy(nfaOperatorState);
        Whitebox.setInternalState(operator, "computationStates", nfaOperatorStateSpy);

        Event startEvent = new Event(42, "c", 1.0);
        SubEvent middleEvent = new SubEvent(42, "a", 1.0, 10.0);
        Event endEvent = new Event(42, "b", 1.0);

        harness.processElement(new StreamRecord<>(startEvent, 1L));
        harness.processElement(new StreamRecord<>(new Event(42, "d", 1.0), 4L));
        harness.processElement(new StreamRecord<Event>(middleEvent, 4L));
        harness.processElement(new StreamRecord<>(endEvent, 4L));

        // verify the number of times the NFA state is updated
        Mockito.verify(nfaOperatorStateSpy, Mockito.times(3)).update(Mockito.any());

        // get and verify the output
        Queue<Object> result = harness.getOutput();

        assertEquals(1, result.size());

        verifyPattern(result.poll(), startEvent, middleEvent, endEvent);
    } finally {
        harness.close();
    }
}
Example 15
Source File: CEPOperatorTest.java From Flink-CEPplus with Apache License 2.0
@Test
public void testKeyedCEPOperatorNFAUpdateTimesWithRocksDB() throws Exception {
    String rocksDbPath = tempFolder.newFolder().getAbsolutePath();
    RocksDBStateBackend rocksDBStateBackend = new RocksDBStateBackend(new MemoryStateBackend());
    rocksDBStateBackend.setDbStoragePath(rocksDbPath);

    CepOperator<Event, Integer, Map<String, List<Event>>> operator =
        CepOperatorTestUtilities.getKeyedCepOpearator(true, new SimpleNFAFactory());
    OneInputStreamOperatorTestHarness<Event, Map<String, List<Event>>> harness =
        CepOperatorTestUtilities.getCepTestHarness(operator);

    try {
        harness.setStateBackend(rocksDBStateBackend);
        harness.open();

        final ValueState nfaOperatorState =
            (ValueState) Whitebox.<ValueState>getInternalState(operator, "computationStates");
        final ValueState nfaOperatorStateSpy = Mockito.spy(nfaOperatorState);
        Whitebox.setInternalState(operator, "computationStates", nfaOperatorStateSpy);

        Event startEvent = new Event(42, "c", 1.0);
        SubEvent middleEvent = new SubEvent(42, "a", 1.0, 10.0);
        Event endEvent = new Event(42, "b", 1.0);

        harness.processElement(new StreamRecord<>(startEvent, 1L));
        harness.processElement(new StreamRecord<>(new Event(42, "d", 1.0), 4L));
        harness.processElement(new StreamRecord<Event>(middleEvent, 4L));
        harness.processElement(new StreamRecord<>(endEvent, 4L));

        // verify the number of times the NFA state is updated
        Mockito.verify(nfaOperatorStateSpy, Mockito.times(3)).update(Mockito.any());

        // get and verify the output
        Queue<Object> result = harness.getOutput();

        assertEquals(1, result.size());

        verifyPattern(result.poll(), startEvent, middleEvent, endEvent);
    } finally {
        harness.close();
    }
}
Example 16
Source File: CEPOperatorTest.java From Flink-CEPplus with Apache License 2.0
/**
 * Tests that the internal time of a CEP operator advances only given watermarks. See FLINK-5033.
 */
@Test
public void testKeyedAdvancingTimeWithoutElements() throws Exception {
    final Event startEvent = new Event(42, "start", 1.0);
    final long watermarkTimestamp1 = 5L;
    final long watermarkTimestamp2 = 13L;

    final Map<String, List<Event>> expectedSequence = new HashMap<>(2);
    expectedSequence.put("start", Collections.<Event>singletonList(startEvent));

    final OutputTag<Tuple2<Map<String, List<Event>>, Long>> timedOut =
        new OutputTag<Tuple2<Map<String, List<Event>>, Long>>("timedOut") {};
    final KeyedOneInputStreamOperatorTestHarness<Integer, Event, Map<String, List<Event>>> harness =
        new KeyedOneInputStreamOperatorTestHarness<>(
            new CepOperator<>(
                Event.createTypeSerializer(),
                false,
                new NFAFactory(true),
                null,
                null,
                new TimedOutProcessFunction(timedOut),
                null),
            new KeySelector<Event, Integer>() {
                private static final long serialVersionUID = 7219185117566268366L;

                @Override
                public Integer getKey(Event value) throws Exception {
                    return value.getId();
                }
            },
            BasicTypeInfo.INT_TYPE_INFO);

    try {
        String rocksDbPath = tempFolder.newFolder().getAbsolutePath();
        RocksDBStateBackend rocksDBStateBackend = new RocksDBStateBackend(new MemoryStateBackend());
        rocksDBStateBackend.setDbStoragePath(rocksDbPath);

        harness.setStateBackend(rocksDBStateBackend);
        harness.setup(
            new KryoSerializer<>(
                (Class<Map<String, List<Event>>>) (Object) Map.class,
                new ExecutionConfig()));
        harness.open();

        harness.processElement(new StreamRecord<>(startEvent, 3L));
        harness.processWatermark(new Watermark(watermarkTimestamp1));
        harness.processWatermark(new Watermark(watermarkTimestamp2));

        Queue<Object> result = harness.getOutput();
        Queue<StreamRecord<Tuple2<Map<String, List<Event>>, Long>>> sideOutput = harness.getSideOutput(timedOut);

        assertEquals(2L, result.size());
        assertEquals(1L, sideOutput.size());

        Object watermark1 = result.poll();
        assertTrue(watermark1 instanceof Watermark);
        assertEquals(watermarkTimestamp1, ((Watermark) watermark1).getTimestamp());

        Tuple2<Map<String, List<Event>>, Long> leftResult = sideOutput.poll().getValue();
        assertEquals(watermarkTimestamp2, (long) leftResult.f1);
        assertEquals(expectedSequence, leftResult.f0);

        Object watermark2 = result.poll();
        assertTrue(watermark2 instanceof Watermark);
        assertEquals(watermarkTimestamp2, ((Watermark) watermark2).getTimestamp());
    } finally {
        harness.close();
    }
}
Example 17
Source File: CEPOperatorTest.java From flink with Apache License 2.0
/**
 * Tests that the internal time of a CEP operator advances only given watermarks. See FLINK-5033.
 */
@Test
public void testKeyedAdvancingTimeWithoutElements() throws Exception {
    final Event startEvent = new Event(42, "start", 1.0);
    final long watermarkTimestamp1 = 5L;
    final long watermarkTimestamp2 = 13L;

    final Map<String, List<Event>> expectedSequence = new HashMap<>(2);
    expectedSequence.put("start", Collections.<Event>singletonList(startEvent));

    final OutputTag<Tuple2<Map<String, List<Event>>, Long>> timedOut =
        new OutputTag<Tuple2<Map<String, List<Event>>, Long>>("timedOut") {};
    final KeyedOneInputStreamOperatorTestHarness<Integer, Event, Map<String, List<Event>>> harness =
        new KeyedOneInputStreamOperatorTestHarness<>(
            new CepOperator<>(
                Event.createTypeSerializer(),
                false,
                new NFAFactory(true),
                null,
                null,
                new TimedOutProcessFunction(timedOut),
                null),
            new KeySelector<Event, Integer>() {
                private static final long serialVersionUID = 7219185117566268366L;

                @Override
                public Integer getKey(Event value) throws Exception {
                    return value.getId();
                }
            },
            BasicTypeInfo.INT_TYPE_INFO);

    try {
        String rocksDbPath = tempFolder.newFolder().getAbsolutePath();
        RocksDBStateBackend rocksDBStateBackend = new RocksDBStateBackend(new MemoryStateBackend());
        rocksDBStateBackend.setDbStoragePath(rocksDbPath);

        harness.setStateBackend(rocksDBStateBackend);
        harness.setup(
            new KryoSerializer<>(
                (Class<Map<String, List<Event>>>) (Object) Map.class,
                new ExecutionConfig()));
        harness.open();

        harness.processElement(new StreamRecord<>(startEvent, 3L));
        harness.processWatermark(new Watermark(watermarkTimestamp1));
        harness.processWatermark(new Watermark(watermarkTimestamp2));

        Queue<Object> result = harness.getOutput();
        Queue<StreamRecord<Tuple2<Map<String, List<Event>>, Long>>> sideOutput = harness.getSideOutput(timedOut);

        assertEquals(2L, result.size());
        assertEquals(1L, sideOutput.size());

        Object watermark1 = result.poll();
        assertTrue(watermark1 instanceof Watermark);
        assertEquals(watermarkTimestamp1, ((Watermark) watermark1).getTimestamp());

        Tuple2<Map<String, List<Event>>, Long> leftResult = sideOutput.poll().getValue();
        assertEquals(watermarkTimestamp2, (long) leftResult.f1);
        assertEquals(expectedSequence, leftResult.f0);

        Object watermark2 = result.poll();
        assertTrue(watermark2 instanceof Watermark);
        assertEquals(watermarkTimestamp2, ((Watermark) watermark2).getTimestamp());
    } finally {
        harness.close();
    }
}
Example 18
Source File: CEPOperatorTest.java From flink with Apache License 2.0
@Test
public void testKeyedCEPOperatorNFAUpdateTimesWithRocksDB() throws Exception {
    String rocksDbPath = tempFolder.newFolder().getAbsolutePath();
    RocksDBStateBackend rocksDBStateBackend = new RocksDBStateBackend(new MemoryStateBackend());
    rocksDBStateBackend.setDbStoragePath(rocksDbPath);

    CepOperator<Event, Integer, Map<String, List<Event>>> operator =
        CepOperatorTestUtilities.getKeyedCepOpearator(true, new SimpleNFAFactory());
    OneInputStreamOperatorTestHarness<Event, Map<String, List<Event>>> harness =
        CepOperatorTestUtilities.getCepTestHarness(operator);

    try {
        harness.setStateBackend(rocksDBStateBackend);
        harness.open();

        final ValueState nfaOperatorState =
            (ValueState) Whitebox.<ValueState>getInternalState(operator, "computationStates");
        final ValueState nfaOperatorStateSpy = Mockito.spy(nfaOperatorState);
        Whitebox.setInternalState(operator, "computationStates", nfaOperatorStateSpy);

        Event startEvent = new Event(42, "c", 1.0);
        SubEvent middleEvent = new SubEvent(42, "a", 1.0, 10.0);
        Event endEvent = new Event(42, "b", 1.0);

        harness.processElement(new StreamRecord<>(startEvent, 1L));
        harness.processElement(new StreamRecord<>(new Event(42, "d", 1.0), 4L));
        harness.processElement(new StreamRecord<Event>(middleEvent, 4L));
        harness.processElement(new StreamRecord<>(endEvent, 4L));

        // verify the number of times the NFA state is updated
        Mockito.verify(nfaOperatorStateSpy, Mockito.times(3)).update(Mockito.any());

        // get and verify the output
        Queue<Object> result = harness.getOutput();

        assertEquals(1, result.size());

        verifyPattern(result.poll(), startEvent, middleEvent, endEvent);
    } finally {
        harness.close();
    }
}