org.apache.flink.mock.Whitebox Java Examples
The following examples show how to use org.apache.flink.mock.Whitebox.
They are taken from the Apache Flink project and related repositories; the original source file and its license are listed above each example.
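All of the examples rely on two static helpers: Whitebox.getInternalState(target, fieldName) to read a private field by name, and Whitebox.setInternalState(target, fieldName, value) to overwrite it. Before the real examples, here is a minimal sketch of that pattern in isolation; the Counter class and its count field are hypothetical and exist only to illustrate the calls.

// Hypothetical class under test, used only to illustrate the Whitebox calls.
class Counter {
    private int count = 0;

    void increment() {
        count++;
    }
}

@Test
public void testWhiteboxSketch() {
    Counter counter = new Counter();
    counter.increment();

    // Read the private field by its name.
    int before = (Integer) Whitebox.getInternalState(counter, "count");
    assertEquals(1, before);

    // Overwrite the private field, then observe the effect through the public API.
    Whitebox.setInternalState(counter, "count", 41);
    counter.increment();

    int after = (Integer) Whitebox.getInternalState(counter, "count");
    assertEquals(42, after);
}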
Example #1
Source File: TimerServiceTest.java From flink with Apache License 2.0
/**
 * Test all timeouts registered can be unregistered
 * @throws Exception
 */
@Test
@SuppressWarnings("unchecked")
public void testUnregisterAllTimeouts() throws Exception {
    // Prepare all instances.
    ScheduledExecutorService scheduledExecutorService = mock(ScheduledExecutorService.class);
    ScheduledFuture scheduledFuture = mock(ScheduledFuture.class);
    when(scheduledExecutorService.schedule(any(Runnable.class), anyLong(), any(TimeUnit.class)))
        .thenReturn(scheduledFuture);
    TimerService<AllocationID> timerService = new TimerService<>(scheduledExecutorService, 100L);
    TimeoutListener<AllocationID> listener = mock(TimeoutListener.class);
    timerService.start(listener);

    // Invoke register and unregister.
    timerService.registerTimeout(new AllocationID(), 10, TimeUnit.SECONDS);
    timerService.registerTimeout(new AllocationID(), 10, TimeUnit.SECONDS);

    timerService.unregisterAllTimeouts();

    // Verify.
    Map<?, ?> timeouts = (Map<?, ?>) Whitebox.getInternalState(timerService, "timeouts");
    assertTrue(timeouts.isEmpty());
    verify(scheduledFuture, times(2)).cancel(true);
}
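Example #1 only needs read access to the private timeouts map. When a Whitebox-style utility is not on the classpath, the same lookup can be done with plain java.lang.reflect; the helper below is a sketch of roughly what Whitebox.getInternalState does (the class and method names here are made up for illustration).

import java.lang.reflect.Field;

final class ReflectionTestUtil {

    private ReflectionTestUtil() {}

    /** Reads a private field by name, searching the class and then its superclasses. */
    static Object readField(Object target, String fieldName) throws NoSuchFieldException, IllegalAccessException {
        Class<?> clazz = target.getClass();
        while (clazz != null) {
            try {
                Field field = clazz.getDeclaredField(fieldName);
                field.setAccessible(true);
                return field.get(target);
            } catch (NoSuchFieldException e) {
                // not declared here, keep walking up the hierarchy
                clazz = clazz.getSuperclass();
            }
        }
        throw new NoSuchFieldException(fieldName);
    }
}

With that helper, the verification step of the test above would read Map<?, ?> timeouts = (Map<?, ?>) ReflectionTestUtil.readField(timerService, "timeouts");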
Example #2
Source File: TimerServiceTest.java From Flink-CEPplus with Apache License 2.0
/**
 * Test all timeouts registered can be unregistered
 * @throws Exception
 */
@Test
@SuppressWarnings("unchecked")
public void testUnregisterAllTimeouts() throws Exception {
    // Prepare all instances.
    ScheduledExecutorService scheduledExecutorService = mock(ScheduledExecutorService.class);
    ScheduledFuture scheduledFuture = mock(ScheduledFuture.class);
    when(scheduledExecutorService.schedule(any(Runnable.class), anyLong(), any(TimeUnit.class)))
        .thenReturn(scheduledFuture);
    TimerService<AllocationID> timerService = new TimerService<>(scheduledExecutorService, 100L);
    TimeoutListener<AllocationID> listener = mock(TimeoutListener.class);
    timerService.start(listener);

    // Invoke register and unregister.
    timerService.registerTimeout(new AllocationID(), 10, TimeUnit.SECONDS);
    timerService.registerTimeout(new AllocationID(), 10, TimeUnit.SECONDS);

    timerService.unregisterAllTimeouts();

    // Verify.
    Map<?, ?> timeouts = (Map<?, ?>) Whitebox.getInternalState(timerService, "timeouts");
    assertTrue(timeouts.isEmpty());
    verify(scheduledFuture, times(2)).cancel(true);
}
Example #3
Source File: CEPOperatorTest.java From flink with Apache License 2.0
@Test
public void testKeyedCEPOperatorNFAUpdateTimes() throws Exception {
    CepOperator<Event, Integer, Map<String, List<Event>>> operator =
        CepOperatorTestUtilities.getKeyedCepOpearator(true, new SimpleNFAFactory());
    OneInputStreamOperatorTestHarness<Event, Map<String, List<Event>>> harness =
        CepOperatorTestUtilities.getCepTestHarness(operator);

    try {
        harness.open();

        final ValueState nfaOperatorState = (ValueState) Whitebox.<ValueState>getInternalState(operator, "computationStates");
        final ValueState nfaOperatorStateSpy = Mockito.spy(nfaOperatorState);
        Whitebox.setInternalState(operator, "computationStates", nfaOperatorStateSpy);

        Event startEvent = new Event(42, "c", 1.0);
        SubEvent middleEvent = new SubEvent(42, "a", 1.0, 10.0);
        Event endEvent = new Event(42, "b", 1.0);

        harness.processElement(new StreamRecord<>(startEvent, 1L));
        harness.processElement(new StreamRecord<>(new Event(42, "d", 1.0), 4L));
        harness.processElement(new StreamRecord<Event>(middleEvent, 4L));
        harness.processElement(new StreamRecord<>(endEvent, 4L));

        // verify the number of invocations NFA is updated
        Mockito.verify(nfaOperatorStateSpy, Mockito.times(3)).update(Mockito.any());

        // get and verify the output
        Queue<Object> result = harness.getOutput();

        assertEquals(1, result.size());

        verifyPattern(result.poll(), startEvent, middleEvent, endEvent);
    } finally {
        harness.close();
    }
}
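The core move in this test is to read the operator's private computationStates field, wrap it in a Mockito spy, and write the spy back so that every update() call goes through an observable object. That recipe generalizes; below is a hedged sketch of a reusable helper (the helper name is made up, the Whitebox calls are the ones used above).

// Replaces a private field with a Mockito spy of its current value and returns the spy.
@SuppressWarnings("unchecked")
static <T> T spyInternalState(Object target, String fieldName) {
    T original = (T) Whitebox.getInternalState(target, fieldName);
    T spy = Mockito.spy(original);
    Whitebox.setInternalState(target, fieldName, spy);
    return spy;
}

With such a helper, the three lines after harness.open() in the test above would collapse to ValueState nfaOperatorStateSpy = spyInternalState(operator, "computationStates");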
Example #4
Source File: AvroOutputFormatTest.java From flink with Apache License 2.0
private void serializeAndDeserialize(final AvroOutputFormat.Codec codec, final Schema schema) throws IOException, ClassNotFoundException {
    // given
    final AvroOutputFormat<User> outputFormat = new AvroOutputFormat<>(User.class);
    if (codec != null) {
        outputFormat.setCodec(codec);
    }
    if (schema != null) {
        outputFormat.setSchema(schema);
    }
    final ByteArrayOutputStream bos = new ByteArrayOutputStream();

    // when
    try (final ObjectOutputStream oos = new ObjectOutputStream(bos)) {
        oos.writeObject(outputFormat);
    }
    try (final ObjectInputStream ois = new ObjectInputStream(new ByteArrayInputStream(bos.toByteArray()))) {
        // then
        Object o = ois.readObject();
        assertTrue(o instanceof AvroOutputFormat);
        @SuppressWarnings("unchecked")
        final AvroOutputFormat<User> restored = (AvroOutputFormat<User>) o;
        final AvroOutputFormat.Codec restoredCodec = (AvroOutputFormat.Codec) Whitebox.getInternalState(restored, "codec");
        final Schema restoredSchema = (Schema) Whitebox.getInternalState(restored, "userDefinedSchema");

        assertTrue(codec != null ? restoredCodec == codec : restoredCodec == null);
        assertTrue(schema != null ? restoredSchema.equals(schema) : restoredSchema == null);
    }
}
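The "when" part of this test is a plain Java-serialization round trip, after which Whitebox reads the restored object's private codec and userDefinedSchema fields. The round trip itself is generic; here is a sketch of it as a standalone helper, assuming only the java.io imports already used above (the method name is made up).

@SuppressWarnings("unchecked")
static <T extends Serializable> T javaSerializationRoundTrip(T original) throws IOException, ClassNotFoundException {
    final ByteArrayOutputStream bos = new ByteArrayOutputStream();
    try (ObjectOutputStream oos = new ObjectOutputStream(bos)) {
        // write the object exactly as the test above does
        oos.writeObject(original);
    }
    try (ObjectInputStream ois = new ObjectInputStream(new ByteArrayInputStream(bos.toByteArray()))) {
        // read it back; the cast is unchecked but safe for a same-JVM round trip
        return (T) ois.readObject();
    }
}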
Example #5
Source File: FailoverRegionTest.java From flink with Apache License 2.0
/**
 * Attach pending checkpoints of chk-42 and chk-43 to the execution graph.
 * If {@link #acknowledgeAllCheckpoints(CheckpointCoordinator, Iterator)} called then,
 * chk-42 would become the completed checkpoint.
 */
private void attachPendingCheckpoints(ExecutionGraph eg) throws IOException {
    final Map<Long, PendingCheckpoint> pendingCheckpoints = new HashMap<>();
    final Map<ExecutionAttemptID, ExecutionVertex> verticesToConfirm = new HashMap<>();
    eg.getAllExecutionVertices().forEach(e -> {
        Execution ee = e.getCurrentExecutionAttempt();
        if (ee != null) {
            verticesToConfirm.put(ee.getAttemptId(), e);
        }
    });

    CheckpointCoordinator checkpointCoordinator = eg.getCheckpointCoordinator();
    assertNotNull(checkpointCoordinator);
    CheckpointStorageCoordinatorView checkpointStorage = checkpointCoordinator.getCheckpointStorage();
    pendingCheckpoints.put(checkpointId, new PendingCheckpoint(
        eg.getJobID(),
        checkpointId,
        0L,
        verticesToConfirm,
        CheckpointProperties.forCheckpoint(CheckpointRetentionPolicy.RETAIN_ON_FAILURE),
        checkpointStorage.initializeLocationForCheckpoint(checkpointId),
        eg.getFutureExecutor()));

    long newCheckpointId = checkpointId + 1;
    pendingCheckpoints.put(newCheckpointId, new PendingCheckpoint(
        eg.getJobID(),
        newCheckpointId,
        0L,
        verticesToConfirm,
        CheckpointProperties.forCheckpoint(CheckpointRetentionPolicy.RETAIN_ON_FAILURE),
        checkpointStorage.initializeLocationForCheckpoint(newCheckpointId),
        eg.getFutureExecutor()));
    Whitebox.setInternalState(checkpointCoordinator, "pendingCheckpoints", pendingCheckpoints);
}
Example #6
Source File: AvroOutputFormatTest.java From Flink-CEPplus with Apache License 2.0
private void serializeAndDeserialize(final AvroOutputFormat.Codec codec, final Schema schema) throws IOException, ClassNotFoundException {
    // given
    final AvroOutputFormat<User> outputFormat = new AvroOutputFormat<>(User.class);
    if (codec != null) {
        outputFormat.setCodec(codec);
    }
    if (schema != null) {
        outputFormat.setSchema(schema);
    }
    final ByteArrayOutputStream bos = new ByteArrayOutputStream();

    // when
    try (final ObjectOutputStream oos = new ObjectOutputStream(bos)) {
        oos.writeObject(outputFormat);
    }
    try (final ObjectInputStream ois = new ObjectInputStream(new ByteArrayInputStream(bos.toByteArray()))) {
        // then
        Object o = ois.readObject();
        assertTrue(o instanceof AvroOutputFormat);
        @SuppressWarnings("unchecked")
        final AvroOutputFormat<User> restored = (AvroOutputFormat<User>) o;
        final AvroOutputFormat.Codec restoredCodec = (AvroOutputFormat.Codec) Whitebox.getInternalState(restored, "codec");
        final Schema restoredSchema = (Schema) Whitebox.getInternalState(restored, "userDefinedSchema");

        assertTrue(codec != null ? restoredCodec == codec : restoredCodec == null);
        assertTrue(schema != null ? restoredSchema.equals(schema) : restoredSchema == null);
    }
}
Example #7
Source File: CEPOperatorTest.java From Flink-CEPplus with Apache License 2.0
@Test
public void testKeyedCEPOperatorNFAUpdateTimes() throws Exception {
    CepOperator<Event, Integer, Map<String, List<Event>>> operator =
        CepOperatorTestUtilities.getKeyedCepOpearator(true, new SimpleNFAFactory());
    OneInputStreamOperatorTestHarness<Event, Map<String, List<Event>>> harness =
        CepOperatorTestUtilities.getCepTestHarness(operator);

    try {
        harness.open();

        final ValueState nfaOperatorState = (ValueState) Whitebox.<ValueState>getInternalState(operator, "computationStates");
        final ValueState nfaOperatorStateSpy = Mockito.spy(nfaOperatorState);
        Whitebox.setInternalState(operator, "computationStates", nfaOperatorStateSpy);

        Event startEvent = new Event(42, "c", 1.0);
        SubEvent middleEvent = new SubEvent(42, "a", 1.0, 10.0);
        Event endEvent = new Event(42, "b", 1.0);

        harness.processElement(new StreamRecord<>(startEvent, 1L));
        harness.processElement(new StreamRecord<>(new Event(42, "d", 1.0), 4L));
        harness.processElement(new StreamRecord<Event>(middleEvent, 4L));
        harness.processElement(new StreamRecord<>(endEvent, 4L));

        // verify the number of invocations NFA is updated
        Mockito.verify(nfaOperatorStateSpy, Mockito.times(3)).update(Mockito.any());

        // get and verify the output
        Queue<Object> result = harness.getOutput();

        assertEquals(1, result.size());

        verifyPattern(result.poll(), startEvent, middleEvent, endEvent);
    } finally {
        harness.close();
    }
}
Example #8
Source File: StreamTaskTest.java From Flink-CEPplus with Apache License 2.0
/**
 * FLINK-5985
 *
 * <p>This test ensures that empty snapshots (no op/keyed stated whatsoever) will be reported as stateless tasks. This
 * happens by translating an empty {@link SubtaskState} into reporting 'null' to #acknowledgeCheckpoint.
 */
@Test
public void testEmptySubtaskStateLeadsToStatelessAcknowledgment() throws Exception {
    final long checkpointId = 42L;
    final long timestamp = 1L;

    Environment mockEnvironment = spy(new MockEnvironmentBuilder().build());

    // latch blocks until the async checkpoint thread acknowledges
    final OneShotLatch checkpointCompletedLatch = new OneShotLatch();
    final List<SubtaskState> checkpointResult = new ArrayList<>(1);

    CheckpointResponder checkpointResponder = mock(CheckpointResponder.class);
    doAnswer(new Answer() {
        @Override
        public Object answer(InvocationOnMock invocation) throws Throwable {
            SubtaskState subtaskState = invocation.getArgument(4);
            checkpointResult.add(subtaskState);
            checkpointCompletedLatch.trigger();
            return null;
        }
    }).when(checkpointResponder).acknowledgeCheckpoint(
        any(JobID.class),
        any(ExecutionAttemptID.class),
        anyLong(),
        any(CheckpointMetrics.class),
        nullable(TaskStateSnapshot.class));

    TaskStateManager taskStateManager = new TaskStateManagerImpl(
        new JobID(1L, 2L),
        new ExecutionAttemptID(1L, 2L),
        mock(TaskLocalStateStoreImpl.class),
        null,
        checkpointResponder);

    when(mockEnvironment.getTaskStateManager()).thenReturn(taskStateManager);

    StreamTask<?, ?> streamTask = new EmptyStreamTask(mockEnvironment);
    CheckpointMetaData checkpointMetaData = new CheckpointMetaData(checkpointId, timestamp);

    // mock the operators
    StreamOperator<?> statelessOperator = mock(StreamOperator.class);

    final OperatorID operatorID = new OperatorID();
    when(statelessOperator.getOperatorID()).thenReturn(operatorID);

    // mock the returned empty snapshot result (all state handles are null)
    OperatorSnapshotFutures statelessOperatorSnapshotResult = new OperatorSnapshotFutures();
    when(statelessOperator.snapshotState(anyLong(), anyLong(), any(CheckpointOptions.class), any(CheckpointStreamFactory.class)))
        .thenReturn(statelessOperatorSnapshotResult);

    // set up the task
    StreamOperator<?>[] streamOperators = {statelessOperator};
    OperatorChain<Void, AbstractStreamOperator<Void>> operatorChain = mock(OperatorChain.class);
    when(operatorChain.getAllOperators()).thenReturn(streamOperators);

    Whitebox.setInternalState(streamTask, "isRunning", true);
    Whitebox.setInternalState(streamTask, "lock", new Object());
    Whitebox.setInternalState(streamTask, "operatorChain", operatorChain);
    Whitebox.setInternalState(streamTask, "cancelables", new CloseableRegistry());
    Whitebox.setInternalState(streamTask, "configuration", new StreamConfig(new Configuration()));
    Whitebox.setInternalState(streamTask, "asyncOperationsThreadPool", Executors.newCachedThreadPool());
    Whitebox.setInternalState(streamTask, "checkpointStorage", new MemoryBackendCheckpointStorage(new JobID(), null, null, Integer.MAX_VALUE));

    streamTask.triggerCheckpoint(checkpointMetaData, CheckpointOptions.forCheckpointWithDefaultLocation());
    checkpointCompletedLatch.await(30, TimeUnit.SECONDS);
    streamTask.cancel();

    // ensure that 'null' was acknowledged as subtask state
    Assert.assertNull(checkpointResult.get(0));
}
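This test leans on Whitebox.setInternalState to wire up a half-constructed StreamTask: every private collaborator (operatorChain, cancelables, checkpointStorage, and so on) is pushed in by field name. The write-side counterpart of the reflection helper sketched under Example #1 would look roughly like this; it is a sketch of what setInternalState does, not Flink API, and final or static fields would need extra handling.

/** Writes a private field by name, searching the class and then its superclasses. */
static void writeField(Object target, String fieldName, Object value) throws NoSuchFieldException, IllegalAccessException {
    Class<?> clazz = target.getClass();
    while (clazz != null) {
        try {
            Field field = clazz.getDeclaredField(fieldName);
            field.setAccessible(true);
            field.set(target, value);
            return;
        } catch (NoSuchFieldException e) {
            // not declared here, keep walking up the hierarchy
            clazz = clazz.getSuperclass();
        }
    }
    throw new NoSuchFieldException(fieldName);
}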
Example #9
Source File: StreamTaskTest.java From Flink-CEPplus with Apache License 2.0
/**
 * FLINK-5667
 *
 * <p>Tests that a concurrent cancel operation discards the state handles of a not yet
 * acknowledged checkpoint and prevents sending an acknowledge message to the
 * CheckpointCoordinator. The situation can only happen if the cancel call is executed
 * before Environment.acknowledgeCheckpoint().
 */
@Test
public void testAsyncCheckpointingConcurrentCloseBeforeAcknowledge() throws Exception {
    final long checkpointId = 42L;
    final long timestamp = 1L;

    final OneShotLatch createSubtask = new OneShotLatch();
    final OneShotLatch completeSubtask = new OneShotLatch();

    Environment mockEnvironment = spy(new MockEnvironmentBuilder().build());

    whenNew(OperatorSnapshotFinalizer.class)
        .withAnyArguments()
        .thenAnswer((Answer<OperatorSnapshotFinalizer>) invocation -> {
            createSubtask.trigger();
            completeSubtask.await();
            Object[] arguments = invocation.getArguments();
            return new OperatorSnapshotFinalizer((OperatorSnapshotFutures) arguments[0]);
        });

    StreamTask<?, ?> streamTask = new EmptyStreamTask(mockEnvironment);
    CheckpointMetaData checkpointMetaData = new CheckpointMetaData(checkpointId, timestamp);

    final StreamOperator<?> streamOperator = mock(StreamOperator.class);
    final OperatorID operatorID = new OperatorID();
    when(streamOperator.getOperatorID()).thenReturn(operatorID);

    KeyedStateHandle managedKeyedStateHandle = mock(KeyedStateHandle.class);
    KeyedStateHandle rawKeyedStateHandle = mock(KeyedStateHandle.class);
    OperatorStateHandle managedOperatorStateHandle = mock(OperatorStreamStateHandle.class);
    OperatorStateHandle rawOperatorStateHandle = mock(OperatorStreamStateHandle.class);

    OperatorSnapshotFutures operatorSnapshotResult = new OperatorSnapshotFutures(
        DoneFuture.of(SnapshotResult.of(managedKeyedStateHandle)),
        DoneFuture.of(SnapshotResult.of(rawKeyedStateHandle)),
        DoneFuture.of(SnapshotResult.of(managedOperatorStateHandle)),
        DoneFuture.of(SnapshotResult.of(rawOperatorStateHandle)));

    when(streamOperator.snapshotState(anyLong(), anyLong(), any(CheckpointOptions.class), any(CheckpointStreamFactory.class))).thenReturn(operatorSnapshotResult);

    StreamOperator<?>[] streamOperators = {streamOperator};

    OperatorChain<Void, AbstractStreamOperator<Void>> operatorChain = mock(OperatorChain.class);
    when(operatorChain.getAllOperators()).thenReturn(streamOperators);

    CheckpointStorage checkpointStorage = new MemoryBackendCheckpointStorage(new JobID(), null, null, Integer.MAX_VALUE);

    ExecutorService executor = Executors.newFixedThreadPool(1);

    Whitebox.setInternalState(streamTask, "isRunning", true);
    Whitebox.setInternalState(streamTask, "lock", new Object());
    Whitebox.setInternalState(streamTask, "operatorChain", operatorChain);
    Whitebox.setInternalState(streamTask, "cancelables", new CloseableRegistry());
    Whitebox.setInternalState(streamTask, "asyncOperationsThreadPool", executor);
    Whitebox.setInternalState(streamTask, "configuration", new StreamConfig(new Configuration()));
    Whitebox.setInternalState(streamTask, "checkpointStorage", checkpointStorage);

    streamTask.triggerCheckpoint(checkpointMetaData, CheckpointOptions.forCheckpointWithDefaultLocation());

    createSubtask.await();

    streamTask.cancel();

    completeSubtask.trigger();

    // wait for the completion of the async task
    executor.shutdown();

    if (!executor.awaitTermination(10000L, TimeUnit.MILLISECONDS)) {
        fail("Executor did not shut down within the given timeout. This indicates that the " +
            "checkpointing did not resume.");
    }

    // check that the checkpoint has not been acknowledged
    verify(mockEnvironment, never()).acknowledgeCheckpoint(eq(checkpointId), any(CheckpointMetrics.class), any(TaskStateSnapshot.class));

    // check that the state handles have been discarded
    verify(managedKeyedStateHandle).discardState();
    verify(rawKeyedStateHandle).discardState();
    verify(managedOperatorStateHandle).discardState();
    verify(rawOperatorStateHandle).discardState();
}
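The two OneShotLatch instances are what make the race deterministic: the mocked snapshot finalizer signals createSubtask once the async path has started, then blocks on completeSubtask until the test has issued the concurrent cancel. The same handshake can be written with JDK latches only; the sketch below is independent of Flink's test utilities, and the Runnable body is a placeholder for the async work (assumes java.util.concurrent imports).

static void latchHandshakeSketch() throws InterruptedException {
    final CountDownLatch started = new CountDownLatch(1);
    final CountDownLatch mayFinish = new CountDownLatch(1);

    Thread asyncWork = new Thread(() -> {
        started.countDown();              // signal: the async path has begun
        try {
            mayFinish.await();            // block until the test releases it
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
        }
        // ... finish the asynchronous work here ...
    });
    asyncWork.start();

    started.await();                      // test waits for the async path to start
    // ... perform the concurrent action under test here, e.g. a cancel call ...
    mayFinish.countDown();                // let the async path run to completion
    asyncWork.join();
}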
Example #10
Source File: CEPOperatorTest.java From flink with Apache License 2.0
@Test
public void testKeyedCEPOperatorNFAUpdateTimesWithRocksDB() throws Exception {
    String rocksDbPath = tempFolder.newFolder().getAbsolutePath();
    RocksDBStateBackend rocksDBStateBackend = new RocksDBStateBackend(new MemoryStateBackend());
    rocksDBStateBackend.setDbStoragePath(rocksDbPath);

    CepOperator<Event, Integer, Map<String, List<Event>>> operator =
        CepOperatorTestUtilities.getKeyedCepOpearator(true, new SimpleNFAFactory());
    OneInputStreamOperatorTestHarness<Event, Map<String, List<Event>>> harness =
        CepOperatorTestUtilities.getCepTestHarness(operator);

    try {
        harness.setStateBackend(rocksDBStateBackend);
        harness.open();

        final ValueState nfaOperatorState = (ValueState) Whitebox.<ValueState>getInternalState(operator, "computationStates");
        final ValueState nfaOperatorStateSpy = Mockito.spy(nfaOperatorState);
        Whitebox.setInternalState(operator, "computationStates", nfaOperatorStateSpy);

        Event startEvent = new Event(42, "c", 1.0);
        SubEvent middleEvent = new SubEvent(42, "a", 1.0, 10.0);
        Event endEvent = new Event(42, "b", 1.0);

        harness.processElement(new StreamRecord<>(startEvent, 1L));
        harness.processElement(new StreamRecord<>(new Event(42, "d", 1.0), 4L));
        harness.processElement(new StreamRecord<Event>(middleEvent, 4L));
        harness.processElement(new StreamRecord<>(endEvent, 4L));

        // verify the number of invocations NFA is updated
        Mockito.verify(nfaOperatorStateSpy, Mockito.times(3)).update(Mockito.any());

        // get and verify the output
        Queue<Object> result = harness.getOutput();

        assertEquals(1, result.size());

        verifyPattern(result.poll(), startEvent, middleEvent, endEvent);
    } finally {
        harness.close();
    }
}
Example #11
Source File: FailoverStrategyCheckpointCoordinatorTest.java From flink with Apache License 2.0
/**
 * Tests that {@link CheckpointCoordinator#abortPendingCheckpoints(CheckpointException)}
 * called by {@link AdaptedRestartPipelinedRegionStrategyNG} or {@link FailoverRegion} could handle
 * the {@code currentPeriodicTrigger} null situation well.
 */
@Test
public void testAbortPendingCheckpointsWithTriggerValidation() {
    final int maxConcurrentCheckpoints = ThreadLocalRandom.current().nextInt(10) + 1;
    ExecutionVertex executionVertex = mockExecutionVertex();
    CheckpointCoordinatorConfiguration checkpointCoordinatorConfiguration = new CheckpointCoordinatorConfiguration(
        Integer.MAX_VALUE,
        Integer.MAX_VALUE,
        0,
        maxConcurrentCheckpoints,
        CheckpointRetentionPolicy.NEVER_RETAIN_AFTER_TERMINATION,
        true,
        false,
        0);
    CheckpointCoordinator checkpointCoordinator = new CheckpointCoordinator(
        new JobID(),
        checkpointCoordinatorConfiguration,
        new ExecutionVertex[] { executionVertex },
        new ExecutionVertex[] { executionVertex },
        new ExecutionVertex[] { executionVertex },
        new StandaloneCheckpointIDCounter(),
        new StandaloneCompletedCheckpointStore(1),
        new MemoryStateBackend(),
        Executors.directExecutor(),
        SharedStateRegistry.DEFAULT_FACTORY,
        mock(CheckpointFailureManager.class));

    // switch current execution's state to running to allow checkpoint could be triggered.
    mockExecutionRunning(executionVertex);

    // use manual checkpoint timer to trigger period checkpoints as we expect.
    ManualCheckpointTimer manualCheckpointTimer = new ManualCheckpointTimer(manualThreadExecutor);
    // set the init delay as 0 to ensure first checkpoint could be triggered once we trigger the manual executor
    // this is used to avoid the randomness of when to trigger the first checkpoint (introduced via FLINK-9352)
    manualCheckpointTimer.setManualDelay(0L);
    Whitebox.setInternalState(checkpointCoordinator, "timer", manualCheckpointTimer);

    checkpointCoordinator.startCheckpointScheduler();
    assertTrue(checkpointCoordinator.isCurrentPeriodicTriggerAvailable());
    manualThreadExecutor.triggerAll();
    manualThreadExecutor.triggerScheduledTasks();
    assertEquals(1, checkpointCoordinator.getNumberOfPendingCheckpoints());

    for (int i = 1; i < maxConcurrentCheckpoints; i++) {
        checkpointCoordinator.triggerCheckpoint(System.currentTimeMillis(), false);
        assertEquals(i + 1, checkpointCoordinator.getNumberOfPendingCheckpoints());
        assertTrue(checkpointCoordinator.isCurrentPeriodicTriggerAvailable());
    }

    // as we only support limited concurrent checkpoints, after checkpoint triggered more than the limits,
    // the currentPeriodicTrigger would been assigned as null.
    checkpointCoordinator.triggerCheckpoint(System.currentTimeMillis(), false);
    assertFalse(checkpointCoordinator.isCurrentPeriodicTriggerAvailable());
    assertEquals(maxConcurrentCheckpoints, checkpointCoordinator.getNumberOfPendingCheckpoints());

    checkpointCoordinator.abortPendingCheckpoints(
        new CheckpointException(CheckpointFailureReason.JOB_FAILOVER_REGION));

    // after aborting checkpoints, we ensure currentPeriodicTrigger still available.
    assertTrue(checkpointCoordinator.isCurrentPeriodicTriggerAvailable());
    assertEquals(0, checkpointCoordinator.getNumberOfPendingCheckpoints());
}
Example #12
Source File: StreamTaskTest.java From Flink-CEPplus with Apache License 2.0
/**
 * Tests that in case of a failing AsyncCheckpointRunnable all operator snapshot results are
 * cancelled and all non partitioned state handles are discarded.
 */
@Test
public void testFailingAsyncCheckpointRunnable() throws Exception {
    final long checkpointId = 42L;
    final long timestamp = 1L;

    MockEnvironment mockEnvironment = new MockEnvironmentBuilder().build();
    StreamTask<?, ?> streamTask = spy(new EmptyStreamTask(mockEnvironment));
    CheckpointMetaData checkpointMetaData = new CheckpointMetaData(checkpointId, timestamp);

    // mock the operators
    StreamOperator<?> streamOperator1 = mock(StreamOperator.class);
    StreamOperator<?> streamOperator2 = mock(StreamOperator.class);
    StreamOperator<?> streamOperator3 = mock(StreamOperator.class);

    // mock the new state operator snapshots
    OperatorSnapshotFutures operatorSnapshotResult1 = mock(OperatorSnapshotFutures.class);
    OperatorSnapshotFutures operatorSnapshotResult2 = mock(OperatorSnapshotFutures.class);
    OperatorSnapshotFutures operatorSnapshotResult3 = mock(OperatorSnapshotFutures.class);

    RunnableFuture<SnapshotResult<OperatorStateHandle>> failingFuture = mock(RunnableFuture.class);
    when(failingFuture.get()).thenThrow(new ExecutionException(new Exception("Test exception")));

    when(operatorSnapshotResult3.getOperatorStateRawFuture()).thenReturn(failingFuture);

    when(streamOperator1.snapshotState(anyLong(), anyLong(), any(CheckpointOptions.class), any(CheckpointStreamFactory.class))).thenReturn(operatorSnapshotResult1);
    when(streamOperator2.snapshotState(anyLong(), anyLong(), any(CheckpointOptions.class), any(CheckpointStreamFactory.class))).thenReturn(operatorSnapshotResult2);
    when(streamOperator3.snapshotState(anyLong(), anyLong(), any(CheckpointOptions.class), any(CheckpointStreamFactory.class))).thenReturn(operatorSnapshotResult3);

    OperatorID operatorID1 = new OperatorID();
    OperatorID operatorID2 = new OperatorID();
    OperatorID operatorID3 = new OperatorID();

    when(streamOperator1.getOperatorID()).thenReturn(operatorID1);
    when(streamOperator2.getOperatorID()).thenReturn(operatorID2);
    when(streamOperator3.getOperatorID()).thenReturn(operatorID3);

    StreamOperator<?>[] streamOperators = {streamOperator1, streamOperator2, streamOperator3};

    OperatorChain<Void, AbstractStreamOperator<Void>> operatorChain = mock(OperatorChain.class);
    when(operatorChain.getAllOperators()).thenReturn(streamOperators);

    Whitebox.setInternalState(streamTask, "isRunning", true);
    Whitebox.setInternalState(streamTask, "lock", new Object());
    Whitebox.setInternalState(streamTask, "operatorChain", operatorChain);
    Whitebox.setInternalState(streamTask, "cancelables", new CloseableRegistry());
    Whitebox.setInternalState(streamTask, "asyncOperationsThreadPool", newDirectExecutorService());
    Whitebox.setInternalState(streamTask, "configuration", new StreamConfig(new Configuration()));
    Whitebox.setInternalState(streamTask, "checkpointStorage", new MemoryBackendCheckpointStorage(new JobID(), null, null, Integer.MAX_VALUE));

    CheckpointExceptionHandlerFactory checkpointExceptionHandlerFactory = new CheckpointExceptionHandlerFactory();
    CheckpointExceptionHandler checkpointExceptionHandler =
        checkpointExceptionHandlerFactory.createCheckpointExceptionHandler(true, mockEnvironment);
    Whitebox.setInternalState(streamTask, "synchronousCheckpointExceptionHandler", checkpointExceptionHandler);

    StreamTask.AsyncCheckpointExceptionHandler asyncCheckpointExceptionHandler =
        new StreamTask.AsyncCheckpointExceptionHandler(streamTask);
    Whitebox.setInternalState(streamTask, "asynchronousCheckpointExceptionHandler", asyncCheckpointExceptionHandler);

    mockEnvironment.setExpectedExternalFailureCause(Throwable.class);
    streamTask.triggerCheckpoint(checkpointMetaData, CheckpointOptions.forCheckpointWithDefaultLocation());

    verify(streamTask).handleAsyncException(anyString(), any(Throwable.class));

    verify(operatorSnapshotResult1).cancel();
    verify(operatorSnapshotResult2).cancel();
    verify(operatorSnapshotResult3).cancel();
}
Example #13
Source File: StreamTaskTest.java From Flink-CEPplus with Apache License 2.0
@Test
public void testFailingCheckpointStreamOperator() throws Exception {
    final long checkpointId = 42L;
    final long timestamp = 1L;

    TaskInfo mockTaskInfo = mock(TaskInfo.class);
    when(mockTaskInfo.getTaskNameWithSubtasks()).thenReturn("foobar");
    when(mockTaskInfo.getIndexOfThisSubtask()).thenReturn(0);
    Environment mockEnvironment = new MockEnvironmentBuilder().build();

    StreamTask<?, ?> streamTask = new EmptyStreamTask(mockEnvironment);
    CheckpointMetaData checkpointMetaData = new CheckpointMetaData(checkpointId, timestamp);

    // mock the operators
    StreamOperator<?> streamOperator1 = mock(StreamOperator.class);
    StreamOperator<?> streamOperator2 = mock(StreamOperator.class);
    StreamOperator<?> streamOperator3 = mock(StreamOperator.class);

    // mock the returned snapshots
    OperatorSnapshotFutures operatorSnapshotResult1 = mock(OperatorSnapshotFutures.class);
    OperatorSnapshotFutures operatorSnapshotResult2 = mock(OperatorSnapshotFutures.class);

    final Exception testException = new Exception("Test exception");

    when(streamOperator1.snapshotState(anyLong(), anyLong(), any(CheckpointOptions.class), any(CheckpointStreamFactory.class))).thenReturn(operatorSnapshotResult1);
    when(streamOperator2.snapshotState(anyLong(), anyLong(), any(CheckpointOptions.class), any(CheckpointStreamFactory.class))).thenReturn(operatorSnapshotResult2);
    when(streamOperator3.snapshotState(anyLong(), anyLong(), any(CheckpointOptions.class), any(CheckpointStreamFactory.class))).thenThrow(testException);

    OperatorID operatorID1 = new OperatorID();
    OperatorID operatorID2 = new OperatorID();
    OperatorID operatorID3 = new OperatorID();

    when(streamOperator1.getOperatorID()).thenReturn(operatorID1);
    when(streamOperator2.getOperatorID()).thenReturn(operatorID2);
    when(streamOperator3.getOperatorID()).thenReturn(operatorID3);

    // set up the task
    StreamOperator<?>[] streamOperators = {streamOperator1, streamOperator2, streamOperator3};

    OperatorChain<Void, AbstractStreamOperator<Void>> operatorChain = mock(OperatorChain.class);
    when(operatorChain.getAllOperators()).thenReturn(streamOperators);

    Whitebox.setInternalState(streamTask, "isRunning", true);
    Whitebox.setInternalState(streamTask, "lock", new Object());
    Whitebox.setInternalState(streamTask, "operatorChain", operatorChain);
    Whitebox.setInternalState(streamTask, "cancelables", new CloseableRegistry());
    Whitebox.setInternalState(streamTask, "configuration", new StreamConfig(new Configuration()));
    Whitebox.setInternalState(streamTask, "checkpointStorage", new MemoryBackendCheckpointStorage(new JobID(), null, null, Integer.MAX_VALUE));

    CheckpointExceptionHandlerFactory checkpointExceptionHandlerFactory = new CheckpointExceptionHandlerFactory();
    CheckpointExceptionHandler checkpointExceptionHandler =
        checkpointExceptionHandlerFactory.createCheckpointExceptionHandler(true, mockEnvironment);
    Whitebox.setInternalState(streamTask, "synchronousCheckpointExceptionHandler", checkpointExceptionHandler);

    StreamTask.AsyncCheckpointExceptionHandler asyncCheckpointExceptionHandler =
        new StreamTask.AsyncCheckpointExceptionHandler(streamTask);
    Whitebox.setInternalState(streamTask, "asynchronousCheckpointExceptionHandler", asyncCheckpointExceptionHandler);

    try {
        streamTask.triggerCheckpoint(checkpointMetaData, CheckpointOptions.forCheckpointWithDefaultLocation());
        fail("Expected test exception here.");
    } catch (Exception e) {
        assertEquals(testException, e.getCause());
    }

    verify(operatorSnapshotResult1).cancel();
    verify(operatorSnapshotResult2).cancel();
}
Example #14
Source File: AbstractStreamOperatorTest.java From flink with Apache License 2.0
/**
 * Tests that a failing snapshot method call to the keyed state backend will trigger the closing
 * of the StateSnapshotContextSynchronousImpl and the cancellation of the
 * OperatorSnapshotResult. The latter is supposed to also cancel all assigned futures.
 */
@Test
public void testFailingBackendSnapshotMethod() throws Exception {
    final long checkpointId = 42L;
    final long timestamp = 1L;

    final Exception failingException = new Exception("Test exception");

    final CloseableRegistry closeableRegistry = new CloseableRegistry();

    RunnableFuture<SnapshotResult<KeyedStateHandle>> futureKeyedStateHandle = mock(RunnableFuture.class);
    RunnableFuture<SnapshotResult<OperatorStateHandle>> futureOperatorStateHandle = mock(RunnableFuture.class);

    StateSnapshotContextSynchronousImpl context = spy(new StateSnapshotContextSynchronousImpl(checkpointId, timestamp));
    when(context.getKeyedStateStreamFuture()).thenReturn(futureKeyedStateHandle);
    when(context.getOperatorStateStreamFuture()).thenReturn(futureOperatorStateHandle);

    OperatorSnapshotFutures operatorSnapshotResult = spy(new OperatorSnapshotFutures());

    whenNew(StateSnapshotContextSynchronousImpl.class)
        .withArguments(
            anyLong(),
            anyLong(),
            any(CheckpointStreamFactory.class),
            nullable(KeyGroupRange.class),
            any(CloseableRegistry.class))
        .thenReturn(context);
    whenNew(OperatorSnapshotFutures.class).withAnyArguments().thenReturn(operatorSnapshotResult);

    StreamTask<Void, AbstractStreamOperator<Void>> containingTask = mock(StreamTask.class);
    when(containingTask.getCancelables()).thenReturn(closeableRegistry);

    AbstractStreamOperator<Void> operator = mock(AbstractStreamOperator.class);
    when(operator.snapshotState(anyLong(), anyLong(), any(CheckpointOptions.class), any(CheckpointStreamFactory.class))).thenCallRealMethod();

    doCallRealMethod().when(operator).close();
    doCallRealMethod().when(operator).dispose();

    doReturn(containingTask).when(operator).getContainingTask();

    RunnableFuture<SnapshotResult<OperatorStateHandle>> futureManagedOperatorStateHandle = mock(RunnableFuture.class);

    OperatorStateBackend operatorStateBackend = mock(OperatorStateBackend.class);
    when(operatorStateBackend.snapshot(
        eq(checkpointId),
        eq(timestamp),
        any(CheckpointStreamFactory.class),
        any(CheckpointOptions.class))).thenReturn(futureManagedOperatorStateHandle);

    AbstractKeyedStateBackend<?> keyedStateBackend = mock(AbstractKeyedStateBackend.class);
    when(keyedStateBackend.snapshot(
        eq(checkpointId),
        eq(timestamp),
        any(CheckpointStreamFactory.class),
        eq(CheckpointOptions.forCheckpointWithDefaultLocation()))).thenThrow(failingException);

    closeableRegistry.registerCloseable(operatorStateBackend);
    closeableRegistry.registerCloseable(keyedStateBackend);

    Whitebox.setInternalState(operator, "operatorStateBackend", operatorStateBackend);
    Whitebox.setInternalState(operator, "keyedStateBackend", keyedStateBackend);

    try {
        operator.snapshotState(
            checkpointId,
            timestamp,
            CheckpointOptions.forCheckpointWithDefaultLocation(),
            new MemCheckpointStreamFactory(Integer.MAX_VALUE));
        fail("Exception expected.");
    } catch (Exception e) {
        assertEquals(failingException, e.getCause());
    }

    // verify that the context has been closed, the operator snapshot result has been cancelled
    // and that all futures have been cancelled.
    verify(operatorSnapshotResult).cancel();

    verify(futureKeyedStateHandle).cancel(anyBoolean());
    verify(futureOperatorStateHandle).cancel(anyBoolean());
    verify(futureKeyedStateHandle).cancel(anyBoolean());

    operator.close();
    operator.dispose();

    verify(operatorStateBackend).close();
    verify(keyedStateBackend).close();
    verify(operatorStateBackend).dispose();
    verify(keyedStateBackend).dispose();
}
Example #15
Source File: AbstractStreamOperatorTest.java From Flink-CEPplus with Apache License 2.0
/**
 * Tests that a failing snapshot method call to the keyed state backend will trigger the closing
 * of the StateSnapshotContextSynchronousImpl and the cancellation of the
 * OperatorSnapshotResult. The latter is supposed to also cancel all assigned futures.
 */
@Test
public void testFailingBackendSnapshotMethod() throws Exception {
    final long checkpointId = 42L;
    final long timestamp = 1L;

    final Exception failingException = new Exception("Test exception");

    final CloseableRegistry closeableRegistry = new CloseableRegistry();

    RunnableFuture<SnapshotResult<KeyedStateHandle>> futureKeyedStateHandle = mock(RunnableFuture.class);
    RunnableFuture<SnapshotResult<OperatorStateHandle>> futureOperatorStateHandle = mock(RunnableFuture.class);

    StateSnapshotContextSynchronousImpl context = spy(new StateSnapshotContextSynchronousImpl(checkpointId, timestamp));
    when(context.getKeyedStateStreamFuture()).thenReturn(futureKeyedStateHandle);
    when(context.getOperatorStateStreamFuture()).thenReturn(futureOperatorStateHandle);

    OperatorSnapshotFutures operatorSnapshotResult = spy(new OperatorSnapshotFutures());

    whenNew(StateSnapshotContextSynchronousImpl.class)
        .withArguments(
            anyLong(),
            anyLong(),
            any(CheckpointStreamFactory.class),
            nullable(KeyGroupRange.class),
            any(CloseableRegistry.class))
        .thenReturn(context);
    whenNew(OperatorSnapshotFutures.class).withAnyArguments().thenReturn(operatorSnapshotResult);

    StreamTask<Void, AbstractStreamOperator<Void>> containingTask = mock(StreamTask.class);
    when(containingTask.getCancelables()).thenReturn(closeableRegistry);

    AbstractStreamOperator<Void> operator = mock(AbstractStreamOperator.class);
    when(operator.snapshotState(anyLong(), anyLong(), any(CheckpointOptions.class), any(CheckpointStreamFactory.class))).thenCallRealMethod();

    doCallRealMethod().when(operator).close();
    doCallRealMethod().when(operator).dispose();

    doReturn(containingTask).when(operator).getContainingTask();

    RunnableFuture<SnapshotResult<OperatorStateHandle>> futureManagedOperatorStateHandle = mock(RunnableFuture.class);

    OperatorStateBackend operatorStateBackend = mock(OperatorStateBackend.class);
    when(operatorStateBackend.snapshot(
        eq(checkpointId),
        eq(timestamp),
        any(CheckpointStreamFactory.class),
        any(CheckpointOptions.class))).thenReturn(futureManagedOperatorStateHandle);

    AbstractKeyedStateBackend<?> keyedStateBackend = mock(AbstractKeyedStateBackend.class);
    when(keyedStateBackend.snapshot(
        eq(checkpointId),
        eq(timestamp),
        any(CheckpointStreamFactory.class),
        eq(CheckpointOptions.forCheckpointWithDefaultLocation()))).thenThrow(failingException);

    closeableRegistry.registerCloseable(operatorStateBackend);
    closeableRegistry.registerCloseable(keyedStateBackend);

    Whitebox.setInternalState(operator, "operatorStateBackend", operatorStateBackend);
    Whitebox.setInternalState(operator, "keyedStateBackend", keyedStateBackend);

    try {
        operator.snapshotState(
            checkpointId,
            timestamp,
            CheckpointOptions.forCheckpointWithDefaultLocation(),
            new MemCheckpointStreamFactory(Integer.MAX_VALUE));
        fail("Exception expected.");
    } catch (Exception e) {
        assertEquals(failingException, e.getCause());
    }

    // verify that the context has been closed, the operator snapshot result has been cancelled
    // and that all futures have been cancelled.
    verify(context).close();
    verify(operatorSnapshotResult).cancel();

    verify(futureKeyedStateHandle).cancel(anyBoolean());
    verify(futureOperatorStateHandle).cancel(anyBoolean());
    verify(futureKeyedStateHandle).cancel(anyBoolean());

    operator.close();
    operator.dispose();

    verify(operatorStateBackend).close();
    verify(keyedStateBackend).close();
    verify(operatorStateBackend).dispose();
    verify(keyedStateBackend).dispose();
}
Example #16
Source File: CEPOperatorTest.java From Flink-CEPplus with Apache License 2.0
@Test
public void testKeyedCEPOperatorNFAUpdateTimesWithRocksDB() throws Exception {
    String rocksDbPath = tempFolder.newFolder().getAbsolutePath();
    RocksDBStateBackend rocksDBStateBackend = new RocksDBStateBackend(new MemoryStateBackend());
    rocksDBStateBackend.setDbStoragePath(rocksDbPath);

    CepOperator<Event, Integer, Map<String, List<Event>>> operator =
        CepOperatorTestUtilities.getKeyedCepOpearator(true, new SimpleNFAFactory());
    OneInputStreamOperatorTestHarness<Event, Map<String, List<Event>>> harness =
        CepOperatorTestUtilities.getCepTestHarness(operator);

    try {
        harness.setStateBackend(rocksDBStateBackend);
        harness.open();

        final ValueState nfaOperatorState = (ValueState) Whitebox.<ValueState>getInternalState(operator, "computationStates");
        final ValueState nfaOperatorStateSpy = Mockito.spy(nfaOperatorState);
        Whitebox.setInternalState(operator, "computationStates", nfaOperatorStateSpy);

        Event startEvent = new Event(42, "c", 1.0);
        SubEvent middleEvent = new SubEvent(42, "a", 1.0, 10.0);
        Event endEvent = new Event(42, "b", 1.0);

        harness.processElement(new StreamRecord<>(startEvent, 1L));
        harness.processElement(new StreamRecord<>(new Event(42, "d", 1.0), 4L));
        harness.processElement(new StreamRecord<Event>(middleEvent, 4L));
        harness.processElement(new StreamRecord<>(endEvent, 4L));

        // verify the number of invocations NFA is updated
        Mockito.verify(nfaOperatorStateSpy, Mockito.times(3)).update(Mockito.any());

        // get and verify the output
        Queue<Object> result = harness.getOutput();

        assertEquals(1, result.size());

        verifyPattern(result.poll(), startEvent, middleEvent, endEvent);
    } finally {
        harness.close();
    }
}
Example #17
Source File: CheckpointCoordinatorTestingUtils.java From flink with Apache License 2.0
static ExecutionVertex mockExecutionVertex(
        ExecutionAttemptID attemptID,
        JobVertexID jobVertexID,
        List<OperatorID> jobVertexIDs,
        @Nullable LogicalSlot slot,
        int parallelism,
        int maxParallelism,
        ExecutionState state,
        ExecutionState ... successiveStates) {

    ExecutionVertex vertex = mock(ExecutionVertex.class);

    final Execution exec = spy(new Execution(
        mock(Executor.class),
        vertex,
        1,
        1L,
        1L,
        Time.milliseconds(500L)
    ));
    if (slot != null) {
        // is there a better way to do this?
        Whitebox.setInternalState(exec, "assignedResource", slot);
    }

    when(exec.getAttemptId()).thenReturn(attemptID);
    when(exec.getState()).thenReturn(state, successiveStates);

    when(vertex.getJobvertexId()).thenReturn(jobVertexID);
    when(vertex.getCurrentExecutionAttempt()).thenReturn(exec);
    when(vertex.getTotalNumberOfParallelSubtasks()).thenReturn(parallelism);
    when(vertex.getMaxParallelism()).thenReturn(maxParallelism);

    ExecutionJobVertex jobVertex = mock(ExecutionJobVertex.class);

    List<OperatorIDPair> operatorIDPairs = new ArrayList<>();
    for (OperatorID operatorID : jobVertexIDs) {
        operatorIDPairs.add(OperatorIDPair.generatedIDOnly(operatorID));
    }
    when(jobVertex.getOperatorIDs()).thenReturn(operatorIDPairs);

    when(vertex.getJobVertex()).thenReturn(jobVertex);

    return vertex;
}
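A side note on the "is there a better way to do this?" comment above: writing a field directly on a Mockito spy works because spy() copies the original object's field values into the proxy, so un-stubbed real methods observe the injected value. A minimal hedged sketch of that behaviour, using a hypothetical Holder class:

// Hypothetical class used only to illustrate field injection on a spy.
class Holder {
    private String value = "initial";

    String read() {
        return value;
    }
}

@Test
public void testFieldInjectionOnSpy() {
    Holder spyHolder = Mockito.spy(new Holder());
    Whitebox.setInternalState(spyHolder, "value", "injected");

    // the un-stubbed real method sees the injected private state
    assertEquals("injected", spyHolder.read());
}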