Java Code Examples for org.apache.flink.core.testutils.OneShotLatch#isTriggered()
The following examples show how to use org.apache.flink.core.testutils.OneShotLatch#isTriggered(). The examples are taken from open-source projects; the originating project, source file, and license are noted above each example.
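Before the examples, here is a minimal, self-contained sketch of the pattern they all share: a background thread calls trigger() on the latch when it reaches the point of interest, while the test thread consults isTriggered() and only blocks on await() if the latch has not fired yet. The class and thread names below are illustrative, not taken from the examples.

import org.apache.flink.core.testutils.OneShotLatch;

public class OneShotLatchUsageSketch {

    public static void main(String[] args) throws Exception {
        final OneShotLatch latch = new OneShotLatch();

        // Background work that signals a milestone by triggering the latch exactly once.
        Thread worker = new Thread(() -> {
            // ... perform some work ...
            latch.trigger();
        });
        worker.start();

        // The guard used throughout the examples: skip the blocking await()
        // when the latch has already been triggered.
        if (!latch.isTriggered()) {
            latch.await();
        }

        worker.join();
        System.out.println("Latch triggered: " + latch.isTriggered());
    }
}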
Example 1
Source File: ContinuousFileProcessingTest.java From flink with Apache License 2.0
@Test
public void testProcessOnce() throws Exception {
    String testBasePath = hdfsURI + "/" + UUID.randomUUID() + "/";

    final OneShotLatch latch = new OneShotLatch();

    // create a single file in the directory
    Tuple2<org.apache.hadoop.fs.Path, String> bootstrap =
        createFileAndFillWithData(testBasePath, "file", NO_OF_FILES + 1, "This is test line.");
    Assert.assertTrue(hdfs.exists(bootstrap.f0));

    // the source is supposed to read only this file.
    final Set<String> filesToBeRead = new TreeSet<>();
    filesToBeRead.add(bootstrap.f0.getName());

    TextInputFormat format = new TextInputFormat(new Path(testBasePath));
    format.setFilesFilter(FilePathFilter.createDefaultFilter());

    final ContinuousFileMonitoringFunction<String> monitoringFunction =
        createTestContinuousFileMonitoringFunction(format, FileProcessingMode.PROCESS_ONCE);

    final FileVerifyingSourceContext context = new FileVerifyingSourceContext(latch, monitoringFunction);

    final Thread t = new Thread() {
        @Override
        public void run() {
            try {
                monitoringFunction.open(new Configuration());
                monitoringFunction.run(context);

                // we would never arrive here if we were in
                // PROCESS_CONTINUOUSLY mode.

                // this will trigger the latch
                context.close();
            } catch (Exception e) {
                Assert.fail(e.getMessage());
            }
        }
    };
    t.start();

    if (!latch.isTriggered()) {
        latch.await();
    }

    // create some additional files that should be processed in the case of PROCESS_CONTINUOUSLY
    final org.apache.hadoop.fs.Path[] filesCreated = new org.apache.hadoop.fs.Path[NO_OF_FILES];
    for (int i = 0; i < NO_OF_FILES; i++) {
        Tuple2<org.apache.hadoop.fs.Path, String> ignoredFile =
            createFileAndFillWithData(testBasePath, "file", i, "This is test line.");
        filesCreated[i] = ignoredFile.f0;
    }

    // wait until the monitoring thread exits
    t.join();

    Assert.assertArrayEquals(filesToBeRead.toArray(), context.getSeenFiles().toArray());

    // finally delete the files created for the test.
    hdfs.delete(bootstrap.f0, false);
    for (org.apache.hadoop.fs.Path path: filesCreated) {
        hdfs.delete(path, false);
    }
}
Example 2
Source File: ContinuousFileProcessingTest.java From flink with Apache License 2.0
@Test
public void testProcessContinuously() throws Exception {
    String testBasePath = hdfsURI + "/" + UUID.randomUUID() + "/";

    final OneShotLatch latch = new OneShotLatch();

    // create a single file in the directory
    Tuple2<org.apache.hadoop.fs.Path, String> bootstrap =
        createFileAndFillWithData(testBasePath, "file", NO_OF_FILES + 1, "This is test line.");
    Assert.assertTrue(hdfs.exists(bootstrap.f0));

    final Set<String> filesToBeRead = new TreeSet<>();
    filesToBeRead.add(bootstrap.f0.getName());

    TextInputFormat format = new TextInputFormat(new Path(testBasePath));
    format.setFilesFilter(FilePathFilter.createDefaultFilter());

    final ContinuousFileMonitoringFunction<String> monitoringFunction =
        createTestContinuousFileMonitoringFunction(format, FileProcessingMode.PROCESS_CONTINUOUSLY);

    final int totalNoOfFilesToBeRead = NO_OF_FILES + 1; // 1 for the bootstrap + NO_OF_FILES

    final FileVerifyingSourceContext context =
        new FileVerifyingSourceContext(latch, monitoringFunction, 1, totalNoOfFilesToBeRead);

    final Thread t = new Thread() {
        @Override
        public void run() {
            try {
                monitoringFunction.open(new Configuration());
                monitoringFunction.run(context);
            } catch (Exception e) {
                Assert.fail(e.getMessage());
            }
        }
    };
    t.start();

    if (!latch.isTriggered()) {
        latch.await();
    }

    // create some additional files that will be processed in the case of PROCESS_CONTINUOUSLY
    final org.apache.hadoop.fs.Path[] filesCreated = new org.apache.hadoop.fs.Path[NO_OF_FILES];
    for (int i = 0; i < NO_OF_FILES; i++) {
        Tuple2<org.apache.hadoop.fs.Path, String> file =
            createFileAndFillWithData(testBasePath, "file", i, "This is test line.");
        filesCreated[i] = file.f0;
        filesToBeRead.add(file.f0.getName());
    }

    // wait until the monitoring thread exits
    t.join();

    Assert.assertArrayEquals(filesToBeRead.toArray(), context.getSeenFiles().toArray());

    // finally delete the files created for the test.
    hdfs.delete(bootstrap.f0, false);
    for (org.apache.hadoop.fs.Path path: filesCreated) {
        hdfs.delete(path, false);
    }
}
Example 3
Source File: ContinuousFileProcessingTest.java From flink with Apache License 2.0
@Test
public void testFunctionRestore() throws Exception {
    String testBasePath = hdfsURI + "/" + UUID.randomUUID() + "/";

    org.apache.hadoop.fs.Path path = null;
    long fileModTime = Long.MIN_VALUE;

    for (int i = 0; i < 1; i++) {
        Tuple2<org.apache.hadoop.fs.Path, String> file =
            createFileAndFillWithData(testBasePath, "file", i, "This is test line.");
        path = file.f0;
        fileModTime = hdfs.getFileStatus(file.f0).getModificationTime();
    }

    TextInputFormat format = new TextInputFormat(new Path(testBasePath));

    final ContinuousFileMonitoringFunction<String> monitoringFunction =
        createTestContinuousFileMonitoringFunction(format, FileProcessingMode.PROCESS_CONTINUOUSLY);

    StreamSource<TimestampedFileInputSplit, ContinuousFileMonitoringFunction<String>> src =
        new StreamSource<>(monitoringFunction);

    final AbstractStreamOperatorTestHarness<TimestampedFileInputSplit> testHarness =
        new AbstractStreamOperatorTestHarness<>(src, 1, 1, 0);
    testHarness.open();

    final Throwable[] error = new Throwable[1];

    final OneShotLatch latch = new OneShotLatch();

    final DummySourceContext sourceContext = new DummySourceContext() {
        @Override
        public void collect(TimestampedFileInputSplit element) {
            latch.trigger();
        }
    };

    // run the source asynchronously
    Thread runner = new Thread() {
        @Override
        public void run() {
            try {
                monitoringFunction.run(sourceContext);
            } catch (Throwable t) {
                t.printStackTrace();
                error[0] = t;
            }
        }
    };
    runner.start();

    // first condition for the source to have updated its state: emit at least one element
    if (!latch.isTriggered()) {
        latch.await();
    }

    // second condition for the source to have updated its state: it's not on the lock anymore,
    // this means it has processed all the splits and updated its state.
    synchronized (sourceContext.getCheckpointLock()) {}

    OperatorSubtaskState snapshot = testHarness.snapshot(0, 0);
    monitoringFunction.cancel();
    runner.join();

    testHarness.close();

    final ContinuousFileMonitoringFunction<String> monitoringFunctionCopy =
        createTestContinuousFileMonitoringFunction(format, FileProcessingMode.PROCESS_CONTINUOUSLY);

    StreamSource<TimestampedFileInputSplit, ContinuousFileMonitoringFunction<String>> srcCopy =
        new StreamSource<>(monitoringFunctionCopy);

    AbstractStreamOperatorTestHarness<TimestampedFileInputSplit> testHarnessCopy =
        new AbstractStreamOperatorTestHarness<>(srcCopy, 1, 1, 0);
    testHarnessCopy.initializeState(snapshot);
    testHarnessCopy.open();

    Assert.assertNull(error[0]);
    Assert.assertEquals(fileModTime, monitoringFunctionCopy.getGlobalModificationTime());

    hdfs.delete(path, false);
}
Example 4
Source File: ContinuousFileProcessingMigrationTest.java From flink with Apache License 2.0
/**
 * Manually run this to write binary snapshot data. Remove @Ignore to run.
 */
@Ignore
@Test
public void writeMonitoringSourceSnapshot() throws Exception {
    File testFolder = tempFolder.newFolder();

    long fileModTime = Long.MIN_VALUE;
    for (int i = 0; i < 1; i++) {
        Tuple2<File, String> file = createFileAndFillWithData(testFolder, "file", i, "This is test line.");
        fileModTime = file.f0.lastModified();
    }

    TextInputFormat format = new TextInputFormat(new Path(testFolder.getAbsolutePath()));

    final ContinuousFileMonitoringFunction<String> monitoringFunction =
        new ContinuousFileMonitoringFunction<>(format, FileProcessingMode.PROCESS_CONTINUOUSLY, 1, INTERVAL);

    StreamSource<TimestampedFileInputSplit, ContinuousFileMonitoringFunction<String>> src =
        new StreamSource<>(monitoringFunction);

    final AbstractStreamOperatorTestHarness<TimestampedFileInputSplit> testHarness =
        new AbstractStreamOperatorTestHarness<>(src, 1, 1, 0);
    testHarness.open();

    final Throwable[] error = new Throwable[1];

    final OneShotLatch latch = new OneShotLatch();

    // run the source asynchronously
    Thread runner = new Thread() {
        @Override
        public void run() {
            try {
                monitoringFunction.run(new DummySourceContext() {
                    @Override
                    public void collect(TimestampedFileInputSplit element) {
                        latch.trigger();
                    }

                    @Override
                    public void markAsTemporarilyIdle() {
                    }
                });
            } catch (Throwable t) {
                t.printStackTrace();
                error[0] = t;
            }
        }
    };
    runner.start();

    if (!latch.isTriggered()) {
        latch.await();
    }

    final OperatorSubtaskState snapshot;
    synchronized (testHarness.getCheckpointLock()) {
        snapshot = testHarness.snapshot(0L, 0L);
    }

    OperatorSnapshotUtil.writeStateHandle(
        snapshot,
        "src/test/resources/monitoring-function-migration-test-" +
            fileModTime + "-flink" + flinkGenerateSavepointVersion + "-snapshot");

    monitoringFunction.cancel();
    runner.join();

    testHarness.close();
}
Example 5
Source File: FlinkKafkaConsumerBaseMigrationTest.java From flink with Apache License 2.0
private void writeSnapshot(String path, HashMap<KafkaTopicPartition, Long> state) throws Exception {
    final OneShotLatch latch = new OneShotLatch();

    final AbstractFetcher<String, ?> fetcher = mock(AbstractFetcher.class);

    doAnswer(new Answer<Void>() {
        @Override
        public Void answer(InvocationOnMock invocation) throws Throwable {
            latch.trigger();
            return null;
        }
    }).when(fetcher).runFetchLoop();

    when(fetcher.snapshotCurrentState()).thenReturn(state);

    final List<KafkaTopicPartition> partitions = new ArrayList<>(PARTITION_STATE.keySet());

    final DummyFlinkKafkaConsumer<String> consumerFunction =
        new DummyFlinkKafkaConsumer<>(fetcher, TOPICS, partitions, FlinkKafkaConsumerBase.PARTITION_DISCOVERY_DISABLED);

    StreamSource<String, DummyFlinkKafkaConsumer<String>> consumerOperator =
        new StreamSource<>(consumerFunction);

    final AbstractStreamOperatorTestHarness<String> testHarness =
        new AbstractStreamOperatorTestHarness<>(consumerOperator, 1, 1, 0);

    testHarness.setTimeCharacteristic(TimeCharacteristic.ProcessingTime);

    testHarness.setup();
    testHarness.open();

    final Throwable[] error = new Throwable[1];

    // run the source asynchronously
    Thread runner = new Thread() {
        @Override
        public void run() {
            try {
                consumerFunction.run(new DummySourceContext() {
                    @Override
                    public void collect(String element) {
                    }
                });
            } catch (Throwable t) {
                t.printStackTrace();
                error[0] = t;
            }
        }
    };
    runner.start();

    if (!latch.isTriggered()) {
        latch.await();
    }

    final OperatorSubtaskState snapshot;
    synchronized (testHarness.getCheckpointLock()) {
        snapshot = testHarness.snapshot(0L, 0L);
    }

    OperatorSnapshotUtil.writeStateHandle(snapshot, path);

    consumerOperator.close();
    runner.join();
}
Example 6
Source File: ContinuousFileProcessingRescalingTest.java From flink with Apache License 2.0
@Test
public void testReaderScalingUp() throws Exception {
    // simulates the scenario of scaling up from 1 to 2 instances

    final OneShotLatch waitingLatch1 = new OneShotLatch();
    final OneShotLatch triggerLatch1 = new OneShotLatch();

    BlockingFileInputFormat format1 = new BlockingFileInputFormat(
        triggerLatch1, waitingLatch1, new Path("test"), 20, 5);
    FileInputSplit[] splits = format1.createInputSplits(2);

    OneInputStreamOperatorTestHarness<TimestampedFileInputSplit, String> testHarness1 = getTestHarness(format1, 1, 0);
    testHarness1.open();

    testHarness1.processElement(new StreamRecord<>(getTimestampedSplit(0, splits[0])));
    testHarness1.processElement(new StreamRecord<>(getTimestampedSplit(1, splits[1])));

    // wait until its arrives to element 5
    if (!triggerLatch1.isTriggered()) {
        triggerLatch1.await();
    }

    OperatorSubtaskState snapshot = testHarness1.snapshot(0, 0);

    // this will be the init state for new instance-0
    OperatorSubtaskState initState1 =
        AbstractStreamOperatorTestHarness.repartitionOperatorState(snapshot, maxParallelism, 1, 2, 0);

    // this will be the init state for new instance-1
    OperatorSubtaskState initState2 =
        AbstractStreamOperatorTestHarness.repartitionOperatorState(snapshot, maxParallelism, 1, 2, 1);

    // 1) clear the output of instance so that we can compare it with one created by the new instances, and
    // 2) let the operator process the rest of its state
    testHarness1.getOutput().clear();
    waitingLatch1.trigger();

    // create the second instance and let it process the second split till element 15
    final OneShotLatch triggerLatch2 = new OneShotLatch();
    final OneShotLatch waitingLatch2 = new OneShotLatch();

    BlockingFileInputFormat format2 = new BlockingFileInputFormat(
        triggerLatch2, waitingLatch2, new Path("test"), 20, 15);

    OneInputStreamOperatorTestHarness<TimestampedFileInputSplit, String> testHarness2 = getTestHarness(format2, 2, 0);
    testHarness2.setup();
    testHarness2.initializeState(initState1);
    testHarness2.open();

    BlockingFileInputFormat format3 = new BlockingFileInputFormat(
        triggerLatch2, waitingLatch2, new Path("test"), 20, 15);

    OneInputStreamOperatorTestHarness<TimestampedFileInputSplit, String> testHarness3 = getTestHarness(format3, 2, 1);
    testHarness3.setup();
    testHarness3.initializeState(initState2);
    testHarness3.open();

    triggerLatch2.trigger();
    waitingLatch2.trigger();

    // and wait for the processing to finish
    synchronized (testHarness1.getCheckpointLock()) {
        testHarness1.close();
    }
    synchronized (testHarness2.getCheckpointLock()) {
        testHarness2.close();
    }
    synchronized (testHarness3.getCheckpointLock()) {
        testHarness3.close();
    }

    Queue<Object> expectedResult = new ArrayDeque<>();
    putElementsInQ(expectedResult, testHarness1.getOutput());

    Queue<Object> actualResult = new ArrayDeque<>();
    putElementsInQ(actualResult, testHarness2.getOutput());
    putElementsInQ(actualResult, testHarness3.getOutput());

    Assert.assertEquals(35, actualResult.size());
    Assert.assertArrayEquals(expectedResult.toArray(), actualResult.toArray());
}
Example 7
Source File: ContinuousFileProcessingRescalingTest.java From flink with Apache License 2.0
@Test
public void testReaderScalingDown() throws Exception {
    // simulates the scenario of scaling down from 2 to 1 instances

    final OneShotLatch waitingLatch = new OneShotLatch();

    // create the first instance and let it process the first split till element 5
    final OneShotLatch triggerLatch1 = new OneShotLatch();

    BlockingFileInputFormat format1 = new BlockingFileInputFormat(
        triggerLatch1, waitingLatch, new Path("test"), 20, 5);
    FileInputSplit[] splits = format1.createInputSplits(2);

    OneInputStreamOperatorTestHarness<TimestampedFileInputSplit, String> testHarness1 = getTestHarness(format1, 2, 0);
    testHarness1.open();
    testHarness1.processElement(new StreamRecord<>(getTimestampedSplit(0, splits[0])));

    // wait until its arrives to element 5
    if (!triggerLatch1.isTriggered()) {
        triggerLatch1.await();
    }

    // create the second instance and let it process the second split till element 15
    final OneShotLatch triggerLatch2 = new OneShotLatch();

    BlockingFileInputFormat format2 = new BlockingFileInputFormat(
        triggerLatch2, waitingLatch, new Path("test"), 20, 15);

    OneInputStreamOperatorTestHarness<TimestampedFileInputSplit, String> testHarness2 = getTestHarness(format2, 2, 1);
    testHarness2.open();
    testHarness2.processElement(new StreamRecord<>(getTimestampedSplit(0, splits[1])));

    // wait until its arrives to element 15
    if (!triggerLatch2.isTriggered()) {
        triggerLatch2.await();
    }

    // 1) clear the outputs of the two previous instances so that
    // we can compare their newly produced outputs with the merged one
    testHarness1.getOutput().clear();
    testHarness2.getOutput().clear();

    // 2) take the snapshots from the previous instances and merge them
    // into a new one which will be then used to initialize a third instance
    OperatorSubtaskState mergedState = AbstractStreamOperatorTestHarness.repackageState(
        testHarness2.snapshot(0, 0),
        testHarness1.snapshot(0, 0)
    );

    // 3) and repartition to get the initialized state when scaling down.
    OperatorSubtaskState initState =
        AbstractStreamOperatorTestHarness.repartitionOperatorState(mergedState, maxParallelism, 2, 1, 0);

    // create the third instance
    final OneShotLatch wLatch = new OneShotLatch();
    final OneShotLatch tLatch = new OneShotLatch();

    BlockingFileInputFormat format = new BlockingFileInputFormat(wLatch, tLatch, new Path("test"), 20, 5);

    OneInputStreamOperatorTestHarness<TimestampedFileInputSplit, String> testHarness = getTestHarness(format, 1, 0);

    // initialize the state of the new operator with the constructed by
    // combining the partial states of the instances above.
    testHarness.initializeState(initState);
    testHarness.open();

    // now restart the waiting operators
    wLatch.trigger();
    tLatch.trigger();
    waitingLatch.trigger();

    // and wait for the processing to finish
    synchronized (testHarness1.getCheckpointLock()) {
        testHarness1.close();
    }
    synchronized (testHarness2.getCheckpointLock()) {
        testHarness2.close();
    }
    synchronized (testHarness.getCheckpointLock()) {
        testHarness.close();
    }

    Queue<Object> expectedResult = new ArrayDeque<>();
    putElementsInQ(expectedResult, testHarness1.getOutput());
    putElementsInQ(expectedResult, testHarness2.getOutput());

    Queue<Object> actualResult = new ArrayDeque<>();
    putElementsInQ(actualResult, testHarness.getOutput());

    Assert.assertEquals(20, actualResult.size());
    Assert.assertArrayEquals(expectedResult.toArray(), actualResult.toArray());
}