org.apache.flink.streaming.connectors.fs.SequenceFileWriter Java Examples
The following examples show how to use
org.apache.flink.streaming.connectors.fs.SequenceFileWriter.
You can go to the original project or source file by following the link above each example.
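Outside of these tests, SequenceFileWriter is normally attached to a BucketingSink in a streaming job. The following is a minimal sketch, not taken from the examples below: the method name, output path, bucketing format, and compression settings are illustrative assumptions, not the only valid configuration.

import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.connectors.fs.SequenceFileWriter;
import org.apache.flink.streaming.connectors.fs.bucketing.BucketingSink;
import org.apache.flink.streaming.connectors.fs.bucketing.DateTimeBucketer;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.io.Text;

// Hypothetical helper: wires a SequenceFileWriter into a BucketingSink for a
// stream of (IntWritable, Text) pairs and attaches it to the given DataStream.
private static void addSequenceFileSink(DataStream<Tuple2<IntWritable, Text>> input, String outputPath) {
	BucketingSink<Tuple2<IntWritable, Text>> sink = new BucketingSink<>(outputPath);
	// Bucket output files by wall-clock time; the format string is only an example.
	sink.setBucketer(new DateTimeBucketer<Tuple2<IntWritable, Text>>("yyyy-MM-dd--HHmm"));
	// Write block-compressed Hadoop SequenceFiles with the default codec; the
	// no-argument SequenceFileWriter() constructor writes uncompressed records instead.
	sink.setWriter(new SequenceFileWriter<IntWritable, Text>(
		"org.apache.hadoop.io.compress.DefaultCodec", SequenceFile.CompressionType.BLOCK));
	input.addSink(sink);
}

When the sink is added via addSink, Flink passes the stream's type information to the writer automatically, which is why the tests below have to call setInputType explicitly on the test harness instead.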
Example #1
Source File: BucketingSinkTest.java (from Flink-CEPplus, Apache License 2.0)
/**
 * This tests {@link SequenceFileWriter}
 * with non-rolling output and without compression.
 */
@Test
public void testNonRollingSequenceFileWithoutCompressionWriter() throws Exception {
	final String outPath = hdfsURI + "/seq-no-comp-non-rolling-out";

	final int numElements = 20;

	BucketingSink<Tuple2<IntWritable, Text>> sink = new BucketingSink<Tuple2<IntWritable, Text>>(outPath)
		.setWriter(new SequenceFileWriter<IntWritable, Text>())
		.setBucketer(new BasePathBucketer<Tuple2<IntWritable, Text>>())
		.setPartPrefix(PART_PREFIX)
		.setPendingPrefix("")
		.setPendingSuffix("");

	sink.setInputType(TypeInformation.of(new TypeHint<Tuple2<IntWritable, Text>>(){}), new ExecutionConfig());

	OneInputStreamOperatorTestHarness<Tuple2<IntWritable, Text>, Object> testHarness = createTestSink(sink, 1, 0);

	testHarness.setProcessingTime(0L);

	testHarness.setup();
	testHarness.open();

	for (int i = 0; i < numElements; i++) {
		testHarness.processElement(new StreamRecord<>(Tuple2.of(
			new IntWritable(i),
			new Text("message #" + Integer.toString(i))
		)));
	}

	testHarness.close();

	FSDataInputStream inStream = dfs.open(new Path(outPath + "/" + PART_PREFIX + "-0-0"));

	SequenceFile.Reader reader = new SequenceFile.Reader(inStream, 1000, 0, 100000, new Configuration());

	IntWritable intWritable = new IntWritable();
	Text txt = new Text();

	for (int i = 0; i < numElements; i++) {
		reader.next(intWritable, txt);
		Assert.assertEquals(i, intWritable.get());
		Assert.assertEquals("message #" + i, txt.toString());
	}

	reader.close();
	inStream.close();
}
Example #2
Source File: BucketingSinkTest.java (from flink, Apache License 2.0)
/**
 * This tests {@link SequenceFileWriter}
 * with non-rolling output and without compression.
 */
@Test
public void testNonRollingSequenceFileWithoutCompressionWriter() throws Exception {
	final String outPath = hdfsURI + "/seq-no-comp-non-rolling-out";

	final int numElements = 20;

	BucketingSink<Tuple2<IntWritable, Text>> sink = new BucketingSink<Tuple2<IntWritable, Text>>(outPath)
		.setWriter(new SequenceFileWriter<IntWritable, Text>())
		.setBucketer(new BasePathBucketer<Tuple2<IntWritable, Text>>())
		.setPartPrefix(PART_PREFIX)
		.setPendingPrefix("")
		.setPendingSuffix("");

	sink.setInputType(TypeInformation.of(new TypeHint<Tuple2<IntWritable, Text>>(){}), new ExecutionConfig());

	OneInputStreamOperatorTestHarness<Tuple2<IntWritable, Text>, Object> testHarness = createTestSink(sink, 1, 0);

	testHarness.setProcessingTime(0L);

	testHarness.setup();
	testHarness.open();

	for (int i = 0; i < numElements; i++) {
		testHarness.processElement(new StreamRecord<>(Tuple2.of(
			new IntWritable(i),
			new Text("message #" + Integer.toString(i))
		)));
	}

	testHarness.close();

	FSDataInputStream inStream = dfs.open(new Path(outPath + "/" + PART_PREFIX + "-0-0"));

	SequenceFile.Reader reader = new SequenceFile.Reader(inStream, 1000, 0, 100000, new Configuration());

	IntWritable intWritable = new IntWritable();
	Text txt = new Text();

	for (int i = 0; i < numElements; i++) {
		reader.next(intWritable, txt);
		Assert.assertEquals(i, intWritable.get());
		Assert.assertEquals("message #" + i, txt.toString());
	}

	reader.close();
	inStream.close();
}