org.apache.flink.streaming.util.FiniteTestSource Java Examples
The following examples show how to use org.apache.flink.streaming.util.FiniteTestSource.
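FiniteTestSource is a test-scope source function: it emits a fixed collection of elements and then terminates, so streaming jobs in integration tests run to completion and their output can be validated. In the Flink versions these examples come from, the source also waits for checkpoints and re-emits its data once before finishing, which is why the expected results in some tests contain each record twice. The sketch below shows the common pattern shared by the examples, with a print sink standing in for the StreamingFileSink; it assumes FiniteTestSource is available from Flink's test utilities (the flink-streaming-java test-jar) and is only a minimal illustration, not one of the listed source files.

import java.util.Arrays;
import java.util.List;

import org.apache.flink.api.common.typeinfo.TypeInformation;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.util.FiniteTestSource;

public class FiniteTestSourceSketch {

    public static void main(String[] args) throws Exception {
        final List<String> data = Arrays.asList("a", "b", "c");

        final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        // The examples below all use parallelism 1 and a short checkpoint interval:
        // FiniteTestSource waits for checkpoints before re-emitting its data and finishing.
        env.setParallelism(1);
        env.enableCheckpointing(100);

        // Wrap the finite test data in a FiniteTestSource and provide type information explicitly.
        DataStream<String> stream = env.addSource(
                new FiniteTestSource<>(data), TypeInformation.of(String.class));

        // A print sink stands in for the StreamingFileSink used in the examples;
        // each element is expected to appear twice because the source re-emits its data.
        stream.print();

        // The job terminates on its own once the source has finished.
        env.execute("FiniteTestSource sketch");
    }
}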
Example #1
Source File: OrcBulkWriterITCase.java From flink with Apache License 2.0
@Test
public void testOrcBulkWriter() throws Exception {
    final File outDir = TEMPORARY_FOLDER.newFolder();
    final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    final Properties writerProps = new Properties();
    writerProps.setProperty("orc.compress", "LZ4");

    final OrcBulkWriterFactory<Record> factory = new OrcBulkWriterFactory<>(
            new RecordVectorizer(schema), writerProps, new Configuration());

    env.setParallelism(1);
    env.enableCheckpointing(100);

    DataStream<Record> stream = env.addSource(
            new FiniteTestSource<>(testData), TypeInformation.of(Record.class));
    stream.map(str -> str)
            .addSink(StreamingFileSink
                    .forBulkFormat(new Path(outDir.toURI()), factory)
                    .build());

    env.execute();

    OrcBulkWriterTestUtil.validate(outDir, testData);
}
Example #2
Source File: ParquetStreamingFileSinkITCase.java From flink with Apache License 2.0
@Test
public void testWriteParquetAvroReflect() throws Exception {
    final File folder = TEMPORARY_FOLDER.newFolder();

    final List<Datum> data = Arrays.asList(
            new Datum("a", 1), new Datum("b", 2), new Datum("c", 3));

    final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    env.setParallelism(1);
    env.enableCheckpointing(100);

    DataStream<Datum> stream = env.addSource(
            new FiniteTestSource<>(data), TypeInformation.of(Datum.class));

    stream.addSink(
            StreamingFileSink.forBulkFormat(
                    Path.fromLocalFile(folder),
                    ParquetAvroWriters.forReflectRecord(Datum.class))
            .build());

    env.execute();

    validateResults(folder, ReflectData.get(), data);
}
Example #3
Source File: ParquetStreamingFileSinkITCase.java From Flink-CEPplus with Apache License 2.0
@Test
public void testWriteParquetAvroReflect() throws Exception {
    final File folder = TEMPORARY_FOLDER.newFolder();

    final List<Datum> data = Arrays.asList(
            new Datum("a", 1), new Datum("b", 2), new Datum("c", 3));

    final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    env.setParallelism(1);
    env.enableCheckpointing(100);

    DataStream<Datum> stream = env.addSource(
            new FiniteTestSource<>(data), TypeInformation.of(Datum.class));

    stream.addSink(
            StreamingFileSink.forBulkFormat(
                    Path.fromLocalFile(folder),
                    ParquetAvroWriters.forReflectRecord(Datum.class))
            .build());

    env.execute();

    validateResults(folder, ReflectData.get(), data);
}
Example #4
Source File: ParquetStreamingFileSinkITCase.java From flink with Apache License 2.0
@Test
public void testWriteParquetAvroReflect() throws Exception {
    final File folder = TEMPORARY_FOLDER.newFolder();

    final List<Datum> data = Arrays.asList(
            new Datum("a", 1), new Datum("b", 2), new Datum("c", 3));

    final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    env.setParallelism(1);
    env.enableCheckpointing(100);

    DataStream<Datum> stream = env.addSource(
            new FiniteTestSource<>(data), TypeInformation.of(Datum.class));

    stream.addSink(
            StreamingFileSink.forBulkFormat(
                    Path.fromLocalFile(folder),
                    ParquetAvroWriters.forReflectRecord(Datum.class))
            .build());

    env.execute();

    validateResults(folder, ReflectData.get(), data);
}
Example #5
Source File: AvroStreamingFileSinkITCase.java From flink with Apache License 2.0
@Test
public void testWriteAvroReflect() throws Exception {
    File folder = TEMPORARY_FOLDER.newFolder();

    List<Datum> data = Arrays.asList(
            new Datum("a", 1), new Datum("b", 2), new Datum("c", 3));

    StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    env.setParallelism(1);
    env.enableCheckpointing(100);

    AvroWriterFactory<Datum> avroWriterFactory = AvroWriters.forReflectRecord(Datum.class);

    DataStream<Datum> stream = env.addSource(
            new FiniteTestSource<>(data), TypeInformation.of(Datum.class));

    stream.addSink(StreamingFileSink.forBulkFormat(
            Path.fromLocalFile(folder), avroWriterFactory).build());

    env.execute();

    validateResults(folder, new ReflectDatumReader<>(Datum.class), data);
}
Example #6
Source File: AvroStreamingFileSinkITCase.java From flink with Apache License 2.0
@Test
public void testWriteAvroGeneric() throws Exception {
    File folder = TEMPORARY_FOLDER.newFolder();

    Schema schema = Address.getClassSchema();
    Collection<GenericRecord> data = new GenericTestDataCollection();

    StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    env.setParallelism(1);
    env.enableCheckpointing(100);

    AvroWriterFactory<GenericRecord> avroWriterFactory = AvroWriters.forGenericRecord(schema);

    DataStream<GenericRecord> stream = env.addSource(
            new FiniteTestSource<>(data), new GenericRecordAvroTypeInfo(schema));

    stream.addSink(StreamingFileSink.forBulkFormat(
            Path.fromLocalFile(folder), avroWriterFactory).build());

    env.execute();

    validateResults(folder, new GenericDatumReader<>(schema), new ArrayList<>(data));
}
Example #7
Source File: CompressionFactoryITCase.java From flink with Apache License 2.0
@Test
public void testWriteCompressedFile() throws Exception {
    final File folder = TEMPORARY_FOLDER.newFolder();
    final Path testPath = Path.fromLocalFile(folder);

    final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    env.setParallelism(1);
    env.enableCheckpointing(100);

    DataStream<String> stream = env.addSource(
            new FiniteTestSource<>(testData),
            TypeInformation.of(String.class));

    stream.map(str -> str).addSink(
            StreamingFileSink.forBulkFormat(
                    testPath,
                    CompressWriters.forExtractor(new DefaultExtractor<String>())
                            .withHadoopCompression(TEST_CODEC_NAME))
            .build());

    env.execute();

    validateResults(folder, testData, new CompressionCodecFactory(configuration).getCodecByName(TEST_CODEC_NAME));
}
Example #8
Source File: AvroStreamingFileSinkITCase.java From flink with Apache License 2.0
@Test
public void testWriteAvroSpecific() throws Exception {
    File folder = TEMPORARY_FOLDER.newFolder();

    List<Address> data = Arrays.asList(
            new Address(1, "a", "b", "c", "12345"),
            new Address(2, "p", "q", "r", "12345"),
            new Address(3, "x", "y", "z", "12345"));

    StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    env.setParallelism(1);
    env.enableCheckpointing(100);

    AvroWriterFactory<Address> avroWriterFactory = AvroWriters.forSpecificRecord(Address.class);

    DataStream<Address> stream = env.addSource(
            new FiniteTestSource<>(data), TypeInformation.of(Address.class));

    stream.addSink(StreamingFileSink.forBulkFormat(
            Path.fromLocalFile(folder), avroWriterFactory).build());

    env.execute();

    validateResults(folder, new SpecificDatumReader<>(Address.class), data);
}
Example #9
Source File: SequenceStreamingFileSinkITCase.java From flink with Apache License 2.0
@Test
public void testWriteSequenceFile() throws Exception {
    final File folder = TEMPORARY_FOLDER.newFolder();
    final Path testPath = Path.fromLocalFile(folder);

    final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    env.setParallelism(1);
    env.enableCheckpointing(100);

    DataStream<Tuple2<Long, String>> stream = env.addSource(
            new FiniteTestSource<>(testData),
            TypeInformation.of(new TypeHint<Tuple2<Long, String>>() {}));

    stream.map(new MapFunction<Tuple2<Long, String>, Tuple2<LongWritable, Text>>() {
        @Override
        public Tuple2<LongWritable, Text> map(Tuple2<Long, String> value) throws Exception {
            return new Tuple2<>(new LongWritable(value.f0), new Text(value.f1));
        }
    }).addSink(
            StreamingFileSink.forBulkFormat(
                    testPath,
                    new SequenceFileWriterFactory<>(configuration, LongWritable.class, Text.class, "BZip2"))
            .build());

    env.execute();

    validateResults(folder, testData);
}
Example #10
Source File: ParquetStreamingFileSinkITCase.java From flink with Apache License 2.0
@Test
public void testWriteParquetAvroGeneric() throws Exception {
    final File folder = TEMPORARY_FOLDER.newFolder();

    final Schema schema = Address.getClassSchema();
    final Collection<GenericRecord> data = new GenericTestDataCollection();

    final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    env.setParallelism(1);
    env.enableCheckpointing(100);

    DataStream<GenericRecord> stream = env.addSource(
            new FiniteTestSource<>(data), new GenericRecordAvroTypeInfo(schema));

    stream.addSink(
            StreamingFileSink.forBulkFormat(
                    Path.fromLocalFile(folder),
                    ParquetAvroWriters.forGenericRecord(schema))
            .build());

    env.execute();

    List<Address> expected = Arrays.asList(
            new Address(1, "a", "b", "c", "12345"),
            new Address(2, "x", "y", "z", "98765"));

    validateResults(folder, SpecificData.get(), expected);
}
Example #11
Source File: ParquetStreamingFileSinkITCase.java From flink with Apache License 2.0
@Test
public void testWriteParquetAvroSpecific() throws Exception {
    final File folder = TEMPORARY_FOLDER.newFolder();

    final List<Address> data = Arrays.asList(
            new Address(1, "a", "b", "c", "12345"),
            new Address(2, "p", "q", "r", "12345"),
            new Address(3, "x", "y", "z", "12345"));

    final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    env.setParallelism(1);
    env.enableCheckpointing(100);

    DataStream<Address> stream = env.addSource(
            new FiniteTestSource<>(data), TypeInformation.of(Address.class));

    stream.addSink(
            StreamingFileSink.forBulkFormat(
                    Path.fromLocalFile(folder),
                    ParquetAvroWriters.forSpecificRecord(Address.class))
            .build());

    env.execute();

    validateResults(folder, SpecificData.get(), data);
}
Example #12
Source File: HadoopPathBasedPartFileWriterTest.java From flink with Apache License 2.0
@Test
public void testWriteFile() throws Exception {
    File file = TEMPORARY_FOLDER.newFolder();
    Path basePath = new Path(file.toURI());

    List<String> data = Arrays.asList(
            "first line",
            "second line",
            "third line");

    StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    env.setParallelism(1);
    env.enableCheckpointing(100);

    DataStream<String> stream = env.addSource(
            new FiniteTestSource<>(data), TypeInformation.of(String.class));
    Configuration configuration = new Configuration();

    HadoopPathBasedBulkFormatBuilder<String, String, ?> builder =
            new HadoopPathBasedBulkFormatBuilder<>(
                    basePath,
                    new TestHadoopPathBasedBulkWriterFactory(),
                    configuration,
                    new DateTimeBucketAssigner<>());
    TestStreamingFileSinkFactory<String> streamingFileSinkFactory = new TestStreamingFileSinkFactory<>();
    stream.addSink(streamingFileSinkFactory.createSink(builder, 1000));

    env.execute();
    validateResult(data, configuration, basePath);
}
Example #13
Source File: SequenceStreamingFileSinkITCase.java From Flink-CEPplus with Apache License 2.0
@Test
public void testWriteSequenceFile() throws Exception {
    final File folder = TEMPORARY_FOLDER.newFolder();
    final Path testPath = Path.fromLocalFile(folder);

    final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    env.setParallelism(1);
    env.enableCheckpointing(100);

    DataStream<Tuple2<Long, String>> stream = env.addSource(
            new FiniteTestSource<>(testData),
            TypeInformation.of(new TypeHint<Tuple2<Long, String>>() {}));

    stream.map(new MapFunction<Tuple2<Long, String>, Tuple2<LongWritable, Text>>() {
        @Override
        public Tuple2<LongWritable, Text> map(Tuple2<Long, String> value) throws Exception {
            return new Tuple2<>(new LongWritable(value.f0), new Text(value.f1));
        }
    }).addSink(
            StreamingFileSink.forBulkFormat(
                    testPath,
                    new SequenceFileWriterFactory<>(configuration, LongWritable.class, Text.class, "BZip2"))
            .build());

    env.execute();

    validateResults(folder, testData);
}
Example #14
Source File: ParquetStreamingFileSinkITCase.java From flink with Apache License 2.0
@Test
public void testWriteParquetAvroGeneric() throws Exception {
    final File folder = TEMPORARY_FOLDER.newFolder();

    final Schema schema = Address.getClassSchema();
    final Collection<GenericRecord> data = new GenericTestDataCollection();

    final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    env.setParallelism(1);
    env.enableCheckpointing(100);

    DataStream<GenericRecord> stream = env.addSource(
            new FiniteTestSource<>(data), new GenericRecordAvroTypeInfo(schema));

    stream.addSink(
            StreamingFileSink.forBulkFormat(
                    Path.fromLocalFile(folder),
                    ParquetAvroWriters.forGenericRecord(schema))
            .build());

    env.execute();

    List<Address> expected = Arrays.asList(
            new Address(1, "a", "b", "c", "12345"),
            new Address(2, "x", "y", "z", "98765"));

    validateResults(folder, SpecificData.get(), expected);
}
Example #15
Source File: ParquetStreamingFileSinkITCase.java From flink with Apache License 2.0
@Test
public void testWriteParquetAvroSpecific() throws Exception {
    final File folder = TEMPORARY_FOLDER.newFolder();

    final List<Address> data = Arrays.asList(
            new Address(1, "a", "b", "c", "12345"),
            new Address(2, "p", "q", "r", "12345"),
            new Address(3, "x", "y", "z", "12345"));

    final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    env.setParallelism(1);
    env.enableCheckpointing(100);

    DataStream<Address> stream = env.addSource(
            new FiniteTestSource<>(data), TypeInformation.of(Address.class));

    stream.addSink(
            StreamingFileSink.forBulkFormat(
                    Path.fromLocalFile(folder),
                    ParquetAvroWriters.forSpecificRecord(Address.class))
            .build());

    env.execute();

    validateResults(folder, SpecificData.get(), data);
}
Example #16
Source File: SequenceStreamingFileSinkITCase.java From flink with Apache License 2.0
@Test
public void testWriteSequenceFile() throws Exception {
    final File folder = TEMPORARY_FOLDER.newFolder();
    final Path testPath = Path.fromLocalFile(folder);

    final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    env.setParallelism(1);
    env.enableCheckpointing(100);

    DataStream<Tuple2<Long, String>> stream = env.addSource(
            new FiniteTestSource<>(testData),
            TypeInformation.of(new TypeHint<Tuple2<Long, String>>() {}));

    stream.map(new MapFunction<Tuple2<Long, String>, Tuple2<LongWritable, Text>>() {
        @Override
        public Tuple2<LongWritable, Text> map(Tuple2<Long, String> value) throws Exception {
            return new Tuple2<>(new LongWritable(value.f0), new Text(value.f1));
        }
    }).addSink(
            StreamingFileSink.forBulkFormat(
                    testPath,
                    new SequenceFileWriterFactory<>(configuration, LongWritable.class, Text.class, "BZip2"))
            .build());

    env.execute();

    validateResults(folder, testData);
}
Example #17
Source File: ParquetStreamingFileSinkITCase.java From Flink-CEPplus with Apache License 2.0
@Test
public void testWriteParquetAvroGeneric() throws Exception {
    final File folder = TEMPORARY_FOLDER.newFolder();

    final Schema schema = Address.getClassSchema();
    final Collection<GenericRecord> data = new GenericTestDataCollection();

    final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    env.setParallelism(1);
    env.enableCheckpointing(100);

    DataStream<GenericRecord> stream = env.addSource(
            new FiniteTestSource<>(data), new GenericRecordAvroTypeInfo(schema));

    stream.addSink(
            StreamingFileSink.forBulkFormat(
                    Path.fromLocalFile(folder),
                    ParquetAvroWriters.forGenericRecord(schema))
            .build());

    env.execute();

    List<Address> expected = Arrays.asList(
            new Address(1, "a", "b", "c", "12345"),
            new Address(2, "x", "y", "z", "98765"));

    validateResults(folder, SpecificData.get(), expected);
}
Example #18
Source File: ParquetStreamingFileSinkITCase.java From Flink-CEPplus with Apache License 2.0
@Test
public void testWriteParquetAvroSpecific() throws Exception {
    final File folder = TEMPORARY_FOLDER.newFolder();

    final List<Address> data = Arrays.asList(
            new Address(1, "a", "b", "c", "12345"),
            new Address(2, "p", "q", "r", "12345"),
            new Address(3, "x", "y", "z", "12345"));

    final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    env.setParallelism(1);
    env.enableCheckpointing(100);

    DataStream<Address> stream = env.addSource(
            new FiniteTestSource<>(data), TypeInformation.of(Address.class));

    stream.addSink(
            StreamingFileSink.forBulkFormat(
                    Path.fromLocalFile(folder),
                    ParquetAvroWriters.forSpecificRecord(Address.class))
            .build());

    env.execute();

    validateResults(folder, SpecificData.get(), data);
}
Example #19
Source File: HiveTableSinkITCase.java From flink with Apache License 2.0
private void testStreamingWrite(
        boolean part,
        boolean useMr,
        boolean defaultSer,
        Consumer<String> pathConsumer) throws Exception {
    StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    env.setParallelism(1);
    env.enableCheckpointing(100);

    StreamTableEnvironment tEnv = HiveTestUtils.createTableEnvWithBlinkPlannerStreamMode(env);
    tEnv.registerCatalog(hiveCatalog.getName(), hiveCatalog);
    tEnv.useCatalog(hiveCatalog.getName());
    tEnv.getConfig().setSqlDialect(SqlDialect.HIVE);
    if (useMr) {
        tEnv.getConfig().getConfiguration().set(
                HiveOptions.TABLE_EXEC_HIVE_FALLBACK_MAPRED_WRITER, true);
    } else {
        tEnv.getConfig().getConfiguration().set(
                HiveOptions.TABLE_EXEC_HIVE_FALLBACK_MAPRED_WRITER, false);
    }

    try {
        tEnv.executeSql("create database db1");
        tEnv.useDatabase("db1");

        // prepare source
        List<Row> data = Arrays.asList(
                Row.of(1, "a", "b", "2020-05-03", "7"),
                Row.of(2, "p", "q", "2020-05-03", "8"),
                Row.of(3, "x", "y", "2020-05-03", "9"),
                Row.of(4, "x", "y", "2020-05-03", "10"),
                Row.of(5, "x", "y", "2020-05-03", "11"));
        DataStream<Row> stream = env.addSource(
                new FiniteTestSource<>(data),
                new RowTypeInfo(Types.INT, Types.STRING, Types.STRING, Types.STRING, Types.STRING));
        tEnv.createTemporaryView("my_table", stream, $("a"), $("b"), $("c"), $("d"), $("e"));

        // DDL
        tEnv.executeSql("create external table sink_table (a int,b string,c string" +
                (part ? "" : ",d string,e string") + ") " +
                (part ? "partitioned by (d string,e string) " : "") +
                (defaultSer ? "" : " stored as parquet") +
                " TBLPROPERTIES (" +
                "'" + PARTITION_TIME_EXTRACTOR_TIMESTAMP_PATTERN.key() + "'='$d $e:00:00'," +
                "'" + SINK_PARTITION_COMMIT_DELAY.key() + "'='1h'," +
                "'" + SINK_PARTITION_COMMIT_POLICY_KIND.key() + "'='metastore,success-file'," +
                "'" + SINK_PARTITION_COMMIT_SUCCESS_FILE_NAME.key() + "'='_MY_SUCCESS'" +
                ")");

        TableEnvUtil.execInsertTableAndWaitResult(
                tEnv.sqlQuery("select * from my_table"), "sink_table");

        assertBatch("db1.sink_table", Arrays.asList(
                "1,a,b,2020-05-03,7",
                "1,a,b,2020-05-03,7",
                "2,p,q,2020-05-03,8",
                "2,p,q,2020-05-03,8",
                "3,x,y,2020-05-03,9",
                "3,x,y,2020-05-03,9",
                "4,x,y,2020-05-03,10",
                "4,x,y,2020-05-03,10",
                "5,x,y,2020-05-03,11",
                "5,x,y,2020-05-03,11"));

        // using batch table env to query.
        List<String> results = new ArrayList<>();
        TableEnvironment batchTEnv = HiveTestUtils.createTableEnvWithBlinkPlannerBatchMode();
        batchTEnv.registerCatalog(hiveCatalog.getName(), hiveCatalog);
        batchTEnv.useCatalog(hiveCatalog.getName());
        batchTEnv.executeSql("select * from db1.sink_table").collect()
                .forEachRemaining(r -> results.add(r.toString()));
        results.sort(String::compareTo);
        Assert.assertEquals(
                Arrays.asList(
                        "1,a,b,2020-05-03,7",
                        "1,a,b,2020-05-03,7",
                        "2,p,q,2020-05-03,8",
                        "2,p,q,2020-05-03,8",
                        "3,x,y,2020-05-03,9",
                        "3,x,y,2020-05-03,9",
                        "4,x,y,2020-05-03,10",
                        "4,x,y,2020-05-03,10",
                        "5,x,y,2020-05-03,11",
                        "5,x,y,2020-05-03,11"),
                results);

        pathConsumer.accept(URI.create(hiveCatalog.getHiveTable(
                ObjectPath.fromString("db1.sink_table")).getSd().getLocation()).getPath());
    } finally {
        tEnv.executeSql("drop database db1 cascade");
    }
}