Java Code Examples for org.apache.flink.shaded.guava18.com.google.common.collect.ImmutableMap#Builder
The following examples show how to use org.apache.flink.shaded.guava18.com.google.common.collect.ImmutableMap#Builder, Guava's ImmutableMap.Builder as relocated into Flink's shaded namespace. The originating project and source file are noted above each example.
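The builder API itself is small. Here is a minimal sketch before the full test cases; the class name, keys, and values are illustrative, while the shaded import path and the builder calls match what the examples below use:

import org.apache.flink.shaded.guava18.com.google.common.collect.ImmutableMap;

public class BuilderSketch {

	public static void main(String[] args) {
		// Accumulate entries, then freeze them into an immutable map with build().
		ImmutableMap.Builder<String, Integer> builder = ImmutableMap.builder();
		builder.put("one", 1);
		builder.put("two", 2);
		ImmutableMap<String, Integer> map = builder.build();

		System.out.println(map.get("two")); // prints 2

		// The result is read-only: Map.put compiles but throws
		// UnsupportedOperationException at runtime, and build() itself
		// throws IllegalArgumentException if the same key was added twice.
	}
}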
Example 1
Source File: ParquetRecordReaderTest.java (from flink, Apache License 2.0)
@Test
public void testMapGroup() throws IOException {
	Preconditions.checkState(unWrapSchema(NESTED_SCHEMA.getField("spamMap").schema())
		.getType().equals(Schema.Type.MAP));
	ImmutableMap.Builder<String, String> map = ImmutableMap.builder();
	map.put("testKey", "testValue");

	GenericRecord record = new GenericRecordBuilder(NESTED_SCHEMA)
		.set("foo", 32L)
		.set("spamMap", map.build())
		.build();

	Path path = createTempParquetFile(tempRoot.getRoot(), NESTED_SCHEMA, Collections.singletonList(record));
	MessageType readSchema = (new AvroSchemaConverter()).convert(NESTED_SCHEMA);
	ParquetRecordReader<Row> rowReader = new ParquetRecordReader<>(new RowReadSupport(), readSchema);

	InputFile inputFile =
		HadoopInputFile.fromPath(new org.apache.hadoop.fs.Path(path.toUri()), testConfig);
	ParquetReadOptions options = ParquetReadOptions.builder().build();
	ParquetFileReader fileReader = new ParquetFileReader(inputFile, options);

	rowReader.initialize(fileReader, testConfig);
	assertFalse(rowReader.reachEnd());

	Row row = rowReader.nextRecord();
	assertEquals(7, row.getArity());

	assertEquals(32L, row.getField(0));
	Map<?, ?> result = (Map<?, ?>) row.getField(1);
	assertEquals(result.get("testKey").toString(), "testValue");
	assertTrue(rowReader.reachEnd());
}
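A note on the builder usage here: the map is assembled with put(...) and sealed with build() before being handed to the Avro GenericRecordBuilder, so the value written into the record is immutable, and the assertions at the end read back exactly what was put in ("testKey" mapping to "testValue", surfaced in the Row at field 1).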
Example 2
Source File: ParquetRecordReaderTest.java (from flink, Apache License 2.0)
@Test
public void testNestedMapGroup() throws IOException {
	Schema nestedMapSchema = unWrapSchema(NESTED_SCHEMA.getField("nestedMap").schema());
	Preconditions.checkState(nestedMapSchema.getType().equals(Schema.Type.MAP));

	Schema mapValueSchema = nestedMapSchema.getValueType();
	GenericRecord mapValue = new GenericRecordBuilder(mapValueSchema)
		.set("type", "nested")
		.set("value", "nested_value").build();

	ImmutableMap.Builder<String, GenericRecord> map = ImmutableMap.builder();
	map.put("testKey", mapValue);

	GenericRecord record = new GenericRecordBuilder(NESTED_SCHEMA)
		.set("nestedMap", map.build())
		.set("foo", 34L).build();

	Path path = createTempParquetFile(tempRoot.getRoot(), NESTED_SCHEMA, Collections.singletonList(record));
	MessageType readSchema = (new AvroSchemaConverter()).convert(NESTED_SCHEMA);
	ParquetRecordReader<Row> rowReader = new ParquetRecordReader<>(new RowReadSupport(), readSchema);

	InputFile inputFile =
		HadoopInputFile.fromPath(new org.apache.hadoop.fs.Path(path.toUri()), testConfig);
	ParquetReadOptions options = ParquetReadOptions.builder().build();
	ParquetFileReader fileReader = new ParquetFileReader(inputFile, options);

	rowReader.initialize(fileReader, testConfig);
	assertFalse(rowReader.reachEnd());

	Row row = rowReader.nextRecord();
	assertEquals(7, row.getArity());

	assertEquals(34L, row.getField(0));
	Map<?, ?> result = (Map<?, ?>) row.getField(5);
	Row nestedRow = (Row) result.get("testKey");
	assertEquals("nested", nestedRow.getField(0));
	assertEquals("nested_value", nestedRow.getField(1));
}
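Unlike the flat map in Example 1, the map value here is itself an Avro record. After the Parquet round trip the reader surfaces that record as a Flink Row, so the nested fields are accessed by position: getField(0) holds "type" and getField(1) holds "value", as the final two assertions show.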
Example 3
Source File: KafkaShuffleITCase.java (from flink, Apache License 2.0)
private Map<Integer, Collection<ConsumerRecord<byte[], byte[]>>> testKafkaShuffleProducer(
		String topic,
		StreamExecutionEnvironment env,
		int numberOfPartitions,
		int producerParallelism,
		int numElementsPerProducer,
		TimeCharacteristic timeCharacteristic) throws Exception {
	createTestTopic(topic, numberOfPartitions, 1);

	env.setParallelism(producerParallelism);
	env.setRestartStrategy(RestartStrategies.noRestart());
	env.setStreamTimeCharacteristic(timeCharacteristic);

	DataStream<Tuple3<Integer, Long, Integer>> source =
		env.addSource(new KafkaSourceFunction(numElementsPerProducer, false)).setParallelism(producerParallelism);
	DataStream<Tuple3<Integer, Long, Integer>> input = (timeCharacteristic == EventTime) ?
		source.assignTimestampsAndWatermarks(new PunctuatedExtractor()).setParallelism(producerParallelism) : source;

	Properties properties = kafkaServer.getStandardProperties();
	Properties kafkaProperties = PropertiesUtil.flatten(properties);

	kafkaProperties.setProperty(PRODUCER_PARALLELISM, String.valueOf(producerParallelism));
	kafkaProperties.setProperty(PARTITION_NUMBER, String.valueOf(numberOfPartitions));
	kafkaProperties.setProperty("key.deserializer", "org.apache.kafka.common.serialization.ByteArrayDeserializer");
	kafkaProperties.setProperty("value.deserializer", "org.apache.kafka.common.serialization.ByteArrayDeserializer");
	FlinkKafkaShuffle.writeKeyBy(input, topic, kafkaProperties, 0);

	env.execute("Write to " + topic);

	ImmutableMap.Builder<Integer, Collection<ConsumerRecord<byte[], byte[]>>> results = ImmutableMap.builder();
	for (int p = 0; p < numberOfPartitions; p++) {
		results.put(p, kafkaServer.getAllRecordsFromTopic(kafkaProperties, topic, p, 5000));
	}
	deleteTestTopic(topic);

	return results.build();
}
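This example shows the pattern ImmutableMap.Builder is designed for: the builder is filled incrementally inside a loop, one entry per Kafka partition, and build() produces the return value in a single expression, so callers receive a map they cannot accidentally mutate.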