Java Code Examples for org.apache.hadoop.hive.ql.exec.FileSinkOperator.RecordWriter#write()
The following examples show how to use org.apache.hadoop.hive.ql.exec.FileSinkOperator.RecordWriter#write().
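The examples below share one pattern: obtain a FileSinkOperator.RecordWriter from a Hive output format, serialize each row into a Writable, pass it to write(), and finish with close(false). The minimal sketch that follows shows that pattern in isolation; the choice of HiveIgnoreKeyTextOutputFormat and the /tmp/record-writer-example.txt output path are illustrative assumptions, not taken from the examples.

import java.util.Properties;

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.ql.exec.FileSinkOperator.RecordWriter;
import org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.JobConf;

public final class RecordWriterWriteSketch
{
    private RecordWriterWriteSketch() {}

    public static void writeLines(Iterable<String> lines)
            throws Exception
    {
        JobConf jobConf = new JobConf(false);
        HiveIgnoreKeyTextOutputFormat<Text, Text> outputFormat = new HiveIgnoreKeyTextOutputFormat<>();

        // Hive output formats return a FileSinkOperator.RecordWriter rather than a mapred RecordWriter
        RecordWriter recordWriter = outputFormat.getHiveRecordWriter(
                jobConf,
                new Path("/tmp/record-writer-example.txt"), // hypothetical output path
                Text.class,
                false,            // isCompressed
                new Properties(), // table properties (none needed for plain text)
                () -> {});        // no-op Progressable

        try {
            for (String line : lines) {
                recordWriter.write(new Text(line)); // write() takes any Writable the format accepts
            }
        }
        finally {
            recordWriter.close(false); // false = finish normally, true = abort
        }
    }
}

Example 6 makes the same getHiveRecordWriter() call against an output format resolved from a HiveStorageFormat; the other examples obtain their writers through format-specific helpers but drive write() and close() the same way.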
Example 1
Source File: OrcTester.java From presto with Apache License 2.0
public static DataSize writeOrcFileColumnHive(File outputFile, RecordWriter recordWriter, Type type, Iterator<?> values)
        throws Exception
{
    SettableStructObjectInspector objectInspector = createSettableStructObjectInspector("test", type);
    Object row = objectInspector.create();

    List<StructField> fields = ImmutableList.copyOf(objectInspector.getAllStructFieldRefs());
    Serializer serializer = new OrcSerde();

    while (values.hasNext()) {
        Object value = values.next();
        value = preprocessWriteValueHive(type, value);
        objectInspector.setStructFieldData(row, fields.get(0), value);

        Writable record = serializer.serialize(row, objectInspector);
        recordWriter.write(record);
    }

    recordWriter.close(false);
    return succinctBytes(outputFile.length());
}
Example 2
Source File: RcFileTester.java From presto with Apache License 2.0
private static DataSize writeRcFileColumnOld(File outputFile, Format format, Compression compression, Type type, Iterator<?> values)
        throws Exception
{
    ObjectInspector columnObjectInspector = getJavaObjectInspector(type);
    RecordWriter recordWriter = createRcFileWriterOld(outputFile, compression, columnObjectInspector);

    SettableStructObjectInspector objectInspector = createSettableStructObjectInspector("test", columnObjectInspector);
    Object row = objectInspector.create();

    List<StructField> fields = ImmutableList.copyOf(objectInspector.getAllStructFieldRefs());
    Serializer serializer = format.createSerializer();

    Properties tableProperties = new Properties();
    tableProperties.setProperty("columns", "test");
    tableProperties.setProperty("columns.types", objectInspector.getTypeName());
    serializer.initialize(new JobConf(false), tableProperties);

    while (values.hasNext()) {
        Object value = values.next();
        value = preprocessWriteValueOld(type, value);
        objectInspector.setStructFieldData(row, fields.get(0), value);

        Writable record = serializer.serialize(row, objectInspector);
        recordWriter.write(record);
    }

    recordWriter.close(false);
    return DataSize.ofBytes(outputFile.length()).succinct();
}
Example 3
Source File: ParquetTester.java From presto with Apache License 2.0
private static void writeParquetColumn(
        JobConf jobConf,
        File outputFile,
        CompressionCodecName compressionCodecName,
        Properties tableProperties,
        SettableStructObjectInspector objectInspector,
        Iterator<?>[] valuesByField,
        Optional<MessageType> parquetSchema,
        boolean singleLevelArray)
        throws Exception
{
    RecordWriter recordWriter = new TestMapredParquetOutputFormat(parquetSchema, singleLevelArray)
            .getHiveRecordWriter(
                    jobConf,
                    new Path(outputFile.toURI()),
                    Text.class,
                    compressionCodecName != UNCOMPRESSED,
                    tableProperties,
                    () -> {});

    Object row = objectInspector.create();
    List<StructField> fields = ImmutableList.copyOf(objectInspector.getAllStructFieldRefs());

    while (stream(valuesByField).allMatch(Iterator::hasNext)) {
        for (int field = 0; field < fields.size(); field++) {
            Object value = valuesByField[field].next();
            objectInspector.setStructFieldData(row, fields.get(field), value);
        }

        ParquetHiveSerDe serde = new ParquetHiveSerDe();
        serde.initialize(jobConf, tableProperties, null);
        Writable record = serde.serialize(row, objectInspector);
        recordWriter.write(record);
    }

    recordWriter.close(false);
}
Example 4
Source File: ParquetRecordWriterUtil.java From presto with Apache License 2.0
public static RecordWriter createParquetWriter(Path target, JobConf conf, Properties properties, ConnectorSession session)
        throws IOException, ReflectiveOperationException
{
    conf.setLong(ParquetOutputFormat.BLOCK_SIZE, getParquetWriterBlockSize(session).toBytes());
    conf.setLong(ParquetOutputFormat.PAGE_SIZE, getParquetWriterPageSize(session).toBytes());

    RecordWriter recordWriter = createParquetWriter(target, conf, properties);

    // The Hive Parquet RecordWriter does not expose how many bytes it has written, so the underlying
    // ParquetFileWriter is reached through reflective field access and its position is reported as the length.
    Object realWriter = REAL_WRITER_FIELD.get(recordWriter);
    Object internalWriter = INTERNAL_WRITER_FIELD.get(realWriter);
    ParquetFileWriter fileWriter = (ParquetFileWriter) FILE_WRITER_FIELD.get(internalWriter);

    return new ExtendedRecordWriter()
    {
        private long length;

        @Override
        public long getWrittenBytes()
        {
            return length;
        }

        @Override
        public void write(Writable value)
                throws IOException
        {
            recordWriter.write(value);
            length = fileWriter.getPos();
        }

        @Override
        public void close(boolean abort)
                throws IOException
        {
            recordWriter.close(abort);
            if (!abort) {
                length = fileWriter.getPos();
            }
        }
    };
}
Example 5
Source File: TestOrcPageSourceMemoryTracking.java From presto with Apache License 2.0
public static FileSplit createTestFile(
        String filePath,
        Serializer serializer,
        String compressionCodec,
        List<TestColumn> testColumns,
        int numRows,
        int stripeRows)
        throws Exception
{
    // filter out partition keys, which are not written to the file
    testColumns = testColumns.stream()
            .filter(column -> !column.isPartitionKey())
            .collect(toImmutableList());

    Properties tableProperties = new Properties();
    tableProperties.setProperty(
            "columns",
            testColumns.stream()
                    .map(TestColumn::getName)
                    .collect(Collectors.joining(",")));
    tableProperties.setProperty(
            "columns.types",
            testColumns.stream()
                    .map(TestColumn::getType)
                    .collect(Collectors.joining(",")));
    serializer.initialize(CONFIGURATION, tableProperties);

    JobConf jobConf = new JobConf();
    if (compressionCodec != null) {
        CompressionCodec codec = new CompressionCodecFactory(CONFIGURATION).getCodecByName(compressionCodec);
        jobConf.set(COMPRESS_CODEC, codec.getClass().getName());
        jobConf.set(COMPRESS_TYPE, SequenceFile.CompressionType.BLOCK.toString());
    }

    RecordWriter recordWriter = createRecordWriter(new Path(filePath), CONFIGURATION);

    try {
        SettableStructObjectInspector objectInspector = getStandardStructObjectInspector(
                testColumns.stream()
                        .map(TestColumn::getName)
                        .collect(toImmutableList()),
                testColumns.stream()
                        .map(TestColumn::getObjectInspector)
                        .collect(toImmutableList()));

        Object row = objectInspector.create();
        List<StructField> fields = ImmutableList.copyOf(objectInspector.getAllStructFieldRefs());

        for (int rowNumber = 0; rowNumber < numRows; rowNumber++) {
            for (int i = 0; i < testColumns.size(); i++) {
                Object writeValue = testColumns.get(i).getWriteValue();
                if (writeValue instanceof Slice) {
                    writeValue = ((Slice) writeValue).getBytes();
                }
                objectInspector.setStructFieldData(row, fields.get(i), writeValue);
            }

            Writable record = serializer.serialize(row, objectInspector);
            recordWriter.write(record);

            // close out the current ORC stripe after every stripeRows rows
            if (rowNumber % stripeRows == stripeRows - 1) {
                flushStripe(recordWriter);
            }
        }
    }
    finally {
        recordWriter.close(false);
    }

    Path path = new Path(filePath);
    path.getFileSystem(CONFIGURATION).setVerifyChecksum(true);
    File file = new File(filePath);
    return new FileSplit(path, 0, file.length(), new String[0]);
}
Example 6
Source File: AbstractTestHiveFileFormats.java From presto with Apache License 2.0
public static FileSplit createTestFile(
        String filePath,
        HiveStorageFormat storageFormat,
        HiveCompressionCodec compressionCodec,
        List<TestColumn> testColumns,
        int numRows)
        throws Exception
{
    HiveOutputFormat<?, ?> outputFormat = newInstance(storageFormat.getOutputFormat(), HiveOutputFormat.class);
    Serializer serializer = newInstance(storageFormat.getSerDe(), Serializer.class);

    // filter out partition keys, which are not written to the file
    testColumns = testColumns.stream()
            .filter(column -> !column.isPartitionKey())
            .collect(toImmutableList());

    Properties tableProperties = new Properties();
    tableProperties.setProperty(
            "columns",
            testColumns.stream()
                    .map(TestColumn::getName)
                    .collect(Collectors.joining(",")));
    tableProperties.setProperty(
            "columns.types",
            testColumns.stream()
                    .map(TestColumn::getType)
                    .collect(Collectors.joining(",")));
    serializer.initialize(new Configuration(false), tableProperties);

    JobConf jobConf = new JobConf();
    configureCompression(jobConf, compressionCodec);

    RecordWriter recordWriter = outputFormat.getHiveRecordWriter(
            jobConf,
            new Path(filePath),
            Text.class,
            compressionCodec != HiveCompressionCodec.NONE,
            tableProperties,
            () -> {});

    try {
        serializer.initialize(new Configuration(false), tableProperties);

        SettableStructObjectInspector objectInspector = getStandardStructObjectInspector(
                testColumns.stream()
                        .map(TestColumn::getName)
                        .collect(toImmutableList()),
                testColumns.stream()
                        .map(TestColumn::getObjectInspector)
                        .collect(toImmutableList()));

        Object row = objectInspector.create();
        List<StructField> fields = ImmutableList.copyOf(objectInspector.getAllStructFieldRefs());

        for (int rowNumber = 0; rowNumber < numRows; rowNumber++) {
            for (int i = 0; i < testColumns.size(); i++) {
                Object writeValue = testColumns.get(i).getWriteValue();
                if (writeValue instanceof Slice) {
                    writeValue = ((Slice) writeValue).getBytes();
                }
                objectInspector.setStructFieldData(row, fields.get(i), writeValue);
            }

            Writable record = serializer.serialize(row, objectInspector);
            recordWriter.write(record);
        }
    }
    finally {
        recordWriter.close(false);
    }

    // todo to test with compression, the file must be renamed with the compression extension
    Path path = new Path(filePath);
    path.getFileSystem(new Configuration(false)).setVerifyChecksum(true);
    File file = new File(filePath);
    return new FileSplit(path, 0, file.length(), new String[0]);
}