Java Code Examples for org.apache.iceberg.FileFormat#valueOf()
The following examples show how to use
org.apache.iceberg.FileFormat#valueOf().
You can vote up the examples you like or vote down the ones you don't like,
and you can go to the original project or source file by following the links above each example. You may also check out the related API usage on the sidebar.
Example 1
Source File: RowDataRewriter.java From iceberg with Apache License 2.0 | 6 votes |
/**
 * Creates a rewriter for the given table's data files.
 *
 * <p>Table-derived state (schema, locations, properties) is captured eagerly
 * from the {@code table} argument; the rewriter does not hold on to the table
 * itself. The write file format is resolved from the table properties,
 * falling back to the Iceberg default when unset.
 *
 * @param table the table whose data files will be rewritten
 * @param spec partition spec to write with
 * @param caseSensitive whether column resolution is case sensitive
 * @param io broadcast FileIO used on executors
 * @param encryptionManager broadcast encryption manager used on executors
 */
public RowDataRewriter(Table table, PartitionSpec spec, boolean caseSensitive,
                       Broadcast<FileIO> io, Broadcast<EncryptionManager> encryptionManager) {
  // Directly supplied collaborators.
  this.spec = spec;
  this.caseSensitive = caseSensitive;
  this.io = io;
  this.encryptionManager = encryptionManager;

  // State snapshotted from the table.
  this.schema = table.schema();
  this.locations = table.locationProvider();
  this.properties = table.properties();
  this.nameMapping = table.properties().get(DEFAULT_NAME_MAPPING);

  // Resolve the configured write format; property values are lowercase
  // ("avro", "parquet", "orc") while enum constants are uppercase.
  String configuredFormat = table.properties().getOrDefault(
      TableProperties.DEFAULT_FILE_FORMAT, TableProperties.DEFAULT_FILE_FORMAT_DEFAULT);
  this.format = FileFormat.valueOf(configuredFormat.toUpperCase(Locale.ENGLISH));
}
Example 2
Source File: IcebergConfig.java From presto with Apache License 2.0 | 4 votes |
/**
 * Returns the Iceberg {@link FileFormat} matching this config's format setting.
 *
 * <p>Relies on the two enums sharing constant names: the config enum constant's
 * name is looked up directly in {@code FileFormat}.
 */
public FileFormat getFileFormat() {
    String constantName = fileFormat.name();
    return FileFormat.valueOf(constantName);
}
Example 3
Source File: IcebergUtil.java From presto with Apache License 2.0 | 4 votes |
/**
 * Resolves the table's configured write file format.
 *
 * <p>Reads the {@code write.format.default} table property (falling back to
 * the Iceberg default when unset) and maps the lowercase property value to
 * the uppercase {@link FileFormat} enum constant.
 *
 * @param table the Iceberg table to inspect
 * @return the file format to use when writing to the table
 */
public static FileFormat getFileFormat(Table table) {
    String configured = table.properties()
            .getOrDefault(DEFAULT_FILE_FORMAT, DEFAULT_FILE_FORMAT_DEFAULT);
    return FileFormat.valueOf(configured.toUpperCase(Locale.ENGLISH));
}
Example 4
Source File: Writer.java From iceberg with Apache License 2.0 | 4 votes |
/**
 * Determines the file format for this write.
 *
 * <p>An explicit {@code write-format} write option wins; otherwise the
 * table's {@code write.format.default} property is used, falling back to
 * the Iceberg default when neither is set.
 *
 * @param tableProperties properties of the target table
 * @param options write options passed by the caller
 * @return the file format to write with
 */
private FileFormat getFileFormat(Map<String, String> tableProperties, DataSourceOptions options) {
    // Table-level default, used only when no per-write option is given.
    String tableDefault = tableProperties.getOrDefault(DEFAULT_FILE_FORMAT, DEFAULT_FILE_FORMAT_DEFAULT);
    String formatName = options.get("write-format").orElse(tableDefault);
    return FileFormat.valueOf(formatName.toUpperCase(Locale.ENGLISH));
}
Example 5
Source File: TestFilteredScan.java From iceberg with Apache License 2.0 | 4 votes |
/**
 * Creates an unpartitioned table on local disk and writes one data file of
 * test records in the parameterized format (AVRO, PARQUET, or ORC), then
 * appends that file to the table so each test starts from a populated table.
 *
 * @throws IOException if the temporary folders or the data file cannot be written
 */
@Before
public void writeUnpartitionedTable() throws IOException {
  // Fresh temp directory layout: <parent>/unpartitioned/data
  this.parent = temp.newFolder("TestFilteredScan");
  this.unpartitioned = new File(parent, "unpartitioned");
  File dataFolder = new File(unpartitioned, "data");
  Assert.assertTrue("Mkdir should succeed", dataFolder.mkdirs());

  Table table = TABLES.create(SCHEMA, PartitionSpec.unpartitioned(), unpartitioned.toString());
  Schema tableSchema = table.schema(); // use the table schema because ids are reassigned

  // "format" is the parameterized format name; enum constants are uppercase.
  FileFormat fileFormat = FileFormat.valueOf(format.toUpperCase(Locale.ENGLISH));

  File testFile = new File(dataFolder, fileFormat.addExtension(UUID.randomUUID().toString()));

  // create records using the table's schema
  this.records = testRecords(tableSchema);

  // Write the records with the format-specific appender; try-with-resources
  // closes (and finalizes) the file before its length is read below.
  switch (fileFormat) {
    case AVRO:
      try (FileAppender<Record> writer = Avro.write(localOutput(testFile))
          .createWriterFunc(DataWriter::create)
          .schema(tableSchema)
          .build()) {
        writer.addAll(records);
      }
      break;

    case PARQUET:
      try (FileAppender<Record> writer = Parquet.write(localOutput(testFile))
          .createWriterFunc(GenericParquetWriter::buildWriter)
          .schema(tableSchema)
          .build()) {
        writer.addAll(records);
      }
      break;

    case ORC:
      try (FileAppender<Record> writer = ORC.write(localOutput(testFile))
          .createWriterFunc(GenericOrcWriter::buildWriter)
          .schema(tableSchema)
          .build()) {
        writer.addAll(records);
      }
      break;
  }

  // Register the written file with the table and commit the append.
  DataFile file = DataFiles.builder(PartitionSpec.unpartitioned())
      .withRecordCount(records.size())
      .withFileSizeInBytes(testFile.length())
      .withPath(testFile.toString())
      .build();

  table.newAppend().appendFile(file).commit();
}
Example 6
Source File: TestLocalScan.java From iceberg with Apache License 2.0 | 4 votes |
/**
 * Parameterized constructor; {@code format} is a case-insensitive file
 * format name such as "avro" or "parquet".
 */
public TestLocalScan(String format) {
    String normalized = format.toUpperCase(Locale.ENGLISH);
    this.format = FileFormat.valueOf(normalized);
}
Example 7
Source File: TestMetricsRowGroupFilter.java From iceberg with Apache License 2.0 | 4 votes |
/**
 * Parameterized constructor; {@code format} is a case-insensitive file
 * format name mapped to the corresponding {@link FileFormat} constant.
 */
public TestMetricsRowGroupFilter(String format) {
    String normalized = format.toUpperCase(Locale.ENGLISH);
    this.format = FileFormat.valueOf(normalized);
}
Example 8
Source File: TestMetricsRowGroupFilterTypes.java From iceberg with Apache License 2.0 | 4 votes |
/**
 * Parameterized constructor.
 *
 * @param format case-insensitive file format name
 * @param column column under test
 * @param readValue a value the row-group filter should match (read)
 * @param skipValue a value the row-group filter should not match (skip)
 */
public TestMetricsRowGroupFilterTypes(String format, String column, Object readValue, Object skipValue) {
    String normalized = format.toUpperCase(Locale.ENGLISH);
    this.format = FileFormat.valueOf(normalized);
    this.column = column;
    this.readValue = readValue;
    this.skipValue = skipValue;
}
Example 9
Source File: TestIcebergInputFormat.java From iceberg with Apache License 2.0 | 4 votes |
/**
 * Parameterized constructor; {@code format} is a case-insensitive file
 * format name mapped to the corresponding {@link FileFormat} constant.
 */
public TestIcebergInputFormat(String format) {
    String normalized = format.toUpperCase(Locale.ENGLISH);
    this.format = FileFormat.valueOf(normalized);
}
Example 10
Source File: SparkDataFile.java From iceberg with Apache License 2.0 | 4 votes |
/**
 * Returns the file format of the wrapped row's data file.
 *
 * <p>The format is stored as a string column in the wrapped row; it is
 * uppercased with {@link Locale#ROOT} before the enum lookup so the result
 * is locale independent.
 */
@Override
public FileFormat format() {
    return FileFormat.valueOf(
        wrapped.getString(fileFormatPosition).toUpperCase(Locale.ROOT));
}
Example 11
Source File: TestSparkReadProjection.java From iceberg with Apache License 2.0 | 4 votes |
public TestSparkReadProjection(String format, boolean vectorized) { super(format); this.format = FileFormat.valueOf(format.toUpperCase(Locale.ROOT)); this.vectorized = vectorized; }
Example 12
Source File: TestSparkDataWrite.java From iceberg with Apache License 2.0 | 4 votes |
/**
 * Parameterized constructor; {@code format} is a case-insensitive file
 * format name mapped to the corresponding {@link FileFormat} constant.
 */
public TestSparkDataWrite(String format) {
    String normalized = format.toUpperCase(Locale.ENGLISH);
    this.format = FileFormat.valueOf(normalized);
}
Example 13
Source File: SparkBatchWrite.java From iceberg with Apache License 2.0 | 4 votes |
/**
 * Determines the file format for this write.
 *
 * <p>An explicit {@code write-format} entry in the write options wins;
 * otherwise the table's {@code write.format.default} property is used,
 * falling back to the Iceberg default when neither is set.
 *
 * @param tableProperties properties of the target table
 * @param options write options passed by the caller
 * @return the file format to write with
 */
protected FileFormat getFileFormat(Map<String, String> tableProperties, Map<String, String> options) {
    String requested = options.get("write-format");
    String formatName = (requested != null)
        ? requested
        : tableProperties.getOrDefault(DEFAULT_FILE_FORMAT, DEFAULT_FILE_FORMAT_DEFAULT);
    return FileFormat.valueOf(formatName.toUpperCase(Locale.ENGLISH));
}
Example 14
Source File: TestFilteredScan.java From iceberg with Apache License 2.0 | 4 votes |
/**
 * Creates an unpartitioned table on local disk and writes one data file of
 * test records in the parameterized format (AVRO or PARQUET handled here),
 * then appends that file to the table so each test starts from a populated
 * table.
 *
 * <p>NOTE(review): the switch has no ORC branch, so for an ORC format the
 * file would be registered empty — presumably ORC is not parameterized in
 * this test; confirm against the test's parameter list.
 *
 * @throws IOException if the temporary folders or the data file cannot be written
 */
@Before
public void writeUnpartitionedTable() throws IOException {
  // Fresh temp directory layout: <parent>/unpartitioned/data
  this.parent = temp.newFolder("TestFilteredScan");
  this.unpartitioned = new File(parent, "unpartitioned");
  File dataFolder = new File(unpartitioned, "data");
  Assert.assertTrue("Mkdir should succeed", dataFolder.mkdirs());

  Table table = TABLES.create(SCHEMA, PartitionSpec.unpartitioned(), unpartitioned.toString());
  Schema tableSchema = table.schema(); // use the table schema because ids are reassigned

  // "format" is the parameterized format name; enum constants are uppercase.
  FileFormat fileFormat = FileFormat.valueOf(format.toUpperCase(Locale.ENGLISH));

  File testFile = new File(dataFolder, fileFormat.addExtension(UUID.randomUUID().toString()));

  // create records using the table's schema
  org.apache.avro.Schema avroSchema = AvroSchemaUtil.convert(tableSchema, "test");
  this.records = testRecords(avroSchema);

  // Write the records with the format-specific appender; try-with-resources
  // closes (and finalizes) the file before its length is read below.
  switch (fileFormat) {
    case AVRO:
      try (FileAppender<Record> writer = Avro.write(localOutput(testFile))
          .schema(tableSchema)
          .build()) {
        writer.addAll(records);
      }
      break;

    case PARQUET:
      try (FileAppender<Record> writer = Parquet.write(localOutput(testFile))
          .schema(tableSchema)
          .build()) {
        writer.addAll(records);
      }
      break;
  }

  // Register the written file with the table and commit the append.
  DataFile file = DataFiles.builder(PartitionSpec.unpartitioned())
      .withRecordCount(records.size())
      .withFileSizeInBytes(testFile.length())
      .withPath(testFile.toString())
      .build();

  table.newAppend().appendFile(file).commit();
}