Java Code Examples for org.apache.flink.api.java.typeutils.RowTypeInfo#getTypeAt()
The following examples show how to use
org.apache.flink.api.java.typeutils.RowTypeInfo#getTypeAt().
You can vote up the examples you like or vote down the ones you don't like,
and follow the links above each example to visit the original project or source file. You may also check out the related API usage in the sidebar.
Example 1
Source File: ParquetInputFormat.java From flink with Apache License 2.0 | 6 votes |
/**
 * Configures the fields to be read and returned by the ParquetInputFormat. Selected fields must be present
 * in the configured schema.
 *
 * @param fieldNames Names of all selected fields.
 */
public void selectFields(String[] fieldNames) {
	checkNotNull(fieldNames, "fieldNames");
	this.fieldNames = fieldNames;
	// Resolve each selected field's type against the expected file schema.
	RowTypeInfo schemaInfo = (RowTypeInfo) ParquetSchemaConverter.fromParquetType(expectedFileSchema);
	TypeInformation[] resolvedTypes = new TypeInformation[fieldNames.length];
	int pos = 0;
	for (String fieldName : fieldNames) {
		try {
			resolvedTypes[pos++] = schemaInfo.getTypeAt(fieldName);
		} catch (IndexOutOfBoundsException e) {
			// getTypeAt signals an unknown field via IndexOutOfBoundsException; rethrow as a usage error.
			throw new IllegalArgumentException(String.format("Fail to access Field %s , " +
				"which is not contained in the file schema", fieldName), e);
		}
	}
	this.fieldTypes = resolvedTypes;
}
Example 2
Source File: SideStream.java From alchemy with Apache License 2.0 | 6 votes |
/**
 * Builds the combined row type for a join result: the left table's columns first,
 * followed by the side row's columns.
 *
 * @param leftTable schema of the left table; supplies the leading column names and types.
 * @param sideType  row type of the side input; supplies the trailing column names and types.
 * @return a {@code RowTypeInfo} whose arity is the sum of both inputs' arities.
 */
private static RowTypeInfo createReturnType(TableSchema leftTable, RowTypeInfo sideType) {
	String[] leftFields = leftTable.getColumnNames();
	TypeInformation[] leftTypes = leftTable.getTypes();
	int leftArity = leftFields.length;
	int rightArity = sideType.getArity();
	int size = leftArity + rightArity;
	String[] columnNames = new String[size];
	TypeInformation[] columnTypes = new TypeInformation[size];
	// Bulk-copy the left columns instead of copying element by element.
	System.arraycopy(leftFields, 0, columnNames, 0, leftArity);
	System.arraycopy(leftTypes, 0, columnTypes, 0, leftArity);
	// Hoisted out of the loop: the original re-invoked getFieldNames() on every iteration.
	String[] rightFields = sideType.getFieldNames();
	for (int i = 0; i < rightArity; i++) {
		columnNames[leftArity + i] = rightFields[i];
		columnTypes[leftArity + i] = sideType.getTypeAt(i);
	}
	return new RowTypeInfo(columnTypes, columnNames);
}
Example 3
Source File: ParquetInputFormat.java From flink with Apache License 2.0 | 6 votes |
/**
 * Configures the fields to be read and returned by the ParquetInputFormat. Selected fields must be present
 * in the configured schema.
 *
 * @param fieldNames Names of all selected fields.
 */
public void selectFields(String[] fieldNames) {
	checkNotNull(fieldNames, "fieldNames");
	this.fieldNames = fieldNames;
	// Convert the expected Parquet schema to Flink row type info so field types can be looked up by name.
	RowTypeInfo rowTypeInfo = (RowTypeInfo) ParquetSchemaConverter.fromParquetType(expectedFileSchema);
	TypeInformation[] selectFieldTypes = new TypeInformation[fieldNames.length];
	for (int i = 0; i < fieldNames.length; i++) {
		try {
			selectFieldTypes[i] = rowTypeInfo.getTypeAt(fieldNames[i]);
		} catch (IndexOutOfBoundsException e) {
			// An unknown field name surfaces here as IndexOutOfBoundsException; rethrow as a usage error
			// carrying the offending field name and the original cause.
			throw new IllegalArgumentException(String.format("Fail to access Field %s , " +
				"which is not contained in the file schema", fieldNames[i]), e);
		}
	}
	this.fieldTypes = selectFieldTypes;
}
Example 4
Source File: ParquetInputFormat.java From flink with Apache License 2.0 | 5 votes |
/**
 * Generates and returns the read schema based on the projected fields for a given file.
 *
 * @param fileSchema The schema of the given file.
 * @param filePath The path of the given file.
 * @return The read schema based on the given file's schema and the projected fields.
 */
private MessageType getReadSchema(MessageType fileSchema, Path filePath) {
	RowTypeInfo fileTypeInfo = (RowTypeInfo) ParquetSchemaConverter.fromParquetType(fileSchema);
	List<Type> types = new ArrayList<>();
	for (int i = 0; i < fieldNames.length; ++i) {
		String readFieldName = fieldNames[i];
		TypeInformation<?> readFieldType = fieldTypes[i];
		// Projected field missing from this file's schema: fail fast, or mark the split to be skipped.
		if (fileTypeInfo.getFieldIndex(readFieldName) < 0) {
			if (!skipWrongSchemaFileSplit) {
				// Fixed: the original concatenation produced a doubled space ("of  Parquet file").
				throw new IllegalArgumentException("Field " + readFieldName + " cannot be found in schema of "
					+ "Parquet file: " + filePath + ".");
			} else {
				this.skipThisSplit = true;
				return fileSchema;
			}
		}
		// Look up the file-side type once; the original called getTypeAt a second time in the message.
		TypeInformation<?> fileFieldType = fileTypeInfo.getTypeAt(readFieldName);
		if (!readFieldType.equals(fileFieldType)) {
			if (!skipWrongSchemaFileSplit) {
				throw new IllegalArgumentException("Expecting type " + readFieldType + " for field " + readFieldName
					+ " but found type " + fileFieldType + " in Parquet file: " + filePath + ".");
			} else {
				this.skipThisSplit = true;
				return fileSchema;
			}
		}
		types.add(fileSchema.getType(readFieldName));
	}
	return new MessageType(fileSchema.getName(), types);
}
Example 5
Source File: ParquetInputFormat.java From flink with Apache License 2.0 | 5 votes |
/**
 * Generates and returns the read schema based on the projected fields for a given file.
 *
 * @param fileSchema The schema of the given file.
 * @param filePath The path of the given file.
 * @return The read schema based on the given file's schema and the projected fields.
 */
private MessageType getReadSchema(MessageType fileSchema, Path filePath) {
	// Convert this particular file's schema so projected fields can be validated by name.
	RowTypeInfo fileTypeInfo = (RowTypeInfo) ParquetSchemaConverter.fromParquetType(fileSchema);
	List<Type> types = new ArrayList<>();
	for (int i = 0; i < fieldNames.length; ++i) {
		String readFieldName = fieldNames[i];
		TypeInformation<?> readFieldType = fieldTypes[i];
		// Projected field absent from this file: either fail fast or flag the split to be skipped.
		if (fileTypeInfo.getFieldIndex(readFieldName) < 0) {
			if (!skipWrongSchemaFileSplit) {
				throw new IllegalArgumentException("Field " + readFieldName + " cannot be found in schema of "
					+ " Parquet file: " + filePath + ".");
			} else {
				this.skipThisSplit = true;
				return fileSchema;
			}
		}
		// Projected type must match the type recorded in the file; otherwise fail or skip as above.
		if (!readFieldType.equals(fileTypeInfo.getTypeAt(readFieldName))) {
			if (!skipWrongSchemaFileSplit) {
				throw new IllegalArgumentException("Expecting type " + readFieldType + " for field " + readFieldName
					+ " but found type " + fileTypeInfo.getTypeAt(readFieldName)
					+ " in Parquet file: " + filePath + ".");
			} else {
				this.skipThisSplit = true;
				return fileSchema;
			}
		}
		types.add(fileSchema.getType(readFieldName));
	}
	// Read schema = the file's own types for exactly the projected fields, in projection order.
	return new MessageType(fileSchema.getName(), types);
}