Java Code Examples for org.apache.flink.table.descriptors.DescriptorProperties#putTableSchema()
The following examples show how to use org.apache.flink.table.descriptors.DescriptorProperties#putTableSchema(). Each example is taken from an open source project; the source file and license are noted above it.
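Before the examples, here is a minimal sketch of what the method does: putTableSchema() flattens a TableSchema into indexed string properties under a key prefix, so a schema can travel inside a plain Map<String, String> (for example into a catalog or a TableFactory). Note that the exact generated key names vary across Flink versions (schema.#.data-type in Flink 1.10-era code, schema.#.type earlier), so treat the keys shown in the comments as illustrative.

import org.apache.flink.table.api.DataTypes;
import org.apache.flink.table.api.TableSchema;
import org.apache.flink.table.descriptors.DescriptorProperties;

public class PutTableSchemaSketch {
    public static void main(String[] args) {
        TableSchema schema = TableSchema.builder()
            .field("id", DataTypes.INT())
            .field("name", DataTypes.STRING())
            .build();

        // "true" normalizes property keys, as most of the examples below do.
        DescriptorProperties props = new DescriptorProperties(true);
        props.putTableSchema("schema", schema);

        // Prints the flattened entries, roughly:
        //   schema.0.name      -> id
        //   schema.0.data-type -> INT
        //   schema.1.name      -> name
        //   schema.1.data-type -> STRING
        props.asMap().forEach((k, v) -> System.out.println(k + " -> " + v));
    }
}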
Example 1
Source File: HBaseConnectorITCase.java From flink with Apache License 2.0
private static Map<String, String> hbaseTableProperties() {
    Map<String, String> properties = new HashMap<>();
    properties.put(CONNECTOR_TYPE, CONNECTOR_TYPE_VALUE_HBASE);
    properties.put(CONNECTOR_VERSION, CONNECTOR_VERSION_VALUE_143);
    properties.put(CONNECTOR_PROPERTY_VERSION, "1");
    properties.put(CONNECTOR_TABLE_NAME, TEST_TABLE_1);
    // get zk quorum from "hbase-site.xml" in classpath
    String hbaseZk = HBaseConfiguration.create().get(HConstants.ZOOKEEPER_QUORUM);
    properties.put(CONNECTOR_ZK_QUORUM, hbaseZk);
    // schema
    String[] columnNames = {FAMILY1, ROWKEY, FAMILY2, FAMILY3};
    TypeInformation<Row> f1 = Types.ROW_NAMED(new String[]{F1COL1}, Types.INT);
    TypeInformation<Row> f2 = Types.ROW_NAMED(new String[]{F2COL1, F2COL2}, Types.STRING, Types.LONG);
    TypeInformation<Row> f3 = Types.ROW_NAMED(new String[]{F3COL1, F3COL2, F3COL3}, Types.DOUBLE, Types.BOOLEAN, Types.STRING);
    TypeInformation[] columnTypes = new TypeInformation[]{f1, Types.INT, f2, f3};
    DescriptorProperties descriptorProperties = new DescriptorProperties(true);
    TableSchema tableSchema = new TableSchema(columnNames, columnTypes);
    descriptorProperties.putTableSchema(SCHEMA, tableSchema);
    descriptorProperties.putProperties(properties);
    return descriptorProperties.asMap();
}
Example 2
Source File: HBaseTableFactoryTest.java From flink with Apache License 2.0
private DescriptorProperties createDescriptor(String[] columnNames, TypeInformation[] columnTypes) {
    TableSchema tableSchema = new TableSchema(columnNames, columnTypes);
    Map<String, String> tableProperties = new HashMap<>();
    tableProperties.put("connector.type", "hbase");
    tableProperties.put("connector.version", "1.4.3");
    tableProperties.put("connector.property-version", "1");
    tableProperties.put("connector.table-name", "testHBastTable");
    tableProperties.put("connector.zookeeper.quorum", "localhost:2181");
    tableProperties.put("connector.zookeeper.znode.parent", "/flink");
    tableProperties.put("connector.write.buffer-flush.max-size", "10mb");
    tableProperties.put("connector.write.buffer-flush.max-rows", "1000");
    tableProperties.put("connector.write.buffer-flush.interval", "10s");
    DescriptorProperties descriptorProperties = new DescriptorProperties(true);
    descriptorProperties.putTableSchema(SCHEMA, tableSchema);
    descriptorProperties.putProperties(tableProperties);
    return descriptorProperties;
}
Example 3
Source File: HBaseTableFactoryTest.java From flink with Apache License 2.0
private DescriptorProperties createDescriptor(TableSchema tableSchema) {
    Map<String, String> tableProperties = new HashMap<>();
    tableProperties.put("connector.type", "hbase");
    tableProperties.put("connector.version", "1.4.3");
    tableProperties.put("connector.property-version", "1");
    tableProperties.put("connector.table-name", "testHBastTable");
    tableProperties.put("connector.zookeeper.quorum", "localhost:2181");
    tableProperties.put("connector.zookeeper.znode.parent", "/flink");
    tableProperties.put("connector.write.buffer-flush.max-size", "10mb");
    tableProperties.put("connector.write.buffer-flush.max-rows", "1000");
    tableProperties.put("connector.write.buffer-flush.interval", "10s");
    DescriptorProperties descriptorProperties = new DescriptorProperties(true);
    descriptorProperties.putTableSchema(SCHEMA, tableSchema);
    descriptorProperties.putProperties(tableProperties);
    return descriptorProperties;
}
Example 4
Source File: CsvTableSinkFactoryTest.java From flink with Apache License 2.0
private DescriptorProperties createDescriptor(TableSchema schema) {
    Map<String, String> properties = new HashMap<>();
    properties.put("connector.type", "filesystem");
    properties.put("connector.property-version", "1");
    properties.put("connector.path", "/path/to/csv");
    // format
    properties.put("format.type", "csv");
    properties.put("format.property-version", "1");
    properties.put("format.field-delimiter", ";");
    DescriptorProperties descriptor = new DescriptorProperties(true);
    descriptor.putProperties(properties);
    descriptor.putTableSchema(SCHEMA, schema);
    if (deriveSchema == TernaryBoolean.TRUE) {
        descriptor.putBoolean("format.derive-schema", true);
    } else if (deriveSchema == TernaryBoolean.FALSE) {
        descriptor.putTableSchema(FORMAT_FIELDS, testingSchema);
    }
    // nothing to put for UNDEFINED
    return descriptor;
}
Example 5
Source File: MockExternalCatalogTable.java From AthenaX with Apache License 2.0
ExternalCatalogTable toExternalCatalogTable() {
    TableSchema tableSchema = new TableSchema(schema.getFieldNames(), schema.getFieldTypes());
    ConnectorDescriptor descriptor = new ConnectorDescriptor(CONNECTOR_TYPE, CONNECTOR_VERSION, false) {
        @Override
        public void addConnectorProperties(DescriptorProperties properties) {
            properties.putTableSchema(TABLE_SCHEMA_CONNECTOR_PROPERTY, tableSchema);
            properties.putString(TABLE_DATA_CONNECTOR_PROPERTY, serializeRows());
        }
    };
    return new ExternalCatalogTable(descriptor, Option.empty(), Option.empty(), Option.empty(), Option.empty());
}
Example 6
Source File: KafkaJsonConnectorITest.java From AthenaX with Apache License 2.0
private static ExternalCatalogTable mockExternalCatalogTable(String topic, String brokerAddress) {
    TableSchema schema = new TableSchema(new String[] {"foo"}, new TypeInformation[] {INT_TYPE_INFO});
    ConnectorDescriptor descriptor = new ConnectorDescriptor("kafka+json", 1, false) {
        @Override
        public void addConnectorProperties(DescriptorProperties properties) {
            properties.putTableSchema(TOPIC_SCHEMA_KEY, schema);
            properties.putString(TOPIC_NAME_KEY, topic);
            properties.putString(KAFKA_CONFIG_PREFIX + "." + ConsumerConfig.GROUP_ID_CONFIG, "foo");
            properties.putString(KAFKA_CONFIG_PREFIX + "." + ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, brokerAddress);
            properties.putString(KAFKA_CONFIG_PREFIX + "." + ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
        }
    };
    return new ExternalCatalogTable(descriptor, Option.empty(), Option.empty(), Option.empty(), Option.empty());
}
Example 7
Source File: ITestUtil.java From AthenaX with Apache License 2.0
static KafkaInputExternalCatalogTable getKafkaExternalCatalogTable(Map<String, String> props) {
    ConnectorDescriptor descriptor = new ConnectorDescriptor("kafka+json", 1, false) {
        @Override
        public void addConnectorProperties(DescriptorProperties properties) {
            properties.putTableSchema("athenax.kafka.topic.schema", KafkaInputExternalCatalogTable.SCHEMA);
            properties.putProperties(props);
        }
    };
    return new KafkaInputExternalCatalogTable(descriptor);
}
Example 8
Source File: JdbcTableSourceSinkFactoryTest.java From flink with Apache License 2.0
private Map<String, String> getBasicProperties() {
    Map<String, String> properties = new HashMap<>();
    properties.put("connector.type", "jdbc");
    properties.put("connector.property-version", "1");
    properties.put("connector.url", "jdbc:derby:memory:mydb");
    properties.put("connector.table", "mytable");
    DescriptorProperties descriptorProperties = new DescriptorProperties();
    descriptorProperties.putProperties(properties);
    descriptorProperties.putTableSchema("schema", schema);
    return new HashMap<>(descriptorProperties.asMap());
}
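The flattened map built in examples like this one can also be read back: DescriptorProperties offers getTableSchema() (and getOptionalTableSchema() in Flink 1.9/1.10-era versions) for the reverse direction. A small round-trip sketch under that assumption:

import org.apache.flink.table.api.DataTypes;
import org.apache.flink.table.api.TableSchema;
import org.apache.flink.table.descriptors.DescriptorProperties;

import java.util.Map;

public class SchemaRoundTrip {
    public static void main(String[] args) {
        TableSchema original = TableSchema.builder()
            .field("id", DataTypes.INT())
            .build();

        // Flatten the schema into string properties under the "schema" prefix.
        DescriptorProperties out = new DescriptorProperties();
        out.putTableSchema("schema", original);
        Map<String, String> serialized = out.asMap();

        // Re-hydrate a fresh DescriptorProperties and read the schema back.
        DescriptorProperties in = new DescriptorProperties();
        in.putProperties(serialized);
        TableSchema restored = in.getTableSchema("schema");
        System.out.println(restored);
    }
}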
Example 9
Source File: CatalogTableImpl.java From flink with Apache License 2.0
@Override
public Map<String, String> toProperties() {
    DescriptorProperties descriptor = new DescriptorProperties();
    descriptor.putTableSchema(Schema.SCHEMA, getSchema());
    descriptor.putPartitionKeys(getPartitionKeys());
    Map<String, String> properties = new HashMap<>(getProperties());
    properties.remove(CatalogConfig.IS_GENERIC);
    descriptor.putProperties(properties);
    return descriptor.asMap();
}
Example 10
Source File: CatalogTableImpl.java From flink with Apache License 2.0
/**
 * Remove the redundant entries that {@link #toProperties()} generated from the schema and partition keys.
 */
public static Map<String, String> removeRedundant(
        Map<String, String> properties,
        TableSchema schema,
        List<String> partitionKeys) {
    Map<String, String> ret = new HashMap<>(properties);
    DescriptorProperties descriptorProperties = new DescriptorProperties();
    descriptorProperties.putTableSchema(Schema.SCHEMA, schema);
    descriptorProperties.putPartitionKeys(partitionKeys);
    descriptorProperties.asMap().keySet().forEach(ret::remove);
    return ret;
}
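Examples 9 and 10 are two halves of a round trip: toProperties() folds the schema and partition keys into the property map, and removeRedundant() strips exactly those generated keys out again. A usage sketch, assuming Flink 1.10-era CatalogTableImpl constructors (the property values and comment string are placeholders):

import org.apache.flink.table.api.DataTypes;
import org.apache.flink.table.api.TableSchema;
import org.apache.flink.table.catalog.CatalogTableImpl;

import java.util.Collections;
import java.util.HashMap;
import java.util.Map;

public class RedundantKeysSketch {
    public static void main(String[] args) {
        TableSchema schema = TableSchema.builder()
            .field("id", DataTypes.INT())
            .build();
        Map<String, String> userProps = new HashMap<>();
        userProps.put("connector.type", "filesystem");

        CatalogTableImpl table = new CatalogTableImpl(schema, userProps, "a comment");

        // toProperties() adds the generated schema.* keys ...
        Map<String, String> expanded = table.toProperties();

        // ... and removeRedundant() deletes exactly those keys again,
        // leaving only the user-supplied properties.
        Map<String, String> restored = CatalogTableImpl.removeRedundant(
            expanded, schema, Collections.emptyList());
        System.out.println(restored); // roughly: {connector.type=filesystem}
    }
}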
Example 11
Source File: HBaseConnectorITCase.java From flink with Apache License 2.0
@Test
public void testTableSink() throws Exception {
    HBaseTableSchema schema = new HBaseTableSchema();
    schema.addColumn(FAMILY1, F1COL1, Integer.class);
    schema.addColumn(FAMILY2, F2COL1, String.class);
    schema.addColumn(FAMILY2, F2COL2, Long.class);
    schema.setRowKey("rk", Integer.class);
    schema.addColumn(FAMILY3, F3COL1, Double.class);
    schema.addColumn(FAMILY3, F3COL2, Boolean.class);
    schema.addColumn(FAMILY3, F3COL3, String.class);

    Map<String, String> tableProperties = new HashMap<>();
    tableProperties.put("connector.type", "hbase");
    tableProperties.put("connector.version", "1.4.3");
    tableProperties.put("connector.property-version", "1");
    tableProperties.put("connector.table-name", TEST_TABLE_2);
    tableProperties.put("connector.zookeeper.quorum", getZookeeperQuorum());
    tableProperties.put("connector.zookeeper.znode.parent", "/hbase");
    DescriptorProperties descriptorProperties = new DescriptorProperties(true);
    descriptorProperties.putTableSchema(SCHEMA, schema.convertsToTableSchema());
    descriptorProperties.putProperties(tableProperties);
    TableSink tableSink = TableFactoryService
        .find(HBaseTableFactory.class, descriptorProperties.asMap())
        .createTableSink(descriptorProperties.asMap());

    StreamExecutionEnvironment execEnv = StreamExecutionEnvironment.getExecutionEnvironment();
    StreamTableEnvironment tEnv = StreamTableEnvironment.create(execEnv, streamSettings);

    DataStream<Row> ds = execEnv.fromCollection(testData1).returns(testTypeInfo1);
    tEnv.registerDataStream("src", ds);
    tEnv.registerTableSink("hbase", tableSink);

    String query = "INSERT INTO hbase SELECT ROW(f1c1), ROW(f2c1, f2c2), rowkey, ROW(f3c1, f3c2, f3c3) FROM src";
    tEnv.sqlUpdate(query);

    // wait to finish
    tEnv.execute("HBase Job");

    // start a batch scan job to verify contents in HBase table
    TableEnvironment batchTableEnv = createBatchTableEnv();
    HBaseTableSource hbaseTable = new HBaseTableSource(getConf(), TEST_TABLE_2);
    hbaseTable.setRowKey("rowkey", Integer.class);
    hbaseTable.addColumn(FAMILY1, F1COL1, Integer.class);
    hbaseTable.addColumn(FAMILY2, F2COL1, String.class);
    hbaseTable.addColumn(FAMILY2, F2COL2, Long.class);
    hbaseTable.addColumn(FAMILY3, F3COL1, Double.class);
    hbaseTable.addColumn(FAMILY3, F3COL2, Boolean.class);
    hbaseTable.addColumn(FAMILY3, F3COL3, String.class);
    batchTableEnv.registerTableSource("hTable", hbaseTable);

    Table table = batchTableEnv.sqlQuery(
        "SELECT " +
        "  h.rowkey, " +
        "  h.family1.col1, " +
        "  h.family2.col1, " +
        "  h.family2.col2, " +
        "  h.family3.col1, " +
        "  h.family3.col2, " +
        "  h.family3.col3 " +
        "FROM hTable AS h"
    );

    List<Row> results = collectBatchResult(table);
    String expected =
        "1,10,Hello-1,100,1.01,false,Welt-1\n" +
        "2,20,Hello-2,200,2.02,true,Welt-2\n" +
        "3,30,Hello-3,300,3.03,false,Welt-3\n" +
        "4,40,,400,4.04,true,Welt-4\n" +
        "5,50,Hello-5,500,5.05,false,Welt-5\n" +
        "6,60,Hello-6,600,6.06,true,Welt-6\n" +
        "7,70,Hello-7,700,7.07,false,Welt-7\n" +
        "8,80,,800,8.08,true,Welt-8\n";

    TestBaseUtils.compareResultAsText(results, expected);
}
Example 12
Source File: HiveTableUtil.java From flink with Apache License 2.0
public static Table instantiateHiveTable(ObjectPath tablePath, CatalogBaseTable table, HiveConf hiveConf) {
    if (!(table instanceof CatalogTableImpl) && !(table instanceof CatalogViewImpl)) {
        throw new CatalogException(
            "HiveCatalog only supports CatalogTableImpl and CatalogViewImpl");
    }
    // let Hive set default parameters for us, e.g. serialization.format
    Table hiveTable = org.apache.hadoop.hive.ql.metadata.Table.getEmptyTable(
        tablePath.getDatabaseName(), tablePath.getObjectName());
    hiveTable.setCreateTime((int) (System.currentTimeMillis() / 1000));

    Map<String, String> properties = new HashMap<>(table.getProperties());
    // Table comment
    if (table.getComment() != null) {
        properties.put(HiveCatalogConfig.COMMENT, table.getComment());
    }

    boolean isGeneric = HiveCatalog.isGenericForCreate(properties);

    // Hive table's StorageDescriptor
    StorageDescriptor sd = hiveTable.getSd();
    HiveTableUtil.setDefaultStorageFormat(sd, hiveConf);

    if (isGeneric) {
        DescriptorProperties tableSchemaProps = new DescriptorProperties(true);
        tableSchemaProps.putTableSchema(Schema.SCHEMA, table.getSchema());

        if (table instanceof CatalogTable) {
            tableSchemaProps.putPartitionKeys(((CatalogTable) table).getPartitionKeys());
        }

        properties.putAll(tableSchemaProps.asMap());
        properties = maskFlinkProperties(properties);
        hiveTable.setParameters(properties);
    } else {
        HiveTableUtil.initiateTableFromProperties(hiveTable, properties, hiveConf);
        List<FieldSchema> allColumns = HiveTableUtil.createHiveColumns(table.getSchema());
        // Table columns and partition keys
        if (table instanceof CatalogTableImpl) {
            CatalogTable catalogTable = (CatalogTableImpl) table;
            if (catalogTable.isPartitioned()) {
                int partitionKeySize = catalogTable.getPartitionKeys().size();
                List<FieldSchema> regularColumns = allColumns.subList(0, allColumns.size() - partitionKeySize);
                List<FieldSchema> partitionColumns = allColumns.subList(allColumns.size() - partitionKeySize, allColumns.size());
                sd.setCols(regularColumns);
                hiveTable.setPartitionKeys(partitionColumns);
            } else {
                sd.setCols(allColumns);
                hiveTable.setPartitionKeys(new ArrayList<>());
            }
        } else {
            sd.setCols(allColumns);
        }
        // Table properties
        hiveTable.getParameters().putAll(properties);
    }

    if (table instanceof CatalogViewImpl) {
        // TODO: [FLINK-12398] Support partitioned view in catalog API
        hiveTable.setPartitionKeys(new ArrayList<>());

        CatalogView view = (CatalogView) table;
        hiveTable.setViewOriginalText(view.getOriginalQuery());
        hiveTable.setViewExpandedText(view.getExpandedQuery());
        hiveTable.setTableType(TableType.VIRTUAL_VIEW.name());
    }

    return hiveTable;
}
Example 13
Source File: CatalogTableImpl.java From flink with Apache License 2.0
@Override
public Map<String, String> toProperties() {
    DescriptorProperties descriptor = new DescriptorProperties();
    descriptor.putTableSchema(Schema.SCHEMA, getSchema());
    Map<String, String> properties = new HashMap<>(getProperties());
    properties.remove(CatalogConfig.IS_GENERIC);
    descriptor.putProperties(properties);
    return descriptor.asMap();
}