Java Code Examples for org.apache.flink.table.api.TableEnvironment#create()
The following examples show how to use org.apache.flink.table.api.TableEnvironment#create(). Each snippet is taken from an open source project; the source file, originating project, and license are noted above it.
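Before the individual examples, here is a minimal, self-contained sketch of the pattern they all share: build an EnvironmentSettings object, pass it to TableEnvironment.create(), and then work with the returned environment. The class name and the inline values below are illustrative only and are not taken from any of the listed projects; the calls themselves follow the same Flink Table API used throughout this page.

import org.apache.flink.table.api.EnvironmentSettings;
import org.apache.flink.table.api.Table;
import org.apache.flink.table.api.TableEnvironment;

// Hypothetical demo class, not part of any project cited below.
public class TableEnvironmentCreateExample {

    public static void main(String[] args) {
        // The planner and execution mode are chosen through EnvironmentSettings.
        EnvironmentSettings settings = EnvironmentSettings.newInstance()
                .useBlinkPlanner()   // Blink planner, as in most examples on this page
                .inBatchMode()       // use inStreamingMode() for unbounded sources
                .build();

        // create() returns a unified TableEnvironment that is independent of the DataStream API.
        TableEnvironment tEnv = TableEnvironment.create(settings);

        // Quick sanity check: build a small inline table and print it.
        Table numbers = tEnv.fromValues(1L, 2L, 3L).as("x");
        numbers.execute().print();
    }
}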
Example 1
Source File: CatalogStatisticsTest.java From flink with Apache License 2.0
@Test
public void testGetStatsFromCatalog() throws Exception {
    EnvironmentSettings settings = EnvironmentSettings.newInstance().useBlinkPlanner().inBatchMode().build();
    TableEnvironment tEnv = TableEnvironment.create(settings);
    tEnv.registerTableSource("T1", new TestTableSource(true, tableSchema));
    tEnv.registerTableSource("T2", new TestTableSource(true, tableSchema));

    Catalog catalog = tEnv.getCatalog(tEnv.getCurrentCatalog()).orElse(null);
    assertNotNull(catalog);
    catalog.alterTableStatistics(ObjectPath.fromString("default_database.T1"),
            new CatalogTableStatistics(100, 10, 1000L, 2000L), true);
    catalog.alterTableStatistics(ObjectPath.fromString("default_database.T2"),
            new CatalogTableStatistics(100000000, 1000, 1000000000L, 2000000000L), true);
    catalog.alterTableColumnStatistics(ObjectPath.fromString("default_database.T1"), createColumnStats(), true);
    catalog.alterTableColumnStatistics(ObjectPath.fromString("default_database.T2"), createColumnStats(), true);

    Table table = tEnv.sqlQuery("select * from T1, T2 where T1.s3 = T2.s3");
    String result = tEnv.explain(table);
    // T1 is broadcast side
    String expected = TableTestUtil.readFromResource("/explain/testGetStatsFromCatalog.out");
    assertEquals(expected, TableTestUtil.replaceStageId(result));
}
Example 2
Source File: BatchSQLTestProgram.java From flink with Apache License 2.0
public static void main(String[] args) throws Exception {
    ParameterTool params = ParameterTool.fromArgs(args);
    String outputPath = params.getRequired("outputPath");
    String sqlStatement = params.getRequired("sqlStatement");

    TableEnvironment tEnv = TableEnvironment.create(EnvironmentSettings.newInstance()
            .useBlinkPlanner()
            .inBatchMode()
            .build());

    ((TableEnvironmentInternal) tEnv).registerTableSourceInternal("table1", new GeneratorTableSource(10, 100, 60, 0));
    ((TableEnvironmentInternal) tEnv).registerTableSourceInternal("table2", new GeneratorTableSource(5, 0.2f, 60, 5));
    ((TableEnvironmentInternal) tEnv).registerTableSinkInternal("sinkTable",
            new CsvTableSink(outputPath)
                    .configure(new String[]{"f0", "f1"}, new TypeInformation[]{Types.INT, Types.SQL_TIMESTAMP}));

    TableResult result = tEnv.executeSql(sqlStatement);
    // wait for the job to finish
    result.getJobClient().get().getJobExecutionResult(Thread.currentThread().getContextClassLoader()).get();
}
Example 3
Source File: BlinkBatchPythonUdfSqlJob.java From flink with Apache License 2.0
public static void main(String[] args) {
    TableEnvironment tEnv = TableEnvironment.create(
            EnvironmentSettings.newInstance().useBlinkPlanner().inBatchMode().build());
    tEnv.getConfig().getConfiguration().set(CoreOptions.DEFAULT_PARALLELISM, 1);

    tEnv.executeSql("create temporary system function add_one as 'add_one.add_one' language python");

    tEnv.createTemporaryView("source", tEnv.fromValues(1L, 2L, 3L).as("a"));

    Iterator<Row> result = tEnv.executeSql("select add_one(a) as a from source").collect();

    List<Long> actual = new ArrayList<>();
    while (result.hasNext()) {
        Row r = result.next();
        actual.add((Long) r.getField(0));
    }

    List<Long> expected = Arrays.asList(2L, 3L, 4L);
    if (!actual.equals(expected)) {
        throw new AssertionError(
                String.format("The output result: %s is not as expected: %s!", actual, expected));
    }
}
Example 4
Source File: SpendReportTest.java From flink-playgrounds with Apache License 2.0
@Test
public void testReport() {
    EnvironmentSettings settings = EnvironmentSettings.newInstance().inBatchMode().build();
    TableEnvironment tEnv = TableEnvironment.create(settings);

    Table transactions = tEnv.fromValues(
            DataTypes.ROW(
                    DataTypes.FIELD("account_id", DataTypes.BIGINT()),
                    DataTypes.FIELD("amount", DataTypes.BIGINT()),
                    DataTypes.FIELD("transaction_time", DataTypes.TIMESTAMP(3))),
            Row.of(1, 188, DATE_TIME.plusMinutes(12)),
            Row.of(2, 374, DATE_TIME.plusMinutes(47)),
            Row.of(3, 112, DATE_TIME.plusMinutes(36)),
            Row.of(4, 478, DATE_TIME.plusMinutes(3)),
            Row.of(5, 208, DATE_TIME.plusMinutes(8)),
            Row.of(1, 379, DATE_TIME.plusMinutes(53)),
            Row.of(2, 351, DATE_TIME.plusMinutes(32)),
            Row.of(3, 320, DATE_TIME.plusMinutes(31)),
            Row.of(4, 259, DATE_TIME.plusMinutes(19)),
            Row.of(5, 273, DATE_TIME.plusMinutes(42)));

    try {
        TableResult results = SpendReport.report(transactions).execute();
        MatcherAssert.assertThat(
                materialize(results),
                Matchers.containsInAnyOrder(
                        Row.of(1L, DATE_TIME, 567L),
                        Row.of(2L, DATE_TIME, 725L),
                        Row.of(3L, DATE_TIME, 432L),
                        Row.of(4L, DATE_TIME, 737L),
                        Row.of(5L, DATE_TIME, 481L)));
    } catch (UnimplementedException e) {
        Assume.assumeNoException("The walkthrough has not been implemented", e);
    }
}
Example 5
Source File: HiveLookupJoinITCase.java From flink with Apache License 2.0
@Before
public void setup() {
    EnvironmentSettings settings = EnvironmentSettings.newInstance().inStreamingMode().useBlinkPlanner().build();
    tableEnv = TableEnvironment.create(settings);
    hiveCatalog = HiveTestUtils.createHiveCatalog();
    tableEnv.registerCatalog(hiveCatalog.getName(), hiveCatalog);
    tableEnv.useCatalog(hiveCatalog.getName());
}
Example 6
Source File: BuiltInFunctionTestBase.java From flink with Apache License 2.0
@Test
public void testFunction() {
    final TableEnvironment env = TableEnvironment.create(EnvironmentSettings.newInstance().build());
    final DataTypeFactory dataTypeFactory = ((TableEnvironmentInternal) env)
            .getCatalogManager()
            .getDataTypeFactory();

    final Table inputTable;
    if (testSpec.fieldDataTypes == null) {
        inputTable = env.fromValues(Row.of(testSpec.fieldData));
    } else {
        final DataTypes.UnresolvedField[] fields = IntStream.range(0, testSpec.fieldDataTypes.length)
                .mapToObj(i -> DataTypes.FIELD("f" + i, testSpec.fieldDataTypes[i]))
                .toArray(DataTypes.UnresolvedField[]::new);
        inputTable = env.fromValues(DataTypes.ROW(fields), Row.of(testSpec.fieldData));
    }

    for (TestItem testItem : testSpec.testItems) {
        try {
            if (testItem instanceof TableApiResultTestItem) {
                testTableApiResult(dataTypeFactory, inputTable, ((TableApiResultTestItem) testItem));
            } else if (testItem instanceof TableApiErrorTestItem) {
                testTableApiError(inputTable, ((TableApiErrorTestItem) testItem));
            } else if (testItem instanceof SqlResultTestItem) {
                testSqlResult(dataTypeFactory, env, inputTable, ((SqlResultTestItem) testItem));
            } else if (testItem instanceof SqlErrorTestItem) {
                testSqlError(env, inputTable, ((SqlErrorTestItem) testItem));
            }
        } catch (Throwable t) {
            throw new AssertionError("Failing test item: " + testItem.toString(), t);
        }
    }
}
Example 7
Source File: CatalogConstraintTest.java From flink with Apache License 2.0
@Before
public void setup() {
    EnvironmentSettings settings = EnvironmentSettings.newInstance().inBatchMode().build();
    tEnv = TableEnvironment.create(settings);
    catalog = tEnv.getCatalog(tEnv.getCurrentCatalog()).orElse(null);
    assertNotNull(catalog);
}
Example 8
Source File: CatalogStatisticsTest.java From flink with Apache License 2.0
@Before
public void setup() {
    EnvironmentSettings settings = EnvironmentSettings.newInstance().inBatchMode().build();
    tEnv = TableEnvironment.create(settings);
    catalog = tEnv.getCatalog(tEnv.getCurrentCatalog()).orElse(null);
    assertNotNull(catalog);
}
Example 9
Source File: HiveCatalogITCase.java From flink with Apache License 2.0
private TableEnvironment prepareTable(boolean isStreaming) {
    EnvironmentSettings.Builder builder = EnvironmentSettings.newInstance().useBlinkPlanner();
    if (isStreaming) {
        builder = builder.inStreamingMode();
    } else {
        builder = builder.inBatchMode();
    }
    EnvironmentSettings settings = builder.build();
    TableEnvironment tableEnv = TableEnvironment.create(settings);
    tableEnv.getConfig().getConfiguration().setInteger(TABLE_EXEC_RESOURCE_DEFAULT_PARALLELISM, 1);

    tableEnv.registerCatalog("myhive", hiveCatalog);
    tableEnv.useCatalog("myhive");

    String srcPath = this.getClass().getResource("/csv/test3.csv").getPath();

    tableEnv.executeSql("CREATE TABLE proctime_src (" +
            "price DECIMAL(10, 2)," +
            "currency STRING," +
            "ts6 TIMESTAMP(6)," +
            "ts AS CAST(ts6 AS TIMESTAMP(3))," +
            "WATERMARK FOR ts AS ts," +
            "l_proctime AS PROCTIME( )) " + // test " " in proctime()
            String.format("WITH (" +
                    "'connector.type' = 'filesystem'," +
                    "'connector.path' = 'file://%s'," +
                    "'format.type' = 'csv')", srcPath));

    return tableEnv;
}
Example 10
Source File: HiveCatalogITCase.java From flink with Apache License 2.0
@Test
public void testReadWriteCsv() throws Exception {
    // similar to CatalogTableITCase::testReadWriteCsvUsingDDL but uses HiveCatalog
    EnvironmentSettings settings = EnvironmentSettings.newInstance().useBlinkPlanner().inStreamingMode().build();
    TableEnvironment tableEnv = TableEnvironment.create(settings);
    tableEnv.getConfig().getConfiguration().setInteger(TABLE_EXEC_RESOURCE_DEFAULT_PARALLELISM, 1);

    tableEnv.registerCatalog("myhive", hiveCatalog);
    tableEnv.useCatalog("myhive");

    String srcPath = this.getClass().getResource("/csv/test3.csv").getPath();

    tableEnv.executeSql("CREATE TABLE src (" +
            "price DECIMAL(10, 2),currency STRING,ts6 TIMESTAMP(6),ts AS CAST(ts6 AS TIMESTAMP(3)),WATERMARK FOR ts AS ts) " +
            String.format("WITH ('connector.type' = 'filesystem','connector.path' = 'file://%s','format.type' = 'csv')", srcPath));

    String sinkPath = new File(tempFolder.newFolder(), "csv-order-sink").toURI().toString();

    tableEnv.executeSql("CREATE TABLE sink (" +
            "window_end TIMESTAMP(3),max_ts TIMESTAMP(6),counter BIGINT,total_price DECIMAL(10, 2)) " +
            String.format("WITH ('connector.type' = 'filesystem','connector.path' = '%s','format.type' = 'csv')", sinkPath));

    TableEnvUtil.execInsertSqlAndWaitResult(tableEnv, "INSERT INTO sink " +
            "SELECT TUMBLE_END(ts, INTERVAL '5' SECOND),MAX(ts6),COUNT(*),MAX(price) FROM src " +
            "GROUP BY TUMBLE(ts, INTERVAL '5' SECOND)");

    String expected = "2019-12-12 00:00:05.0,2019-12-12 00:00:04.004001,3,50.00\n" +
            "2019-12-12 00:00:10.0,2019-12-12 00:00:06.006001,2,5.33\n";
    assertEquals(expected, FileUtils.readFileUtf8(new File(new URI(sinkPath))));
}
Example 11
Source File: JdbcDynamicTableSinkITCase.java From flink with Apache License 2.0
@Test
public void testBatchSink() throws Exception {
    EnvironmentSettings bsSettings = EnvironmentSettings.newInstance()
            .useBlinkPlanner().inBatchMode().build();
    TableEnvironment tEnv = TableEnvironment.create(bsSettings);

    tEnv.executeSql(
            "CREATE TABLE USER_RESULT(" +
                    "NAME VARCHAR," +
                    "SCORE BIGINT" +
                    ") WITH ( " +
                    "'connector' = 'jdbc'," +
                    "'url'='" + DB_URL + "'," +
                    "'table-name' = '" + OUTPUT_TABLE3 + "'," +
                    "'sink.buffer-flush.max-rows' = '2'," +
                    "'sink.buffer-flush.interval' = '300ms'," +
                    "'sink.max-retries' = '4'" +
                    ")");

    TableResult tableResult = tEnv.executeSql("INSERT INTO USER_RESULT\n" +
            "SELECT user_name, score " +
            "FROM (VALUES (1, 'Bob'), (22, 'Tom'), (42, 'Kim'), " +
            "(42, 'Kim'), (1, 'Bob')) " +
            "AS UserCountTable(score, user_name)");
    // wait to finish
    tableResult.getJobClient().get().getJobExecutionResult(Thread.currentThread().getContextClassLoader()).get();

    check(new Row[] {
            Row.of("Bob", 1),
            Row.of("Tom", 22),
            Row.of("Kim", 42),
            Row.of("Kim", 42),
            Row.of("Bob", 1)
    }, DB_URL, OUTPUT_TABLE3, new String[]{"NAME", "SCORE"});
}
Example 12
Source File: PostgresCatalogITCase.java From flink with Apache License 2.0
@Before
public void setup() {
    EnvironmentSettings settings = EnvironmentSettings.newInstance().useBlinkPlanner().inStreamingMode().build();
    this.tEnv = TableEnvironment.create(settings);
    tEnv.getConfig().getConfiguration().setInteger(TABLE_EXEC_RESOURCE_DEFAULT_PARALLELISM.key(), 1);

    // use PG catalog
    tEnv.registerCatalog(TEST_CATALOG_NAME, catalog);
    tEnv.useCatalog(TEST_CATALOG_NAME);
}
Example 13
Source File: SqlSubmit.java From flink-sql-submit with Apache License 2.0
private void run() throws Exception {
    EnvironmentSettings settings = EnvironmentSettings.newInstance()
            .useBlinkPlanner()
            .inStreamingMode()
            .build();
    this.tEnv = TableEnvironment.create(settings);

    List<String> sql = Files.readAllLines(Paths.get(workSpace + "/" + sqlFilePath));
    List<SqlCommandCall> calls = SqlCommandParser.parse(sql);
    for (SqlCommandCall call : calls) {
        callCommand(call);
    }
    tEnv.execute("SQL Job");
}
Example 14
Source File: HBaseConnectorITCase.java From flink with Apache License 2.0
/**
 * Creates a Batch {@link TableEnvironment} depending on the {@link #planner} context.
 */
private TableEnvironment createBatchTableEnv() {
    if (OLD_PLANNER.equals(planner)) {
        ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
        return BatchTableEnvironment.create(env, new TableConfig());
    } else {
        return TableEnvironment.create(batchSettings);
    }
}
Example 15
Source File: SpendReport.java From flink-playgrounds with Apache License 2.0
public static void main(String[] args) throws Exception {
    EnvironmentSettings settings = EnvironmentSettings.newInstance().build();
    TableEnvironment tEnv = TableEnvironment.create(settings);

    tEnv.executeSql("CREATE TABLE transactions (\n" +
            " account_id BIGINT,\n" +
            " amount BIGINT,\n" +
            " transaction_time TIMESTAMP(3),\n" +
            " WATERMARK FOR transaction_time AS transaction_time - INTERVAL '5' SECOND\n" +
            ") WITH (\n" +
            " 'connector' = 'kafka',\n" +
            " 'topic' = 'transactions',\n" +
            " 'properties.bootstrap.servers' = 'kafka:9092',\n" +
            " 'format' = 'csv'\n" +
            ")");

    tEnv.executeSql("CREATE TABLE spend_report (\n" +
            " account_id BIGINT,\n" +
            " log_ts TIMESTAMP(3),\n" +
            " amount BIGINT\n," +
            " PRIMARY KEY (account_id, log_ts) NOT ENFORCED" +
            ") WITH (\n" +
            " 'connector' = 'jdbc',\n" +
            " 'url' = 'jdbc:mysql://mysql:3306/sql-demo',\n" +
            " 'table-name' = 'spend_report',\n" +
            " 'driver' = 'com.mysql.jdbc.Driver',\n" +
            " 'username' = 'sql-demo',\n" +
            " 'password' = 'demo-sql'\n" +
            ")");

    Table transactions = tEnv.from("transactions");
    report(transactions).executeInsert("spend_report");
}
Example 16
Source File: TpcdsTestProgram.java From flink with Apache License 2.0
/**
 * Prepare TableEnvironment for query.
 *
 * @param sourceTablePath
 * @return
 */
private static TableEnvironment prepareTableEnv(String sourceTablePath, Boolean useTableStats) {
    //init Table Env
    EnvironmentSettings environmentSettings = EnvironmentSettings
            .newInstance()
            .useBlinkPlanner()
            .inBatchMode()
            .build();
    TableEnvironment tEnv = TableEnvironment.create(environmentSettings);

    //config Optimizer parameters
    tEnv.getConfig().getConfiguration()
            .setInteger(ExecutionConfigOptions.TABLE_EXEC_RESOURCE_DEFAULT_PARALLELISM, 4);
    tEnv.getConfig().getConfiguration()
            .setLong(OptimizerConfigOptions.TABLE_OPTIMIZER_BROADCAST_JOIN_THRESHOLD, 10 * 1024 * 1024);
    tEnv.getConfig().getConfiguration()
            .setBoolean(OptimizerConfigOptions.TABLE_OPTIMIZER_JOIN_REORDER_ENABLED, true);

    //register TPC-DS tables
    TPCDS_TABLES.forEach(table -> {
        TpcdsSchema schema = TpcdsSchemaProvider.getTableSchema(table);
        CsvTableSource.Builder builder = CsvTableSource.builder();
        builder.path(sourceTablePath + FILE_SEPARATOR + table + DATA_SUFFIX);
        for (int i = 0; i < schema.getFieldNames().size(); i++) {
            builder.field(
                    schema.getFieldNames().get(i),
                    TypeConversions.fromDataTypeToLegacyInfo(schema.getFieldTypes().get(i)));
        }
        builder.fieldDelimiter(COL_DELIMITER);
        builder.emptyColumnAsNull();
        builder.lineDelimiter("\n");
        CsvTableSource tableSource = builder.build();
        ConnectorCatalogTable catalogTable = ConnectorCatalogTable.source(tableSource, true);
        tEnv.getCatalog(tEnv.getCurrentCatalog()).ifPresent(catalog -> {
            try {
                catalog.createTable(new ObjectPath(tEnv.getCurrentDatabase(), table), catalogTable, false);
            } catch (Exception e) {
                throw new RuntimeException(e);
            }
        });
    });

    // register statistics info
    if (useTableStats) {
        TpcdsStatsProvider.registerTpcdsStats(tEnv);
    }
    return tEnv;
}
Example 17
Source File: HiveCatalogITCase.java From flink with Apache License 2.0
@Test
public void testCsvTableViaAPI() throws Exception {
    EnvironmentSettings settings = EnvironmentSettings.newInstance().useBlinkPlanner().inBatchMode().build();
    TableEnvironment tableEnv = TableEnvironment.create(settings);
    tableEnv.getConfig().addConfiguration(new Configuration().set(CoreOptions.DEFAULT_PARALLELISM, 1));

    tableEnv.registerCatalog("myhive", hiveCatalog);
    tableEnv.useCatalog("myhive");

    TableSchema schema = TableSchema.builder()
            .field("name", DataTypes.STRING())
            .field("age", DataTypes.INT())
            .build();

    FormatDescriptor format = new OldCsv()
            .field("name", Types.STRING())
            .field("age", Types.INT());

    CatalogTable source = new CatalogTableBuilder(
            new FileSystem().path(this.getClass().getResource("/csv/test.csv").getPath()),
            schema)
            .withFormat(format)
            .inAppendMode()
            .withComment("Comment.")
            .build();

    Path p = Paths.get(tempFolder.newFolder().getAbsolutePath(), "test.csv");

    CatalogTable sink = new CatalogTableBuilder(
            new FileSystem().path(p.toAbsolutePath().toString()),
            schema)
            .withFormat(format)
            .inAppendMode()
            .withComment("Comment.")
            .build();

    hiveCatalog.createTable(
            new ObjectPath(HiveCatalog.DEFAULT_DB, sourceTableName),
            source,
            false
    );

    hiveCatalog.createTable(
            new ObjectPath(HiveCatalog.DEFAULT_DB, sinkTableName),
            sink,
            false
    );

    Table t = tableEnv.sqlQuery(
            String.format("select * from myhive.`default`.%s", sourceTableName));

    List<Row> result = Lists.newArrayList(t.execute().collect());
    result.sort(Comparator.comparing(String::valueOf));

    // assert query result
    assertEquals(
            Arrays.asList(
                    Row.of("1", 1),
                    Row.of("2", 2),
                    Row.of("3", 3)),
            result
    );

    TableEnvUtil.execInsertSqlAndWaitResult(tableEnv,
            String.format("insert into myhive.`default`.%s select * from myhive.`default`.%s",
                    sinkTableName, sourceTableName));

    // assert written result
    File resultFile = new File(p.toAbsolutePath().toString());
    BufferedReader reader = new BufferedReader(new FileReader(resultFile));
    String readLine;
    for (int i = 0; i < 3; i++) {
        readLine = reader.readLine();
        assertEquals(String.format("%d,%d", i + 1, i + 1), readLine);
    }

    // No more line
    assertNull(reader.readLine());

    tableEnv.executeSql(String.format("DROP TABLE %s", sourceTableName));
    tableEnv.executeSql(String.format("DROP TABLE %s", sinkTableName));
}
Example 18
Source File: SqlParserHelper.java From flink with Apache License 2.0
public SqlParserHelper() {
    tableEnv = TableEnvironment.create(EnvironmentSettings.newInstance().build());
}
Example 19
Source File: HiveTestUtils.java From flink with Apache License 2.0
public static TableEnvironment createTableEnv() {
    EnvironmentSettings settings = EnvironmentSettings.newInstance().useBlinkPlanner().inBatchMode().build();
    TableEnvironment tableEnv = TableEnvironment.create(settings);
    tableEnv.getConfig().getConfiguration().setInteger(TABLE_EXEC_RESOURCE_DEFAULT_PARALLELISM.key(), 1);
    return tableEnv;
}
Example 20
Source File: SqlConnect.java From flink-simple-tutorial with Apache License 2.0
public static void main(String[] args) throws Exception {
    EnvironmentSettings settings = EnvironmentSettings.newInstance()
            .useBlinkPlanner()
            .inStreamingMode()
            .build();
    // create a TableEnvironment that uses the Blink planner and runs in streaming mode
    TableEnvironment tEnv = TableEnvironment.create(settings);

    String kafkaSourceSql = "CREATE TABLE log (\n" +
            " t INT, \n" +
            " user_name VARCHAR,\n" +
            " cnt INT\n" +
            ") WITH (\n" +
            " 'connector.type' = 'kafka',\n" +
            " 'connector.version' = 'universal',\n" +
            " 'connector.topic' = 'flink',\n" +
            " 'connector.startup-mode' = 'latest-offset',\n" +
            " 'connector.properties.0.key' = 'group.id',\n" +
            " 'connector.properties.0.value' = 'testGroup',\n" +
            " 'connector.properties.1.key' = 'bootstrap.servers',\n" +
            " 'connector.properties.1.value' = '192.168.56.103:9092',\n" +
            " 'connector.specific-offsets.0.partition' = '0',\n" +
            " 'connector.specific-offsets.0.offset' = '0',\n" +
            " 'update-mode' = 'append',\n" +
            " 'format.type' = 'json',\n" +
            " 'format.derive-schema' = 'true'\n" +
            ")";

    String mysqlSinkSql = "CREATE TABLE sink (\n" +
            " t INT,\n" +
            " user_name VARCHAR,\n" +
            " total INT\n" +
            ") WITH (\n" +
            " 'connector.type' = 'jdbc',\n" +
            " 'connector.url' = 'jdbc:mysql://192.168.56.103:3306/flink',\n" +
            " 'connector.table' = 'log',\n" +
            " 'connector.username' = 'root',\n" +
            " 'connector.password' = '123456',\n" +
            " 'connector.write.flush.max-rows' = '1'\n" +
            ")";

    // 1. connect to Kafka and build the source table
    tEnv.sqlUpdate(kafkaSourceSql);
    // 2. define the table to write to
    tEnv.sqlUpdate(mysqlSinkSql);
    // 3. define the actual DML operation
    tEnv.sqlUpdate("INSERT INTO sink " +
            "SELECT * from log where cnt=100");

    tEnv.execute("SQL Job");
}