org.apache.flink.table.api.config.OptimizerConfigOptions Java Examples
The following examples show how to use
org.apache.flink.table.api.config.OptimizerConfigOptions.
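Before the examples, a quick orientation: OptimizerConfigOptions holds the ConfigOption constants that tune the Blink planner's optimizer, and they are applied through the Configuration behind a TableEnvironment's TableConfig. The following is a minimal sketch of that pattern (the option values are illustrative choices for the example, not recommendations):

import org.apache.flink.configuration.Configuration;
import org.apache.flink.table.api.EnvironmentSettings;
import org.apache.flink.table.api.TableEnvironment;
import org.apache.flink.table.api.config.OptimizerConfigOptions;

public class OptimizerOptionsSketch {
    public static void main(String[] args) {
        TableEnvironment tEnv = TableEnvironment.create(
            EnvironmentSettings.newInstance().useBlinkPlanner().inBatchMode().build());

        // TableConfig exposes the underlying Configuration, where optimizer options live.
        Configuration conf = tEnv.getConfig().getConfiguration();

        // Allow the optimizer to reorder joins based on available statistics.
        conf.setBoolean(OptimizerConfigOptions.TABLE_OPTIMIZER_JOIN_REORDER_ENABLED, true);

        // Broadcast the smaller join side if it is below 10 MB (illustrative value).
        conf.setLong(OptimizerConfigOptions.TABLE_OPTIMIZER_BROADCAST_JOIN_THRESHOLD, 10 * 1024 * 1024);
    }
}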
Example #1
Source File: ExecutionContextTest.java From flink with Apache License 2.0
@Test
public void testConfiguration() throws Exception {
    final ExecutionContext<?> context = createConfigurationExecutionContext();
    final TableEnvironment tableEnv = context.getTableEnvironment();
    Configuration conf = tableEnv.getConfig().getConfiguration();

    assertEquals(
        100,
        conf.getInteger(ExecutionConfigOptions.TABLE_EXEC_SORT_DEFAULT_LIMIT));
    assertTrue(conf.getBoolean(ExecutionConfigOptions.TABLE_EXEC_SPILL_COMPRESSION_ENABLED));
    assertEquals(
        "128kb",
        conf.getString(ExecutionConfigOptions.TABLE_EXEC_SPILL_COMPRESSION_BLOCK_SIZE));
    assertTrue(conf.getBoolean(OptimizerConfigOptions.TABLE_OPTIMIZER_JOIN_REORDER_ENABLED));

    // these options are not modified and should be equal to their default value
    assertEquals(
        ExecutionConfigOptions.TABLE_EXEC_SORT_ASYNC_MERGE_ENABLED.defaultValue(),
        conf.getBoolean(ExecutionConfigOptions.TABLE_EXEC_SORT_ASYNC_MERGE_ENABLED));
    assertEquals(
        ExecutionConfigOptions.TABLE_EXEC_SHUFFLE_MODE.defaultValue(),
        conf.getString(ExecutionConfigOptions.TABLE_EXEC_SHUFFLE_MODE));
    assertEquals(
        OptimizerConfigOptions.TABLE_OPTIMIZER_BROADCAST_JOIN_THRESHOLD.defaultValue().longValue(),
        conf.getLong(OptimizerConfigOptions.TABLE_OPTIMIZER_BROADCAST_JOIN_THRESHOLD));
}
Example #2
Source File: ExecutionContextTest.java From flink with Apache License 2.0
@Test
public void testConfiguration() throws Exception {
    final ExecutionContext<?> context = createConfigurationExecutionContext();
    final TableEnvironment tableEnv = context.createEnvironmentInstance().getTableEnvironment();

    assertEquals(
        100,
        tableEnv.getConfig().getConfiguration().getInteger(
            ExecutionConfigOptions.TABLE_EXEC_SORT_DEFAULT_LIMIT));
    assertTrue(
        tableEnv.getConfig().getConfiguration().getBoolean(
            ExecutionConfigOptions.TABLE_EXEC_SPILL_COMPRESSION_ENABLED));
    assertEquals(
        "128kb",
        tableEnv.getConfig().getConfiguration().getString(
            ExecutionConfigOptions.TABLE_EXEC_SPILL_COMPRESSION_BLOCK_SIZE));
    assertTrue(
        tableEnv.getConfig().getConfiguration().getBoolean(
            OptimizerConfigOptions.TABLE_OPTIMIZER_JOIN_REORDER_ENABLED));

    // these options are not modified and should be equal to their default value
    assertEquals(
        ExecutionConfigOptions.TABLE_EXEC_SORT_ASYNC_MERGE_ENABLED.defaultValue(),
        tableEnv.getConfig().getConfiguration().getBoolean(
            ExecutionConfigOptions.TABLE_EXEC_SORT_ASYNC_MERGE_ENABLED));
    assertEquals(
        ExecutionConfigOptions.TABLE_EXEC_SHUFFLE_MODE.defaultValue(),
        tableEnv.getConfig().getConfiguration().getString(
            ExecutionConfigOptions.TABLE_EXEC_SHUFFLE_MODE));
    assertEquals(
        OptimizerConfigOptions.TABLE_OPTIMIZER_BROADCAST_JOIN_THRESHOLD.defaultValue().longValue(),
        tableEnv.getConfig().getConfiguration().getLong(
            OptimizerConfigOptions.TABLE_OPTIMIZER_BROADCAST_JOIN_THRESHOLD));
}
Example #3
Source File: FlinkSqlInterrpeter.java From zeppelin with Apache License 2.0
private Map<String, ConfigOption> extractTableConfigOptions() {
    Map<String, ConfigOption> configOptions = new HashMap<>();
    configOptions.putAll(extractConfigOptions(ExecutionConfigOptions.class));
    configOptions.putAll(extractConfigOptions(OptimizerConfigOptions.class));
    configOptions.putAll(extractConfigOptions(PythonOptions.class));
    return configOptions;
}
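The extractConfigOptions helper is not shown in this snippet. A plausible reflection-based implementation is sketched below; this is an assumption about how such a helper could work, not the actual Zeppelin code. It collects the public static ConfigOption constants declared on a class such as OptimizerConfigOptions, keyed by their config key:

import java.lang.reflect.Field;
import java.lang.reflect.Modifier;
import java.util.HashMap;
import java.util.Map;
import org.apache.flink.configuration.ConfigOption;

// Hypothetical sketch of the extractConfigOptions helper referenced above;
// the real Zeppelin implementation may differ.
private static Map<String, ConfigOption> extractConfigOptions(Class<?> clazz) {
    Map<String, ConfigOption> options = new HashMap<>();
    for (Field field : clazz.getFields()) {
        if (Modifier.isStatic(field.getModifiers())
                && ConfigOption.class.isAssignableFrom(field.getType())) {
            try {
                // Static field, so no instance is needed to read it.
                ConfigOption option = (ConfigOption) field.get(null);
                options.put(option.key(), option);
            } catch (IllegalAccessException e) {
                // Public static option constants should be readable; skip otherwise.
            }
        }
    }
    return options;
}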
Example #4
Source File: LocalExecutorITCase.java From flink with Apache License 2.0
@Test
public void testSetSessionProperties() throws Exception {
    final LocalExecutor executor = createDefaultExecutor(clusterClient);
    String key = OptimizerConfigOptions.TABLE_OPTIMIZER_AGG_PHASE_STRATEGY.key();
    final SessionContext session = new SessionContext("test-session", new Environment());
    String sessionId = executor.openSession(session);

    // check the config in Environment
    assertNull(executor.getSessionProperties(sessionId).get(key));
    // check the config in TableConfig
    assertNull(executor.getExecutionContext(sessionId)
        .getTableEnvironment().getConfig().getConfiguration().getString(key, null));

    // modify config
    executor.setSessionProperty(sessionId, key, "ONE_PHASE");

    // check the config in Environment again
    assertEquals("ONE_PHASE", executor.getSessionProperties(sessionId).get(key));
    // check the config in TableConfig again
    assertEquals("ONE_PHASE", executor.getExecutionContext(sessionId)
        .getTableEnvironment().getConfig().getConfiguration().getString(key, null));

    // reset all properties
    executor.resetSessionProperties(sessionId);

    // check the config in Environment
    assertNull(executor.getSessionProperties(sessionId).get(key));
    // check the config in TableConfig
    assertNull(executor.getExecutionContext(sessionId)
        .getTableEnvironment().getConfig().getConfiguration().getString(key, null));
}
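Outside of the SQL Client's session-property layer, the same optimizer option can be set directly on a TableEnvironment. A minimal sketch, assuming a TableEnvironment named tEnv is already in scope:

// Force single-phase aggregation; "AUTO" (the default) and "TWO_PHASE"
// are the other documented values for this option.
tEnv.getConfig().getConfiguration().setString(
    OptimizerConfigOptions.TABLE_OPTIMIZER_AGG_PHASE_STRATEGY, "ONE_PHASE");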
Example #5
Source File: FlinkBatchSqlInterpreterTest.java From zeppelin with Apache License 2.0
@Test
public void testSetTableConfig() throws InterpreterException, IOException {
    hiveShell.execute("create table source_table (id int, name string)");
    hiveShell.execute("insert into source_table values(1, 'a'), (2, 'b')");

    File destDir = Files.createTempDirectory("flink_test").toFile();
    FileUtils.deleteDirectory(destDir);
    InterpreterResult result = sqlInterpreter.interpret(
        "CREATE TABLE sink_table (\n" +
        "id int,\n" +
        "name string" +
        ") WITH (\n" +
        "'format.field-delimiter'=',',\n" +
        "'connector.type'='filesystem',\n" +
        "'format.derive-schema'='true',\n" +
        "'connector.path'='" + destDir.getAbsolutePath() + "',\n" +
        "'format.type'='csv'\n" +
        ");",
        getInterpreterContext());
    assertEquals(InterpreterResult.Code.SUCCESS, result.code());

    // set parallelism then insert into
    InterpreterContext context = getInterpreterContext();
    result = sqlInterpreter.interpret(
        "set table.exec.resource.default-parallelism=10;" +
        "insert into sink_table select * from source_table",
        context);
    assertEquals(InterpreterResult.Code.SUCCESS, result.code());
    List<InterpreterResultMessage> resultMessages = context.out.toInterpreterResultMessage();
    assertEquals("Insertion successfully.\n", resultMessages.get(0).getData());
    assertEquals(ExecutionConfigOptions.TABLE_EXEC_RESOURCE_DEFAULT_PARALLELISM.defaultValue(),
        sqlInterpreter.tbenv.getConfig().getConfiguration()
            .get(ExecutionConfigOptions.TABLE_EXEC_RESOURCE_DEFAULT_PARALLELISM));

    // set then insert into
    destDir.delete();
    context = getInterpreterContext();
    result = sqlInterpreter.interpret(
        "set table.optimizer.source.predicate-pushdown-enabled=false;" +
        "insert into sink_table select * from source_table",
        context);
    assertEquals(InterpreterResult.Code.SUCCESS, result.code());
    resultMessages = context.out.toInterpreterResultMessage();
    assertEquals("Insertion successfully.\n", resultMessages.get(0).getData());
    assertEquals(ExecutionConfigOptions.TABLE_EXEC_RESOURCE_DEFAULT_PARALLELISM.defaultValue(),
        sqlInterpreter.tbenv.getConfig().getConfiguration()
            .get(ExecutionConfigOptions.TABLE_EXEC_RESOURCE_DEFAULT_PARALLELISM));
    assertEquals(OptimizerConfigOptions.TABLE_OPTIMIZER_SOURCE_PREDICATE_PUSHDOWN_ENABLED.defaultValue(),
        sqlInterpreter.tbenv.getConfig().getConfiguration()
            .get(OptimizerConfigOptions.TABLE_OPTIMIZER_SOURCE_PREDICATE_PUSHDOWN_ENABLED));

    // invalid config
    destDir.delete();
    context = getInterpreterContext();
    result = sqlInterpreter.interpret(
        "set table.invalid_config=false;" +
        "insert into sink_table select * from source_table",
        context);
    assertEquals(InterpreterResult.Code.ERROR, result.code());
    resultMessages = context.out.toInterpreterResultMessage();
    assertTrue(resultMessages.get(0).getData(),
        resultMessages.get(0).getData().contains("table.invalid_config is not a valid table/sql config"));
}
Example #6
Source File: TpcdsTestProgram.java From flink with Apache License 2.0
/**
 * Prepare TableEnvironment for query.
 *
 * @param sourceTablePath path to the TPC-DS source data files
 * @param useTableStats whether to register table and column statistics
 * @return the configured TableEnvironment
 */
private static TableEnvironment prepareTableEnv(String sourceTablePath, Boolean useTableStats) {
    // init Table Env
    EnvironmentSettings environmentSettings = EnvironmentSettings
        .newInstance()
        .useBlinkPlanner()
        .inBatchMode()
        .build();
    TableEnvironment tEnv = TableEnvironment.create(environmentSettings);

    // config Optimizer parameters
    tEnv.getConfig().getConfiguration()
        .setInteger(ExecutionConfigOptions.TABLE_EXEC_RESOURCE_DEFAULT_PARALLELISM, 4);
    tEnv.getConfig().getConfiguration()
        .setLong(OptimizerConfigOptions.TABLE_OPTIMIZER_BROADCAST_JOIN_THRESHOLD, 10 * 1024 * 1024);
    tEnv.getConfig().getConfiguration()
        .setBoolean(OptimizerConfigOptions.TABLE_OPTIMIZER_JOIN_REORDER_ENABLED, true);

    // register TPC-DS tables
    TPCDS_TABLES.forEach(table -> {
        TpcdsSchema schema = TpcdsSchemaProvider.getTableSchema(table);
        CsvTableSource.Builder builder = CsvTableSource.builder();
        builder.path(sourceTablePath + FILE_SEPARATOR + table + DATA_SUFFIX);
        for (int i = 0; i < schema.getFieldNames().size(); i++) {
            builder.field(
                schema.getFieldNames().get(i),
                TypeConversions.fromDataTypeToLegacyInfo(schema.getFieldTypes().get(i)));
        }
        builder.fieldDelimiter(COL_DELIMITER);
        builder.emptyColumnAsNull();
        builder.lineDelimiter("\n");
        CsvTableSource tableSource = builder.build();
        ConnectorCatalogTable catalogTable = ConnectorCatalogTable.source(tableSource, true);
        tEnv.getCatalog(tEnv.getCurrentCatalog()).ifPresent(catalog -> {
            try {
                catalog.createTable(
                    new ObjectPath(tEnv.getCurrentDatabase(), table),
                    catalogTable,
                    false);
            } catch (Exception e) {
                throw new RuntimeException(e);
            }
        });
    });

    // register statistics info
    if (useTableStats) {
        TpcdsStatsProvider.registerTpcdsStats(tEnv);
    }
    return tEnv;
}
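A caller could then run a query against the prepared environment. The sketch below is hypothetical: the data path and the one-line query are placeholders (store_sales is one of the registered TPC-DS tables), standing in for the actual TPC-DS query strings the test program runs:

// Hypothetical usage of prepareTableEnv; path and query are stand-ins.
TableEnvironment tEnv = prepareTableEnv("/path/to/tpcds/data", true);
Table result = tEnv.sqlQuery("SELECT COUNT(*) FROM store_sales");

From here the result follows the usual Table API flow, e.g. emitting to a registered sink before the job is executed.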