org.apache.kafka.common.config.ConfigValue Java Examples
The following examples show how to use
org.apache.kafka.common.config.ConfigValue.
You can vote up the ones you like or vote down the ones you don't like,
and go to the original project or source file by following the links above each example. You may check out the related API usage on the sidebar.
Example #1
Source File: MongoDbSinkConnector.java From MongoDb-Sink-Connector with Apache License 2.0 | 6 votes |
/**
 * Starts the connector: runs the ConfigDef validation over the supplied
 * properties, aggregates every per-property error into a single
 * ConfigException, and otherwise stores a copy of the configuration.
 *
 * @param props connector configuration supplied by the Connect framework
 * @throws ConfigException when one or more properties fail validation
 */
@Override
public void start(Map<String, String> props) {
    List<String> failures = new ArrayList<>();
    for (ConfigValue candidate : config().validate(props)) {
        if (candidate.errorMessages().isEmpty()) {
            continue;
        }
        failures.add("Property " + candidate.name() + " with value " + candidate.value()
                + " does not validate: " + String.join("; ", candidate.errorMessages()));
    }
    if (!failures.isEmpty()) {
        // Report every failing property at once instead of just the first one.
        throw new ConfigException("Configuration does not validate: \n\t"
                + String.join("\n\t", failures));
    }
    connectorConfig = new HashMap<>(props);
    logger.info(Utility.convertConfigToString(connectorConfig));
}
Example #2
Source File: ClickHouseConfigDef.java From kafka-connectors with Apache License 2.0 | 6 votes |
/**
 * Validates the configured source date-format patterns: when present, their
 * count must match the number of sink tables and each entry must be a valid
 * {@link SimpleDateFormat} pattern. Errors are attached to the date-format
 * ConfigValue. Skipped entirely when the sink-tables value or the connection
 * is unusable.
 */
public static void validateDateFormat(Map<String, ConfigValue> configValues, JdbcDataSource dataSource, ConfigValue sinkTablesVal) {
    ConfigValue dateFormat = configValues.get(CLICKHOUSE_SOURCE_DATE_FORMAT);
    // Both the sink-tables setting and the date-format setting must be
    // error-free and non-null, and a live connection must exist.
    boolean sinkTablesUsable = sinkTablesVal.errorMessages().isEmpty()
            && sinkTablesVal.value() != null && dataSource != null;
    if (!sinkTablesUsable || !dateFormat.errorMessages().isEmpty() || dateFormat.value() == null) {
        return;
    }
    String[] tables = ((String) sinkTablesVal.value()).split(CONFIG_SEPARATOR);
    String[] formats = ((String) dateFormat.value()).split(CONFIG_SEPARATOR);
    if (formats.length != tables.length) {
        dateFormat.addErrorMessage(dateFormat.name() + "需要与sink table个数一一对应");
        return;
    }
    for (String pattern : formats) {
        try {
            // SimpleDateFormat rejects illegal patterns at construction time.
            new SimpleDateFormat(pattern).format(new Date());
        } catch (Exception e) {
            logger.error("校验输入时间格式: ", e);
            dateFormat.addErrorMessage(String.format("%s, 值: %s, 不是正确的日期格式化格式", dateFormat.name(), pattern));
        }
    }
}
Example #3
Source File: ClickHouseConfigDef.java From kafka-connectors with Apache License 2.0 | 6 votes |
/**
 * Validates the sink date columns: their count must match the number of sink
 * tables, and each column must exist in its corresponding table (checked via
 * {@code descTable}). Errors are attached to the date-columns ConfigValue.
 * Skipped when the sink-tables value or the connection is unusable.
 */
public static void validateSinkDateColumns(Map<String, ConfigValue> configValues, JdbcDataSource dataSource, String sinkDb, ConfigValue sinkTablesVal) {
    ConfigValue sinkDateCols = configValues.get(CLICKHOUSE_SINK_DATE_COLUMNS);
    if (sinkTablesVal.errorMessages().isEmpty() && sinkTablesVal.value() != null && dataSource != null) {
        if (sinkDateCols.errorMessages().isEmpty() && sinkDateCols.value() != null) {
            String[] sinkTabArr = ((String) sinkTablesVal.value()).split(CONFIG_SEPARATOR);
            String[] sinkDateColArr = ((String) sinkDateCols.value()).split(CONFIG_SEPARATOR);
            if (sinkDateColArr.length != sinkTabArr.length) {
                // BUG FIX: this message previously carried sinkTablesVal.name()
                // even though the error is attached to the date-columns config;
                // label it with its own key (as validateDateFormat does).
                sinkDateCols.addErrorMessage(sinkDateCols.name() + "需要与sink table个数一一对应");
                return;
            }
            for (int i = 0; i < sinkTabArr.length; i++) {
                try {
                    List<String> columns = dataSource.descTable(String.format("`%s`.`%s`", sinkDb, sinkTabArr[i]));
                    if (!columns.contains(sinkDateColArr[i])) {
                        // BUG FIX: same mislabeling as above — use sinkDateCols.name().
                        sinkDateCols.addErrorMessage(String.format("%s, 字段: %s, 不存在table: %s中", sinkDateCols.name(), sinkDateColArr[i], sinkTabArr[i]));
                    }
                } catch (SQLException e) {
                    logger.error("查询ClickHouse失败,", e);
                    sinkDateCols.addErrorMessage(String.format("%s, 查询ClickHouse失败: %s", sinkDateCols.name(), e.getMessage()));
                }
            }
        }
    }
}
Example #4
Source File: ClickHouseConfigDef.java From kafka-connectors with Apache License 2.0 | 6 votes |
/**
 * Verifies that each configured local table exists in ClickHouse
 * (checked across every connection via {@code existAllDs}). Errors are
 * attached to the local-tables ConfigValue. Skipped when the value is
 * unusable or no connection is available.
 */
public static void localTableExist(JdbcDataSource dataSource, ConfigValue sinkLocalTablesVal, String sinkDb) {
    boolean checkable = sinkLocalTablesVal.errorMessages().isEmpty()
            && sinkLocalTablesVal.value() != null && dataSource != null;
    if (!checkable) {
        return;
    }
    for (String tableName : ((String) sinkLocalTablesVal.value()).split(CONFIG_SEPARATOR)) {
        try {
            if (!dataSource.existAllDs(sinkDb, tableName)) {
                sinkLocalTablesVal.addErrorMessage(String.format("%s, 表: %s, 不存在", sinkLocalTablesVal.name(), tableName));
            }
        } catch (SQLException e) {
            logger.error("查询ClickHouse失败,", e);
            sinkLocalTablesVal.addErrorMessage(String.format("%s, 查询ClickHouse失败: %s", sinkLocalTablesVal.name(), e.getMessage()));
        }
    }
}
Example #5
Source File: ClickHouseConfigDef.java From kafka-connectors with Apache License 2.0 | 6 votes |
/**
 * Verifies that each configured distributed table exists in the sink
 * database (compared against {@code SHOW TABLES}). Errors are attached to
 * the sink-tables ConfigValue. Skipped when the value is unusable or no
 * connection is available.
 */
public static void clusterTableExist(JdbcDataSource dataSource, ConfigValue sinkTablesVal, String sinkDb) {
    boolean checkable = sinkTablesVal.errorMessages().isEmpty()
            && sinkTablesVal.value() != null && dataSource != null;
    if (!checkable) {
        return;
    }
    try {
        List<String> existingTables = dataSource.showTables(sinkDb);
        for (String tableName : ((String) sinkTablesVal.value()).split(CONFIG_SEPARATOR)) {
            if (!existingTables.contains(tableName)) {
                sinkTablesVal.addErrorMessage(String.format("%s, 表: %s, 不存在", sinkTablesVal.name(), tableName));
            }
        }
    } catch (SQLException e) {
        sinkTablesVal.addErrorMessage(String.format("%s, 查询ClickHouse失败: %s", sinkTablesVal.name(), e.getMessage()));
        logger.error("查询ClickHouse失败,", e);
    }
}
Example #6
Source File: ClickHouseConfigDef.java From kafka-connectors with Apache License 2.0 | 6 votes |
/**
 * Non-empty validation for every declared config key: any key whose
 * importance is above LOW must have a non-empty value. Builds and returns
 * one ConfigValue per key, carrying the raw value and any error message.
 *
 * @param connectorConfigs raw connector properties
 * @param configDef        the definition whose keys are validated
 * @return map from config key name to its (possibly error-carrying) ConfigValue
 */
public static Map<String, ConfigValue> emptyValidate(Map<String, String> connectorConfigs, ConfigDef configDef) {
    Map<String, ConfigValue> validated = new HashMap<>(16);
    for (ConfigDef.ConfigKey key : configDef.configKeys().values()) {
        ConfigValue result = new ConfigValue(key.name);
        String rawValue = connectorConfigs.get(key.name);
        // LOW-importance keys are optional; everything else must be set.
        boolean required = key.importance != ConfigDef.Importance.LOW;
        if (required && StringUtils.isEmpty(rawValue)) {
            result.addErrorMessage(String.format("%s不能为空", key.name));
        }
        result.value(rawValue);
        validated.put(key.name, result);
    }
    return validated;
}
Example #7
Source File: JsonSinkClickHouseConnector.java From kafka-connectors with Apache License 2.0 | 6 votes |
/**
 * Count checks: the number of distributed tables (and, when configured,
 * local tables) must equal the number of topics. Skipped when no
 * distributed tables or topics are configured.
 */
private void validateTables(ConfigValue sinkTablesVal, ConfigValue sinkLocalTablesVal, String topics) {
    if (sinkTablesVal.value() == null || !StringUtils.isNotEmpty(topics)) {
        return;
    }
    String[] topicNames = topics.split(CONFIG_SEPARATOR);
    String[] distributedTables = ((String) sinkTablesVal.value()).split(CONFIG_SEPARATOR);
    if (topicNames.length != distributedTables.length) {
        sinkTablesVal.addErrorMessage(sinkTablesVal.name() + "需要与topic个数一一对应");
    }
    if (sinkLocalTablesVal.value() != null) {
        String[] localTables = ((String) sinkLocalTablesVal.value()).split(CONFIG_SEPARATOR);
        if (topicNames.length != localTables.length) {
            sinkLocalTablesVal.addErrorMessage(sinkLocalTablesVal.name() + "需要与topic个数一一对应");
        }
    }
}
Example #8
Source File: JsonSinkClickHouseConnector.java From kafka-connectors with Apache License 2.0 | 6 votes |
/**
 * Full connector validation: per-field non-empty checks, a ClickHouse
 * connectivity check, then cross-field checks (topic/table counts, table
 * existence, date columns and formats). The data source opened for the
 * checks is closed before returning.
 */
@Override
public Config validate(Map<String, String> connectorConfigs) {
    // Phase 1: every declared key must be non-empty (unless LOW importance).
    Map<String, ConfigValue> configValues = ClickHouseConfigDef.emptyValidate(connectorConfigs, config());
    // Phase 2: connectivity; null when the connection could not be validated,
    // which makes the dataSource-guarded checks below no-ops.
    JdbcDataSource dataSource = ClickHouseConfigDef.clickHouseConnectValidate(configValues);
    ConfigValue sinkTablesVal = configValues.get(CLICKHOUSE_SINK_TABLES);
    ConfigValue sinkLocalTablesVal = configValues.get(CLICKHOUSE_SINK_LOCAL_TABLES);
    // Phase 3: cross-field checks.
    validateTables(sinkTablesVal, sinkLocalTablesVal, connectorConfigs.get(TOPICS));
    String sinkDb = connectorConfigs.get(CLICKHOUSE_SINK_DATABASE);
    ClickHouseConfigDef.clusterTableExist(dataSource, sinkTablesVal, sinkDb);
    ClickHouseConfigDef.localTableExist(dataSource, sinkLocalTablesVal, sinkDb);
    ClickHouseConfigDef.validateSinkDateColumns(configValues, dataSource, sinkDb, sinkTablesVal);
    ClickHouseConfigDef.validateSourceDateColumns(configValues, dataSource, sinkTablesVal);
    ClickHouseConfigDef.validateDateFormat(configValues, dataSource, sinkTablesVal);
    if (dataSource != null) {
        dataSource.close();
    }
    return new Config(new LinkedList<>(configValues.values()));
}
Example #9
Source File: MongoSinkTopicConfig.java From mongo-kafka with Apache License 2.0 | 6 votes |
/**
 * Validates the base (non-topic-override) sink config, then validates every
 * topic-specific override block found under {@code TOPIC_OVERRIDE_PREFIX}.
 * Each base result is copied into a fresh ConfigValue so callers can mutate
 * the returned map freely.
 *
 * @param props the raw connector properties
 * @return map of config name to validation result, including override results
 */
static Map<String, ConfigValue> validateRegexAll(final Map<String, String> props) {
    Map<String, ConfigValue> results = new HashMap<>();
    Map<String, String> sinkTopicOriginals = createSinkTopicOriginals(props);
    // Removed a dead local: an AtomicBoolean "containsError" was set inside
    // this forEach but never read afterwards.
    CONFIG
        .validateAll(sinkTopicOriginals)
        .forEach(
            (k, v) ->
                results.put(
                    k, new ConfigValue(k, v.value(), v.recommendedValues(), v.errorMessages())));
    // Extract each overridden topic name ("topic.override.<topic>.<key>")
    // and merge in its dedicated validation results.
    props.keySet().stream()
        .filter(k -> k.startsWith(TOPIC_OVERRIDE_PREFIX))
        .map(k -> k.substring(TOPIC_OVERRIDE_PREFIX.length()).split("\\.")[0])
        .forEach(t -> results.putAll(validateAll(t, props)));
    return results;
}
Example #10
Source File: MySqlSinkClickHouseConnector.java From kafka-connectors with Apache License 2.0 | 6 votes |
/**
 * Validates the configured local tables, when present:
 * if distributed tables are also configured, the local-table count must match
 * them; otherwise it must match the topic count.
 */
private void localTableValidate(ConfigValue sinkTablesVal, String topics, ConfigValue sinkLocalTablesVal) {
    if (sinkLocalTablesVal.value() == null) {
        return;
    }
    String[] localTables = ((String) sinkLocalTablesVal.value()).split(CONFIG_SEPARATOR);
    if (sinkTablesVal.value() != null) {
        // Local tables pair one-to-one with the distributed tables.
        String distributedTables = (String) sinkTablesVal.value();
        if (localTables.length != distributedTables.split(CONFIG_SEPARATOR).length) {
            sinkLocalTablesVal.addErrorMessage(sinkLocalTablesVal.name() + "需要与" + sinkTablesVal.name() + "一一对应");
        }
    } else if (topics.split(CONFIG_SEPARATOR).length != localTables.length) {
        // No distributed tables: local tables pair one-to-one with topics.
        sinkLocalTablesVal.addErrorMessage(sinkLocalTablesVal.name() + "需要与topic个数一一对应");
    }
}
Example #11
Source File: ConfigDefTest.java From kafka-connectors with Apache License 2.0 | 6 votes |
/** A value that cannot be parsed as the declared type must yield a parse error. */
@Test
public void testValidateCannotParse() {
    String parseError = "Invalid value non_integer for configuration a: Not a number of type INT";
    Map<String, ConfigValue> expected = new HashMap<>();
    expected.put("a", new ConfigValue("a", null, Collections.emptyList(), Arrays.asList(parseError)));
    ConfigDef def = new ConfigDef().define("a", Type.INT, Importance.HIGH, "docs");
    Map<String, String> props = new HashMap<>();
    props.put("a", "non_integer");
    for (ConfigValue actual : def.validate(props)) {
        assertEquals(expected.get(actual.name()), actual);
    }
}
Example #12
Source File: ConnectorTest.java From snowflake-kafka-connector with Apache License 2.0 | 5 votes |
/**
 * Asserts that every property named in {@code propArray} validated cleanly
 * and that every OTHER known property carries at least one error.
 */
private static void assertPropNoError(final Map<String, ConfigValue> validateMap, final String[] propArray) {
    List<String> cleanProps = Arrays.asList(propArray);
    for (String prop : allProperties) {
        boolean hasNoError = validateMap.get(prop).errorMessages().isEmpty();
        assert hasNoError == cleanProps.contains(prop);
    }
}
Example #13
Source File: ConnectorTest.java From snowflake-kafka-connector with Apache License 2.0 | 5 votes |
@Test public void testValidateErrorConfig() { Map<String, ConfigValue> validateMap = toValidateMap(getErrorConfig()); // all single field validation has error assertPropNoError(validateMap, new String[]{ SnowflakeSinkConnectorConfig.SNOWFLAKE_PRIVATE_KEY, SnowflakeSinkConnectorConfig.JVM_PROXY_PORT, SnowflakeSinkConnectorConfig.JVM_PROXY_HOST, SnowflakeSinkConnectorConfig.SNOWFLAKE_PRIVATE_KEY_PASSPHRASE }); }
Example #14
Source File: ConnectorTest.java From snowflake-kafka-connector with Apache License 2.0 | 5 votes |
/** An empty config must flag every required connection field. */
@Test
public void testValidateEmptyConfig() {
    Map<String, ConfigValue> results = toValidateMap(getEmptyConfig());
    assertPropHasError(results, new String[]{
        SnowflakeSinkConnectorConfig.SNOWFLAKE_USER,
        SnowflakeSinkConnectorConfig.SNOWFLAKE_URL,
        SnowflakeSinkConnectorConfig.SNOWFLAKE_SCHEMA,
        SnowflakeSinkConnectorConfig.SNOWFLAKE_DATABASE,
    });
}
Example #15
Source File: ConnectorTest.java From snowflake-kafka-connector with Apache License 2.0 | 5 votes |
/** A URL that is not a Snowflake account URL must be rejected. */
@Test
public void testValidateErrorURLFormatConfig() {
    Map<String, String> props = getCorrectConfig();
    props.put(SnowflakeSinkConnectorConfig.SNOWFLAKE_URL, "https://google.com");
    assertPropHasError(toValidateMap(props), new String[]{
        SnowflakeSinkConnectorConfig.SNOWFLAKE_URL});
}
Example #16
Source File: ConnectorTest.java From snowflake-kafka-connector with Apache License 2.0 | 5 votes |
/** A well-formed URL pointing at the wrong account fails user/URL/key checks. */
@Test
public void testValidateErrorURLAccountConfig() {
    Map<String, String> props = getCorrectConfig();
    props.put(SnowflakeSinkConnectorConfig.SNOWFLAKE_URL, "wronggAccountt.snowflakecomputing.com:443");
    assertPropHasError(toValidateMap(props), new String[]{
        SnowflakeSinkConnectorConfig.SNOWFLAKE_USER,
        SnowflakeSinkConnectorConfig.SNOWFLAKE_URL,
        SnowflakeSinkConnectorConfig.SNOWFLAKE_PRIVATE_KEY});
}
Example #17
Source File: ConnectorTest.java From snowflake-kafka-connector with Apache License 2.0 | 5 votes |
/** A wrong user name fails the user, URL, and private-key checks together. */
@Test
public void testValidateErrorUserConfig() {
    Map<String, String> props = getCorrectConfig();
    props.put(SnowflakeSinkConnectorConfig.SNOWFLAKE_USER, "wrongUser");
    assertPropHasError(toValidateMap(props), new String[]{
        SnowflakeSinkConnectorConfig.SNOWFLAKE_USER,
        SnowflakeSinkConnectorConfig.SNOWFLAKE_URL,
        SnowflakeSinkConnectorConfig.SNOWFLAKE_PRIVATE_KEY});
}
Example #18
Source File: ConnectorTest.java From snowflake-kafka-connector with Apache License 2.0 | 5 votes |
/** An unparseable private key is flagged on the private-key field only. */
@Test
public void testValidateErrorPasswordConfig() {
    Map<String, String> props = getCorrectConfig();
    props.put(SnowflakeSinkConnectorConfig.SNOWFLAKE_PRIVATE_KEY, "wrongPassword");
    assertPropHasError(toValidateMap(props), new String[]{
        SnowflakeSinkConnectorConfig.SNOWFLAKE_PRIVATE_KEY});
}
Example #19
Source File: ConnectorTest.java From snowflake-kafka-connector with Apache License 2.0 | 5 votes |
/** An empty private key is flagged on the private-key field. */
@Test
public void testValidateEmptyPasswordConfig() {
    Map<String, String> props = getCorrectConfig();
    props.put(SnowflakeSinkConnectorConfig.SNOWFLAKE_PRIVATE_KEY, "");
    assertPropHasError(toValidateMap(props), new String[]{
        SnowflakeSinkConnectorConfig.SNOWFLAKE_PRIVATE_KEY});
}
Example #20
Source File: ConnectorTest.java From snowflake-kafka-connector with Apache License 2.0 | 5 votes |
/** A missing (removed) private key is flagged on the private-key field. */
@Test
public void testValidateNullPasswordConfig() {
    Map<String, String> props = getCorrectConfig();
    props.remove(SnowflakeSinkConnectorConfig.SNOWFLAKE_PRIVATE_KEY);
    assertPropHasError(toValidateMap(props), new String[]{
        SnowflakeSinkConnectorConfig.SNOWFLAKE_PRIVATE_KEY});
}
Example #21
Source File: ConnectorTest.java From snowflake-kafka-connector with Apache License 2.0 | 5 votes |
/** A wrong passphrase fails both the private-key and passphrase checks. */
@Test
public void testValidateErrorPassphraseConfig() {
    Map<String, String> props = getCorrectConfig();
    props.put(SnowflakeSinkConnectorConfig.SNOWFLAKE_PRIVATE_KEY_PASSPHRASE, "wrongPassphrase");
    assertPropHasError(toValidateMap(props), new String[]{
        SnowflakeSinkConnectorConfig.SNOWFLAKE_PRIVATE_KEY,
        SnowflakeSinkConnectorConfig.SNOWFLAKE_PRIVATE_KEY_PASSPHRASE});
}
Example #22
Source File: GitHubSourceConnectorConfigTest.java From kafka-connect-github-source with MIT License | 5 votes |
/** Batch sizes outside the allowed range (negative, or above 100) are rejected. */
@Test
public void validateBatchSize() {
    for (String illegal : new String[]{"-1", "101"}) {
        config.put(BATCH_SIZE_CONFIG, illegal);
        ConfigValue result = configDef.validateAll(config).get(BATCH_SIZE_CONFIG);
        assertTrue(result.errorMessages().size() > 0);
    }
}
Example #23
Source File: ConnectionValidator.java From mongo-kafka with Apache License 2.0 | 5 votes |
/**
 * Looks up the ConfigValue with the given name inside a validated Config.
 *
 * @param config the validated configuration to search
 * @param name   the config key to find
 * @return the matching ConfigValue, or empty when no entry has that name
 */
public static Optional<ConfigValue> getConfigByName(final Config config, final String name) {
    return config.configValues().stream()
        .filter(configValue -> configValue.name().equals(name))
        .findFirst();
}
Example #24
Source File: ConnectorTest.java From snowflake-kafka-connector with Apache License 2.0 | 5 votes |
/** A nonexistent database is flagged on the database field. */
@Test
public void testValidateErrorDatabaseConfig() {
    Map<String, String> props = getCorrectConfig();
    props.put(SnowflakeSinkConnectorConfig.SNOWFLAKE_DATABASE, "wrongDatabase");
    assertPropHasError(toValidateMap(props), new String[]{
        SnowflakeSinkConnectorConfig.SNOWFLAKE_DATABASE});
}
Example #25
Source File: ConnectorTest.java From snowflake-kafka-connector with Apache License 2.0 | 5 votes |
/** A nonexistent schema is flagged on the schema field. */
@Test
public void testValidateErrorSchemaConfig() {
    Map<String, String> props = getCorrectConfig();
    props.put(SnowflakeSinkConnectorConfig.SNOWFLAKE_SCHEMA, "wrongSchema");
    assertPropHasError(toValidateMap(props), new String[]{
        SnowflakeSinkConnectorConfig.SNOWFLAKE_SCHEMA});
}
Example #26
Source File: ConfigDefTest.java From kafka-connectors with Apache License 2.0 | 5 votes |
/**
 * Missing required keys ("b", "c") must produce missing-value errors, and a
 * dependent ("d") that is referenced but never defined must produce an
 * undefined-dependent error and be marked not visible.
 */
@Test public void testValidateMissingConfigKey() {
    // Expected per-key validation results.
    Map<String, ConfigValue> expected = new HashMap<>();
    String errorMessageB = "Missing required configuration \"b\" which has no default value.";
    String errorMessageC = "Missing required configuration \"c\" which has no default value.";
    String errorMessageD = "d is referred in the dependents, but not defined.";
    // "a" is supplied and valid; "b"/"c" are required but absent; "d" is only
    // referenced as a dependent of "a" and never defined.
    ConfigValue configA = new ConfigValue("a", 1, Arrays.<Object>asList(1, 2, 3), Collections.<String>emptyList());
    ConfigValue configB = new ConfigValue("b", null, Arrays.<Object>asList(4, 5), Arrays.asList(errorMessageB));
    ConfigValue configC = new ConfigValue("c", null, Arrays.<Object>asList(4, 5), Arrays.asList(errorMessageC));
    ConfigValue configD = new ConfigValue("d", null, Collections.emptyList(), Arrays.asList(errorMessageD));
    configD.visible(false);
    expected.put("a", configA);
    expected.put("b", configB);
    expected.put("c", configC);
    expected.put("d", configD);
    ConfigDef def = new ConfigDef()
        .define("a", Type.INT, Importance.HIGH, "docs", "group", 1, Width.SHORT, "a", Arrays.asList("b", "c", "d"), new IntegerRecommender(false))
        .define("b", Type.INT, Importance.HIGH, "docs", "group", 2, Width.SHORT, "b", new IntegerRecommender(true))
        .define("c", Type.INT, Importance.HIGH, "docs", "group", 3, Width.SHORT, "c", new IntegerRecommender(true));
    Map<String, String> props = new HashMap<>();
    props.put("a", "1");
    List<ConfigValue> configs = def.validate(props);
    // Each produced ConfigValue must equal its expected counterpart.
    for (ConfigValue config: configs) {
        String name = config.name();
        ConfigValue expectedConfig = expected.get(name);
        assertEquals(expectedConfig, config);
    }
}
Example #27
Source File: MySqlSinkClickHouseConnector.java From kafka-connectors with Apache License 2.0 | 5 votes |
/**
 * Validates the distributed- and local-table parameters: first the count
 * checks against topics, then existence in ClickHouse, then the date-column
 * and date-format checks.
 */
private void tableValidate(JdbcDataSource dataSource, Map<String, String> connectorConfigs, Map<String, ConfigValue> configValues) {
    ConfigValue sinkTablesVal = configValues.get(CLICKHOUSE_SINK_TABLES);
    ConfigValue sinkLocalTablesVal = configValues.get(CLICKHOUSE_SINK_LOCAL_TABLES);
    String topics = connectorConfigs.get(TOPICS);
    // Count checks first — they do not need a live connection.
    clusterTableValidate(sinkTablesVal, topics);
    localTableValidate(sinkTablesVal, topics, sinkLocalTablesVal);
    // Existence and column checks run against the live connection
    // (each is a no-op when dataSource is null).
    String sinkDb = connectorConfigs.get(CLICKHOUSE_SINK_DATABASE);
    ClickHouseConfigDef.clusterTableExist(dataSource, sinkTablesVal, sinkDb);
    ClickHouseConfigDef.localTableExist(dataSource, sinkLocalTablesVal, sinkDb);
    ClickHouseConfigDef.validateSinkDateColumns(configValues, dataSource, sinkDb, sinkTablesVal);
    ClickHouseConfigDef.validateSourceDateColumns(configValues, dataSource, sinkTablesVal);
    ClickHouseConfigDef.validateDateFormat(configValues, dataSource, sinkTablesVal);
}
Example #28
Source File: MySqlSinkClickHouseConnector.java From kafka-connectors with Apache License 2.0 | 5 votes |
/**
 * Validates the distributed-table parameter: when set, its entry count must
 * equal the number of topics.
 */
private void clusterTableValidate(ConfigValue sinkTablesVal, String topics) {
    if (sinkTablesVal.value() == null) {
        return;
    }
    String[] tables = ((String) sinkTablesVal.value()).split(CONFIG_SEPARATOR);
    if (topics.split(CONFIG_SEPARATOR).length != tables.length) {
        sinkTablesVal.addErrorMessage(sinkTablesVal.name() + "需要与topic个数一一对应");
    }
}
Example #29
Source File: ConfigDefTest.java From kafka-connectors with Apache License 2.0 | 5 votes |
/**
 * Happy-path-plus-missing-keys validation: supplied keys ("a", "d") validate
 * cleanly with recommender values, while required-but-absent keys ("b", "c")
 * each carry a missing-value error ("b" twice because it is a dependent of
 * both "a" and "d").
 */
@Test public void testValidate() {
    // Expected per-key validation results.
    Map<String, ConfigValue> expected = new HashMap<>();
    String errorMessageB = "Missing required configuration \"b\" which has no default value.";
    String errorMessageC = "Missing required configuration \"c\" which has no default value.";
    ConfigValue configA = new ConfigValue("a", 1, Arrays.<Object>asList(1, 2, 3), Collections.<String>emptyList());
    // "b" is a dependent of both "a" and "d", so its error appears twice.
    ConfigValue configB = new ConfigValue("b", null, Arrays.<Object>asList(4, 5), Arrays.asList(errorMessageB, errorMessageB));
    ConfigValue configC = new ConfigValue("c", null, Arrays.<Object>asList(4, 5), Arrays.asList(errorMessageC));
    ConfigValue configD = new ConfigValue("d", 10, Arrays.<Object>asList(1, 2, 3), Collections.<String>emptyList());
    expected.put("a", configA);
    expected.put("b", configB);
    expected.put("c", configC);
    expected.put("d", configD);
    ConfigDef def = new ConfigDef()
        .define("a", Type.INT, Importance.HIGH, "docs", "group", 1, Width.SHORT, "a", Arrays.asList("b", "c"), new IntegerRecommender(false))
        .define("b", Type.INT, Importance.HIGH, "docs", "group", 2, Width.SHORT, "b", new IntegerRecommender(true))
        .define("c", Type.INT, Importance.HIGH, "docs", "group", 3, Width.SHORT, "c", new IntegerRecommender(true))
        .define("d", Type.INT, Importance.HIGH, "docs", "group", 4, Width.SHORT, "d", Arrays.asList("b"), new IntegerRecommender(false));
    Map<String, String> props = new HashMap<>();
    props.put("a", "1");
    props.put("d", "10");
    List<ConfigValue> configs = def.validate(props);
    // Each produced ConfigValue must equal its expected counterpart.
    for (ConfigValue config : configs) {
        String name = config.name();
        ConfigValue expectedConfig = expected.get(name);
        assertEquals(expectedConfig, config);
    }
}
Example #30
Source File: ClickHouseConfigDef.java From kafka-connectors with Apache License 2.0 | 5 votes |
/**
 * Validates connectivity to ClickHouse and that the sink database exists.
 *
 * @return an OPEN JdbcDataSource on success — callers run the table/column
 *         checks against it and are responsible for closing it (the
 *         connector's validate() does exactly that) — or null when
 *         prerequisites failed or the connection could not be established.
 */
public static JdbcDataSource clickHouseConnectValidate(Map<String, ConfigValue> configValues) {
    ConfigValue hosts = configValues.get(CLICKHOUSE_HOSTS);
    ConfigValue port = configValues.get(CLICKHOUSE_JDBC_PORT);
    ConfigValue user = configValues.get(CLICKHOUSE_JDBC_USER);
    ConfigValue password = configValues.get(CLICKHOUSE_JDBC_PASSWORD);
    ConfigValue sinkDb = configValues.get(CLICKHOUSE_SINK_DATABASE);
    JdbcDataSource dataSource = null;
    if (hosts.errorMessages().isEmpty() && port.errorMessages().isEmpty() && sinkDb.errorMessages().isEmpty()) {
        // Treat an absent user/password VALUE as "" as well — previously only a
        // null ConfigValue was guarded, so a present-but-unset key passed null
        // into the JDBC connect config.
        String userVal = (user == null || user.value() == null) ? "" : (String) user.value();
        String passwordVal = (password == null || password.value() == null) ? "" : (String) password.value();
        JdbcConnectConfig connectConfig = JdbcConnectConfig.initCkConnectConfig(
                (String) hosts.value(), (String) port.value(), userVal, passwordVal);
        try {
            dataSource = new JdbcDataSource(connectConfig);
            List<String> databases = dataSource.showDatabases();
            String db = (String) sinkDb.value();
            if (!databases.contains(db)) {
                sinkDb.addErrorMessage(String.format("%s数据库不存在", db));
            }
            // BUG FIX: the data source used to be closed and set to null here on
            // SUCCESS, so this method always returned null and every downstream
            // validation guarded by "dataSource != null" (table existence, date
            // columns, date formats) was silently skipped. Return it open; the
            // caller closes it.
        } catch (Exception e) {
            logger.error("校验ClickHouse连接失败: ", e);
            hosts.addErrorMessage("连接ClickHouse失败, " + e.getMessage());
            // On failure, release any partially-opened connection and signal
            // "no usable connection" with null (previously a possibly-open
            // source could be returned here).
            if (dataSource != null) {
                dataSource.close();
            }
            dataSource = null;
        }
    }
    return dataSource;
}