Java Code Examples for com.typesafe.config.Config#hasPath()
The following examples show how to use com.typesafe.config.Config#hasPath().
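Before the project examples below, here is a minimal, self-contained sketch of the basic hasPath() pattern: test whether an optional key is present before reading it, and fall back to a default when it is absent. The key names app.greeting and app.retries are invented for illustration and do not come from any of the projects below.

import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;

public class HasPathSketch {
  public static void main(String[] args) {
    // Parse an in-memory config; real code would typically use ConfigFactory.load().
    Config config = ConfigFactory.parseString("app.greeting = hello");

    // hasPath() guards against ConfigException.Missing when a key is optional.
    String greeting = config.hasPath("app.greeting")
        ? config.getString("app.greeting")
        : "default greeting";

    // The same check works for any value type, e.g. an optional integer.
    int retries = config.hasPath("app.retries") ? config.getInt("app.retries") : 3;

    System.out.println(greeting + ", retries=" + retries);
  }
}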
Example 1
Source File: SecurityUtils.java From envelope with Apache License 2.0 | 6 votes |
public static String getTokenStoreFilePath(Config config, boolean onDriver) throws IOException {
  String tokenFilePrefix;
  if (config.hasPath(TOKENS_FILE)) {
    tokenFilePrefix = config.getString(TOKENS_FILE);
  } else {
    String userName = UserGroupInformation.getCurrentUser().getShortUserName();
    String appId;
    if (onDriver) {
      appId = Contexts.getSparkSession().sparkContext().applicationId();
    } else {
      appId = SparkEnv.get().conf().getAppId();
    }
    tokenFilePrefix = String.format("/user/%s/.sparkStaging/%s/envelope_tokens", userName, appId);
  }
  return tokenFilePrefix;
}
Example 2
Source File: SecureJobTemplate.java From incubator-gobblin with Apache License 2.0 | 6 votes |
/**
 * Filter the user config to only preserve the keys allowed by a secure template.
 */
static Config filterUserConfig(SecureJobTemplate template, Config userConfig, Logger logger) {
  if (!template.isSecure()) {
    return userConfig;
  }
  Config survivingConfig = ConfigFactory.empty();
  for (String key : template.overridableProperties()) {
    if (userConfig.hasPath(key)) {
      survivingConfig = survivingConfig.withValue(key, userConfig.getValue(key));
      userConfig = userConfig.withoutPath(key);
    }
  }
  if (!userConfig.isEmpty()) {
    logger.warn(String.format("Secure template %s ignored the following keys because they are not overridable: %s",
        template.getUri().toString(),
        userConfig.entrySet().stream().map(Map.Entry::getKey).collect(Collectors.joining(", "))));
  }
  return survivingConfig;
}
Example 3
Source File: LogOutput.java From envelope with Apache License 2.0 | 6 votes |
@Override
public void configure(Config config) {
  if (config.hasPath(DELIMITER_CONFIG_NAME)) {
    delimiter = config.getString(DELIMITER_CONFIG_NAME);
  } else {
    delimiter = ",";
  }
  if (config.hasPath(LOG_LEVEL_CONFIG_NAME)) {
    logLevel = config.getString(LOG_LEVEL_CONFIG_NAME).toUpperCase();
  } else {
    logLevel = "INFO";
  }
}
Example 4
Source File: Recorder.java From nassau with Apache License 2.0 | 6 votes |
private static void main(Config config, File file) throws IOException {
  addShutdownHook();
  try (final BinaryFILEWriter writer = BinaryFILEWriter.open(file)) {
    MessageListener listener = new MessageListener() {
      @Override
      public void message(ByteBuffer buffer) throws IOException {
        writer.write(buffer);
      }
    };
    if (config.hasPath("session.multicast-interface")) {
      try (MoldUDP64Client client = join(config, listener)) {
        receive(client);
      }
    } else {
      try (SoupBinTCPClient client = connect(config, listener)) {
        receive(client);
      }
    }
  }
}
Example 5
Source File: TableLikeStageableTableMetadata.java From incubator-gobblin with Apache License 2.0 | 5 votes |
public TableLikeStageableTableMetadata(Table referenceTable, Config config) {
  super(HiveDataset.resolveTemplate(config.getString(StageableTableMetadata.DESTINATION_TABLE_KEY), referenceTable),
      HiveDataset.resolveTemplate(config.getString(StageableTableMetadata.DESTINATION_TABLE_KEY), referenceTable) + "_STAGING",
      HiveDataset.resolveTemplate(config.getString(StageableTableMetadata.DESTINATION_DB_KEY), referenceTable),
      HiveDataset.resolveTemplate(config.getString(DESTINATION_DATA_PATH_KEY), referenceTable),
      (!config.hasPath(StageableTableMetadata.DESTINATION_DATA_PATH_ADD_SUBDIR)
          || Boolean.parseBoolean(HiveDataset.resolveTemplate(config.getString(StageableTableMetadata.DESTINATION_DATA_PATH_ADD_SUBDIR), referenceTable))),
      getTableProperties(referenceTable), new ArrayList<>(), Optional.of(referenceTable.getNumBuckets()),
      new Properties(), false, false, Optional.absent(), new ArrayList<>());
}
Example 6
Source File: QPSPolicy.java From incubator-gobblin with Apache License 2.0 | 5 votes |
public QPSPolicy(Config config) {
  Preconditions.checkArgument(config.hasPath(QPS), "QPS required.");
  this.qps = config.getLong(QPS);
  long fullRequestTimeoutMillis = config.hasPath(FULL_REQUEST_TIMEOUT_MILLIS)
      ? config.getLong(FULL_REQUEST_TIMEOUT_MILLIS) : DEFAULT_FULL_REQUEST_TIMEOUT;
  long maxBucketSizeMillis = config.hasPath(MAX_BUCKET_SIZE_MILLIS)
      ? config.getLong(MAX_BUCKET_SIZE_MILLIS) : DEFAULT_MAX_BUCKET_SIZE;
  this.tokenBucket = new DynamicTokenBucket(qps, fullRequestTimeoutMillis, maxBucketSizeMillis);
}
Example 7
Source File: HdfsAuditLogParserBolt.java From eagle with Apache License 2.0 | 5 votes |
public HdfsAuditLogParserBolt(Config config) {
  if (config.hasPath(DATASOURCE_TIMEZONE_PATH)) {
    TimeZone timeZone = TimeZone.getTimeZone(config.getString(DATASOURCE_TIMEZONE_PATH));
    parser = new HDFSAuditLogParser(timeZone);
  } else {
    parser = new HDFSAuditLogParser();
  }
}
Example 8
Source File: BaseDataNode.java From incubator-gobblin with Apache License 2.0 | 5 votes |
public BaseDataNode(Config nodeProps) throws DataNodeCreationException {
  try {
    String nodeId = ConfigUtils.getString(nodeProps, FlowGraphConfigurationKeys.DATA_NODE_ID_KEY, "");
    Preconditions.checkArgument(!Strings.isNullOrEmpty(nodeId), "Node Id cannot be null or empty");
    this.id = nodeId;
    if (nodeProps.hasPath(FlowGraphConfigurationKeys.DATA_NODE_IS_ACTIVE_KEY)) {
      this.active = nodeProps.getBoolean(FlowGraphConfigurationKeys.DATA_NODE_IS_ACTIVE_KEY);
    }
    this.rawConfig = nodeProps;
  } catch (Exception e) {
    throw new DataNodeCreationException(e);
  }
}
Example 9
Source File: ScheduledReporter.java From incubator-gobblin with Apache License 2.0 | 5 votes |
private MetricFilter createMetricFilter(Config config) {
  if (config.hasPath(METRIC_FILTER_NAME_REGEX) && config.hasPath(METRIC_FILTER_TYPE_LIST)) {
    return MetricFilters.and(new MetricNameRegexFilter(config.getString(METRIC_FILTER_NAME_REGEX)),
        new MetricTypeFilter(config.getString(METRIC_FILTER_TYPE_LIST)));
  }
  if (config.hasPath(METRIC_FILTER_NAME_REGEX)) {
    return new MetricNameRegexFilter(config.getString(METRIC_FILTER_NAME_REGEX));
  }
  if (config.hasPath(METRIC_FILTER_TYPE_LIST)) {
    return new MetricTypeFilter(config.getString(METRIC_FILTER_TYPE_LIST));
  }
  return MetricFilter.ALL;
}
Example 10
Source File: IfPathExistsValidation.java From envelope with Apache License 2.0 | 5 votes |
@Override
public ValidationResult validate(Config config) {
  if (config.hasPath(ifPath)) {
    return thenValidation.validate(config);
  } else {
    return new ValidationResult(this, Validity.VALID, "Conditional configuration '" + ifPath +
        "' does not exist, so it was not required that the validation (" + thenValidation + ") be checked");
  }
}
Example 11
Source File: ElasticClient.java From Stargraph with MIT License | 5 votes |
CreateIndexRequestBuilder prepareCreate() {
  logger.info(marker, "Creating {}", kbId);
  Config mappingCfg = getModelCfg().getConfig("elastic.mapping");
  // Search for matching mapping definition, fallback to the dynamic _default_.
  String targetType = mappingCfg.hasPath(kbId.getModel()) ? kbId.getModel() : "_default_";
  Config mapping = mappingCfg.withOnlyPath(targetType);
  CreateIndexRequestBuilder builder = client.admin().indices().prepareCreate(getIndexName());
  return builder.addMapping(targetType, mapping.root().unwrapped());
}
Example 12
Source File: ReplicationConfiguration.java From incubator-gobblin with Apache License 2.0 | 5 votes |
public Builder withDefaultDataFlowTopologyConfig_PullMode(Config config) {
  if (config.hasPath(DEFAULT_DATA_FLOW_TOPOLOGIES_PULLMODE)) {
    this.defaultDataFlowTopology_PullModeConfig = Optional.of(config.getConfig(DEFAULT_DATA_FLOW_TOPOLOGIES_PULLMODE));
  } else {
    this.defaultDataFlowTopology_PullModeConfig = Optional.absent();
  }
  return this;
}
Example 13
Source File: MRHistoryJobApplicationProvider.java From eagle with Apache License 2.0 | 5 votes |
@Override
public Optional<List<Service>> getSharedServices(Config envConfig) {
  if (envConfig.hasPath(MRHistoryJobDailyReporter.SERVICE_PATH)) {
    return Optional.of(Collections.singletonList(new MRHistoryJobDailyReporter(envConfig)));
  } else {
    return Optional.empty();
  }
}
Example 14
Source File: HBaseUtils.java From envelope with Apache License 2.0 | 5 votes |
public static Configuration getHBaseConfiguration(Config config) throws IOException {
  Configuration hbaseConfiguration = HBaseConfiguration.create();
  if (config.hasPath(ZK_QUORUM_PROPERTY)) {
    String zkQuorum = config.getString(ZK_QUORUM_PROPERTY);
    hbaseConfiguration.set(HConstants.ZOOKEEPER_QUORUM, zkQuorum);
  }
  LOG.debug("HBase:: Using ZK quorum: {}", hbaseConfiguration.get(HConstants.ZOOKEEPER_QUORUM));
  LOG.debug("HBase:: Using security: {}", hbaseConfiguration.get("hadoop.security.authentication"));
  // Add any other pass-through options starting with HBASE_PASSTHRU_PREFIX
  if (config.hasPath(HBASE_PASSTHRU_PREFIX)) {
    Config hbaseConfigs = config.getConfig(HBASE_PASSTHRU_PREFIX);
    for (Map.Entry<String, ConfigValue> entry : hbaseConfigs.entrySet()) {
      String param = entry.getKey();
      String value = null;
      switch (entry.getValue().valueType()) {
        case STRING:
          value = (String) entry.getValue().unwrapped();
          break;
        default:
          LOG.warn("Only string parameters currently " +
              "supported, auto-converting to String [{}]", param);
          value = entry.getValue().unwrapped().toString();
      }
      if (value != null) {
        hbaseConfiguration.set(param, value);
      }
    }
  }
  return hbaseConfiguration;
}
Example 15
Source File: MRRunningJobConfig.java From eagle with Apache License 2.0 | 5 votes |
private String getConfigValue(Config config, String key, String defaultValue) {
  if (config.hasPath(key)) {
    return config.getString(key);
  } else {
    return defaultValue;
  }
}
Example 16
Source File: MongoMetadataDaoImpl.java From eagle with Apache License 2.0 | 5 votes |
@Inject
public MongoMetadataDaoImpl(Config config) {
  this.connection = config.getString(MetadataUtils.MONGO_CONNECTION_PATH);
  this.cappedMaxSize = config.hasPath(MONGO_CAPPED_MAX_SIZE)
      ? config.getInt(MONGO_CAPPED_MAX_SIZE) : DEFAULT_CAPPED_MAX_SIZE;
  this.cappedMaxDocuments = config.hasPath(MONGO_CAPPED_MAX_DOCUMENTS)
      ? config.getInt(MONGO_CAPPED_MAX_DOCUMENTS) : DEFAULT_CAPPED_MAX_DOCUMENTS;
  this.client = new MongoClient(new MongoClientURI(this.connection));
  this.dbname = config.hasPath(MetadataUtils.MONGO_DATABASE)
      ? config.getString(MetadataUtils.MONGO_DATABASE) : DEFAULT_DB_NAME;
  init();
}
Example 17
Source File: Args.java From gsc-core with GNU Lesser General Public License v3.0 | 5 votes |
private static void initRocksDbBackupProperty(Config config) {
  boolean enable = config.hasPath("storage.backup.enable") && config.getBoolean("storage.backup.enable");
  String propPath = config.hasPath("storage.backup.propPath")
      ? config.getString("storage.backup.propPath") : "prop.properties";
  String bak1path = config.hasPath("storage.backup.bak1path")
      ? config.getString("storage.backup.bak1path") : "bak1/database/";
  String bak2path = config.hasPath("storage.backup.bak2path")
      ? config.getString("storage.backup.bak2path") : "bak2/database/";
  int frequency = config.hasPath("storage.backup.frequency")
      ? config.getInt("storage.backup.frequency") : 10000;
  INSTANCE.dbBackupConfig = DbBackupConfig.getInstance()
      .initArgs(enable, propPath, bak1path, bak2path, frequency);
}
Example 18
Source File: JobConfigurationManager.java From incubator-gobblin with Apache License 2.0 | 5 votes |
public JobConfigurationManager(EventBus eventBus, Config config) {
  this.eventBus = eventBus;
  this.config = config;
  this.jobConfDirPath = config.hasPath(GobblinClusterConfigurationKeys.JOB_CONF_PATH_KEY)
      ? Optional.of(config.getString(GobblinClusterConfigurationKeys.JOB_CONF_PATH_KEY))
      : Optional.<String>absent();
  try {
    this.jobSpecResolver = JobSpecResolver.builder(config).build();
  } catch (IOException ioe) {
    throw new RuntimeException(ioe);
  }
}
Example 19
Source File: AWSDatabaseHolder.java From billow with Apache License 2.0 | 4 votes |
public AWSDatabaseHolder(Config config) {
  maxAgeInMs = config.getDuration("maxAge", TimeUnit.MILLISECONDS);

  final DefaultAWSCredentialsProviderChain awsCredentialsProviderChain = new DefaultAWSCredentialsProviderChain();

  final ClientConfiguration clientConfig = new ClientConfiguration();
  clientConfig.setRetryPolicy(new RetryPolicy(null, null, config.getInt("maxErrorRetry"), true));
  clientConfig.setSocketTimeout(config.getInt("socketTimeout") * 1000);

  final AmazonEC2 bootstrapEC2Client = AmazonEC2ClientBuilder.standard().withCredentials(awsCredentialsProviderChain).build();

  ec2Clients = Maps.newHashMap();
  rdsClients = Maps.newHashMap();
  sqsClients = Maps.newHashMap();
  dynamoDBClients = Maps.newHashMap();
  elasticacheClients = Maps.newHashMap();
  elasticsearchClients = Maps.newHashMap();

  final List<Region> ec2Regions = bootstrapEC2Client.describeRegions().getRegions();
  for (Region region : ec2Regions) {
    final String regionName = region.getRegionName();
    final String endpoint = region.getEndpoint();
    log.debug("Adding ec2 region {}", region);

    if (config.getBoolean("ec2Enabled")) {
      final AmazonEC2Client ec2Client = new AmazonEC2Client(awsCredentialsProviderChain, clientConfig);
      ec2Client.setEndpoint(endpoint);
      ec2Clients.put(regionName, ec2Client);
    }

    if (config.getBoolean("rdsEnabled")) {
      final AmazonRDSClient rdsClient = new AmazonRDSClient(awsCredentialsProviderChain, clientConfig);
      rdsClient.setEndpoint(endpoint.replaceFirst("ec2\\.", "rds."));
      rdsClients.put(regionName, rdsClient);
    }

    if (config.getBoolean("dynamodbEnabled")) {
      final AmazonDynamoDBClient dynamoDBClient = new AmazonDynamoDBClient(awsCredentialsProviderChain, clientConfig);
      dynamoDBClient.setEndpoint(endpoint.replaceFirst("ec2\\.", "dynamodb."));
      dynamoDBClients.put(regionName, dynamoDBClient);
    }

    if (config.getBoolean("sqsEnabled")) {
      final AmazonSQSClient sqsClient = new AmazonSQSClient(awsCredentialsProviderChain, clientConfig);
      sqsClient.setEndpoint(endpoint.replaceFirst("ec2\\.", "sqs."));
      sqsClients.put(regionName, sqsClient);
    }

    if (config.getBoolean("elasticacheEnabled")) {
      final AmazonElastiCacheClient elastiCacheClient = new AmazonElastiCacheClient(awsCredentialsProviderChain, clientConfig);
      elastiCacheClient.setEndpoint(endpoint.replaceFirst("ec2\\.", "elasticache."));
      elasticacheClients.put(regionName, elastiCacheClient);
    }

    if (config.getBoolean("elasticsearchEnabled")) {
      final AWSElasticsearchClient elasticsearchClient = new AWSElasticsearchClient(awsCredentialsProviderChain, clientConfig);
      elasticsearchClient.setEndpoint(endpoint.replaceFirst("ec2\\.", "es."));
      elasticsearchClients.put(regionName, elasticsearchClient);
    }
  }

  this.iamClient = AmazonIdentityManagementClientBuilder.standard()
      .withCredentials(awsCredentialsProviderChain)
      .withClientConfiguration(clientConfig)
      .build();

  if (config.hasPath("accountNumber")) {
    this.awsAccountNumber = config.getString("accountNumber");
  } else {
    this.awsAccountNumber = null;
  }

  if (config.hasPath("arnPartition")) {
    this.awsARNPartition = config.getString("arnPartition");
  } else {
    this.awsARNPartition = "aws";
  }

  rebuild();
}
Example 20
Source File: ConfigUtils.java From incubator-gobblin with Apache License 2.0 | 3 votes |
/**
 * Return {@link Integer} value at <code>path</code> if <code>config</code> has path. If not return <code>def</code>
 *
 * @param config in which the path may be present
 * @param path key to look for in the config object
 * @return {@link Integer} value at <code>path</code> if <code>config</code> has path. If not return <code>def</code>
 */
public static Integer getInt(Config config, String path, Integer def) {
  if (config.hasPath(path)) {
    return Integer.valueOf(config.getInt(path));
  }
  return def;
}
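A helper like ConfigUtils.getInt above collapses the hasPath-then-get-else-default pattern that recurs throughout these examples into a single call. The following is a hypothetical usage sketch: the key names worker.threads and worker.queueSize are invented for illustration, and the import assumes Gobblin's ConfigUtils lives in org.apache.gobblin.util.

import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;
import org.apache.gobblin.util.ConfigUtils; // assumed package; adjust to the project's actual location

public class GetIntUsage {
  public static void main(String[] args) {
    Config config = ConfigFactory.parseString("worker.threads = 8");

    // Present key: returns the configured value (8).
    Integer threads = ConfigUtils.getInt(config, "worker.threads", 4);

    // Missing key: returns the supplied default (16) instead of throwing.
    Integer queueSize = ConfigUtils.getInt(config, "worker.queueSize", 16);

    System.out.println(threads + " " + queueSize);
  }
}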