Java Code Examples for com.typesafe.config.Config#withValue()
The following examples show how to use com.typesafe.config.Config#withValue(). Each example is taken from an open-source project; the source file and license are noted above the code.
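Config objects are immutable, so withValue(path, value) does not modify the receiver; it returns a new Config with the given value set at the given path, which is why every example below reassigns or chains the result. A minimal, self-contained sketch of the pattern (the class and key names are illustrative, not taken from any project below):

import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;
import com.typesafe.config.ConfigValueFactory;

public class WithValueSketch {
  public static void main(String[] args) {
    // withValue() returns a copy with the value set at the given path,
    // leaving the original Config untouched.
    Config base = ConfigFactory.empty();
    Config updated = base
        .withValue("service.host", ConfigValueFactory.fromAnyRef("localhost"))
        .withValue("service.port", ConfigValueFactory.fromAnyRef(8080));

    System.out.println(updated.getString("service.host")); // localhost
    System.out.println(updated.getInt("service.port"));    // 8080
    System.out.println(base.isEmpty());                    // true: base is unchanged
  }
}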
Example 1
Source File: DefaultHttpClientConfigurator.java From incubator-gobblin with Apache License 2.0
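This method copies any proxy settings present in a Gobblin State into a fresh Config, adding each key only when the corresponding property exists.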
protected Config stateToConfig(State state) {
  String proxyUrlKey = getPrefixedPropertyName(PROXY_URL_KEY);
  String proxyPortKey = getPrefixedPropertyName(PROXY_PORT_KEY);
  String proxyHostportKey = getPrefixedPropertyName(PROXY_HOSTPORT_KEY);

  Config cfg = ConfigFactory.empty();
  if (state.contains(proxyUrlKey)) {
    cfg = cfg.withValue(PROXY_URL_KEY, ConfigValueFactory.fromAnyRef(state.getProp(proxyUrlKey)));
  }
  if (state.contains(proxyPortKey)) {
    cfg = cfg.withValue(PROXY_PORT_KEY, ConfigValueFactory.fromAnyRef(state.getPropAsInt(proxyPortKey)));
  }
  if (state.contains(proxyHostportKey)) {
    cfg = cfg.withValue(PROXY_HOSTPORT_KEY, ConfigValueFactory.fromAnyRef(state.getProp(proxyHostportKey)));
  }
  return cfg;
}
Example 2
Source File: FsStateStoreTest.java From incubator-gobblin with Apache License 2.0
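This test redirects the state store root directory to a bundled test resource, then verifies that a store written before the WritableShim changes still deserializes correctly.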
@Test
public void testBackwardsCompat() throws IOException {
  // Tests with a state store that was saved before the WritableShim changes
  Config bwConfig = ConfigFactory.load(config);
  URL path = getClass().getResource("/backwardsCompatTestStore");
  Assert.assertNotNull(path);

  bwConfig = bwConfig.withValue(ConfigurationKeys.STATE_STORE_ROOT_DIR_KEY,
      ConfigValueFactory.fromAnyRef(path.toString()));

  StateStore<State> bwStateStore = stateStoreFactory.createStateStore(bwConfig, State.class);
  Assert.assertTrue(bwStateStore.exists("testStore", "testTable"));

  List<State> states = bwStateStore.getAll("testStore", "testTable");
  Assert.assertEquals(states.size(), 3);

  Assert.assertEquals(states.get(0).getProp("k1"), "v1");
  Assert.assertEquals(states.get(0).getId(), "s1");
  Assert.assertEquals(states.get(1).getProp("k2"), "v2");
  Assert.assertEquals(states.get(1).getId(), "s2");
  Assert.assertEquals(states.get(2).getProp("k3"), "v3");
  Assert.assertEquals(states.get(2).getId(), "s3");
}
Example 3
Source File: TestValidator.java From envelope with Apache License 2.0
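This test first validates an empty string value for a mandatory path, then overrides the same path with a list containing an empty string via ConfigValueFactory.fromIterable().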
@Test
public void testAllowEmptyValue() {
  ProvidesValidations validee = new ProvidesValidations() {
    @Override
    public Validations getValidations() {
      return Validations.builder().mandatoryPath("hello").allowEmptyValue("hello").build();
    }
  };

  Properties configProps = new Properties();
  configProps.setProperty("hello", "");
  Config config = ConfigFactory.parseProperties(configProps);
  assertNoValidationFailures(validee, config);

  config = config.withValue("hello", ConfigValueFactory.fromIterable(Lists.newArrayList("")));
  assertNoValidationFailures(validee, config);
}
Example 4
Source File: HiveDatasetDescriptor.java From incubator-gobblin with Apache License 2.0
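This constructor derives partitioning and conflict-policy settings, then chains several withValue() calls to fold them, along with a generated whitelist, back into the descriptor's raw config.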
public HiveDatasetDescriptor(Config config) throws IOException {
  super(config);
  this.isPartitioned = ConfigUtils.getBoolean(config, IS_PARTITIONED_KEY, true);

  if (isPartitioned) {
    partitionColumn = ConfigUtils.getString(config, PARTITION_COLUMN,
        DatePartitionHiveVersionFinder.DEFAULT_PARTITION_KEY_NAME);
    partitionFormat = ConfigUtils.getString(config, PARTITION_FORMAT,
        DatePartitionHiveVersionFinder.DEFAULT_PARTITION_VALUE_DATE_TIME_PATTERN);
    conflictPolicy = HiveCopyEntityHelper.ExistingEntityPolicy.REPLACE_PARTITIONS.name();
  } else {
    partitionColumn = "";
    partitionFormat = "";
    conflictPolicy = HiveCopyEntityHelper.ExistingEntityPolicy.REPLACE_TABLE.name();
  }

  whitelistBlacklist = new WhitelistBlacklist(config.withValue(WhitelistBlacklist.WHITELIST,
      ConfigValueFactory.fromAnyRef(createHiveDatasetWhitelist())));

  this.setRawConfig(this.getRawConfig()
      .withValue(CONFLICT_POLICY, ConfigValueFactory.fromAnyRef(conflictPolicy))
      .withValue(PARTITION_COLUMN, ConfigValueFactory.fromAnyRef(partitionColumn))
      .withValue(PARTITION_FORMAT, ConfigValueFactory.fromAnyRef(partitionFormat))
      .withValue(HiveDatasetFinder.HIVE_DATASET_PREFIX + "." + WhitelistBlacklist.WHITELIST,
          ConfigValueFactory.fromAnyRef(createHiveDatasetWhitelist())));
}
Example 5
Source File: DremioConfig.java From dremio-oss with Apache License 2.0
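This helper overrides a config property from a JVM system property when one is set, leaving the config unchanged otherwise.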
private static Config setSystemProperty(Config config, String sysProp, String configProp) {
  String systemProperty = System.getProperty(sysProp);
  if (systemProperty != null) {
    config = config.withValue(configProp, ConfigValueFactory.fromAnyRef(systemProperty));
    logger.info("Applying provided legacy system property to config: -D{}={}", configProp, systemProperty);
  }
  return config;
}
Example 6
Source File: IntegrationJobRestartViaSpecSuite.java From incubator-gobblin with Apache License 2.0
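This method builds a verb-specific job config, setting CANCEL_RUNNING_JOB_ON_DELETE for the DELETE and UPDATE verbs before producing the JobSpec.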
public void addJobSpec(String jobSpecName, String verb) throws IOException, URISyntaxException {
  Config jobConfig = ConfigFactory.empty();

  if (SpecExecutor.Verb.ADD.name().equals(verb)) {
    jobConfig = getJobConfig();
  } else if (SpecExecutor.Verb.DELETE.name().equals(verb)) {
    jobConfig = jobConfig.withValue(GobblinClusterConfigurationKeys.CANCEL_RUNNING_JOB_ON_DELETE,
        ConfigValueFactory.fromAnyRef("true"));
  } else if (SpecExecutor.Verb.UPDATE.name().equals(verb)) {
    jobConfig = getJobConfig().withValue(GobblinClusterConfigurationKeys.CANCEL_RUNNING_JOB_ON_DELETE,
        ConfigValueFactory.fromAnyRef("true"));
  }

  JobSpec jobSpec = JobSpec.builder(Files.getNameWithoutExtension(jobSpecName))
      .withConfig(jobConfig)
      .withTemplate(new URI("FS:///"))
      .withDescription("HelloWorldTestJob")
      .withVersion("1")
      .build();

  SpecExecutor.Verb enumVerb = SpecExecutor.Verb.valueOf(verb);
  switch (enumVerb) {
    case ADD:
      _specProducer.addSpec(jobSpec);
      break;
    case DELETE:
      _specProducer.deleteSpec(jobSpec.getUri());
      break;
    case UPDATE:
      _specProducer.updateSpec(jobSpec);
      break;
    default:
      throw new IOException("Unknown Spec Verb: " + verb);
  }
}
Example 7
Source File: ExecutorService.java From ldbc_graphalytics with Apache License 2.0
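This method overrides the Akka remoting port and hostname before creating the ActorSystem for the executor service.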
public static void InitService(BenchmarkExecutor executor) {
  Config config = defaultConfiguration();
  config = config.withValue("akka.remote.netty.tcp.port", ConfigValueFactory.fromAnyRef(getExecutorPort()));
  config = config.withValue("akka.remote.netty.tcp.hostname", ConfigValueFactory.fromAnyRef(SERVICE_IP));

  final ActorSystem system = ActorSystem.create(SERVICE_NAME, config);
  system.actorOf(Props.create(ExecutorService.class, executor), SERVICE_NAME);
}
Example 8
Source File: FileBasedJobLockFactoryManagerTest.java From incubator-gobblin with Apache License 2.0
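This test grows a system config step by step to verify that only properties under the factory's config prefix are passed through to the factory config.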
@Test
public void testGetFactoryConfig() {
  FileBasedJobLockFactoryManager mgr = new FileBasedJobLockFactoryManager();

  Config sysConfig1 = ConfigFactory.empty();
  Assert.assertTrue(mgr.getFactoryConfig(sysConfig1).isEmpty());

  Config sysConfig2 = sysConfig1.withValue("some.prop", ConfigValueFactory.fromAnyRef("test"));
  Assert.assertTrue(mgr.getFactoryConfig(sysConfig2).isEmpty());

  Config sysConfig3 = sysConfig2.withValue(
      FileBasedJobLockFactoryManager.CONFIG_PREFIX + "." + FileBasedJobLockFactory.LOCK_DIR_CONFIG,
      ConfigValueFactory.fromAnyRef("/tmp"));
  Config factoryCfg3 = mgr.getFactoryConfig(sysConfig3);
  Assert.assertEquals(factoryCfg3.getString(FileBasedJobLockFactory.LOCK_DIR_CONFIG), "/tmp");
}
Example 9
Source File: BaseFlowEdgeFactoryTest.java From incubator-gobblin with Apache License 2.0
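This test chains withValue() calls to build two spec-executor configs, then verifies that the FlowEdge created from them exposes the same values.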
@Test
public void testCreateFlowEdge() throws Exception {
  Properties properties = new Properties();
  properties.put(FlowGraphConfigurationKeys.FLOW_EDGE_SOURCE_KEY, "node1");
  properties.put(FlowGraphConfigurationKeys.FLOW_EDGE_DESTINATION_KEY, "node2");
  properties.put(FlowGraphConfigurationKeys.FLOW_EDGE_NAME_KEY, "edge1");
  properties.put(FlowGraphConfigurationKeys.FLOW_EDGE_ID_KEY, "node1:node2:edge1");
  properties.put(FlowGraphConfigurationKeys.FLOW_EDGE_TEMPLATE_DIR_URI_KEY, "FS:///flowEdgeTemplate");

  List<SpecExecutor> specExecutorList = new ArrayList<>();
  Config config1 = ConfigFactory.empty()
      .withValue("specStore.fs.dir", ConfigValueFactory.fromAnyRef("/tmp1"))
      .withValue("specExecInstance.capabilities", ConfigValueFactory.fromAnyRef("s1:d1"));
  specExecutorList.add(new InMemorySpecExecutor(config1));
  Config config2 = ConfigFactory.empty()
      .withValue("specStore.fs.dir", ConfigValueFactory.fromAnyRef("/tmp2"))
      .withValue("specExecInstance.capabilities", ConfigValueFactory.fromAnyRef("s2:d2"));
  specExecutorList.add(new InMemorySpecExecutor(config2));

  FlowEdgeFactory flowEdgeFactory = new BaseFlowEdge.Factory();

  Properties props = new Properties();
  URI flowTemplateCatalogUri = this.getClass().getClassLoader().getResource("template_catalog").toURI();
  props.put(ServiceConfigKeys.TEMPLATE_CATALOGS_FULLY_QUALIFIED_PATH_KEY, flowTemplateCatalogUri.toString());
  Config config = ConfigFactory.parseProperties(props);
  Config templateCatalogCfg = config.withValue(ConfigurationKeys.JOB_CONFIG_FILE_GENERAL_PATH_KEY,
      config.getValue(ServiceConfigKeys.TEMPLATE_CATALOGS_FULLY_QUALIFIED_PATH_KEY));
  FSFlowTemplateCatalog catalog = new FSFlowTemplateCatalog(templateCatalogCfg);

  Config edgeProps = ConfigUtils.propertiesToConfig(properties);
  FlowEdge flowEdge = flowEdgeFactory.createFlowEdge(edgeProps, catalog, specExecutorList);

  Assert.assertEquals(flowEdge.getSrc(), "node1");
  Assert.assertEquals(flowEdge.getDest(), "node2");
  Assert.assertEquals(flowEdge.getExecutors().get(0).getConfig().get().getString("specStore.fs.dir"), "/tmp1");
  Assert.assertEquals(flowEdge.getExecutors().get(0).getConfig().get().getString("specExecInstance.capabilities"), "s1:d1");
  Assert.assertEquals(flowEdge.getExecutors().get(1).getConfig().get().getString("specStore.fs.dir"), "/tmp2");
  Assert.assertEquals(flowEdge.getExecutors().get(1).getConfig().get().getString("specExecInstance.capabilities"), "s2:d2");
  Assert.assertEquals(flowEdge.getExecutors().get(0).getClass().getSimpleName(), "InMemorySpecExecutor");
  Assert.assertEquals(flowEdge.getExecutors().get(1).getClass().getSimpleName(), "InMemorySpecExecutor");
}
Example 10
Source File: AkkaSource.java From flink-learning with Apache License 2.0
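This method fills in mandatory Akka defaults only for paths that are not already present; note that fromAnyRef() also accepts a List for the enabled-transports value.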
private Config getOrCreateMandatoryProperties(Config properties) {
  if (!properties.hasPath("akka.actor.provider")) {
    properties = properties.withValue("akka.actor.provider",
        ConfigValueFactory.fromAnyRef("akka.remote.RemoteActorRefProvider"));
  }
  if (!properties.hasPath("akka.remote.enabled-transports")) {
    properties = properties.withValue("akka.remote.enabled-transports",
        ConfigValueFactory.fromAnyRef(Collections.singletonList("akka.remote.netty.tcp")));
  }
  return properties;
}
Example 11
Source File: TestHBaseOutput.java From envelope with Apache License 2.0
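This test injects the mini-cluster's ZooKeeper address into a config loaded from a resource file before exercising bulk upserts and deletes.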
@Test
public void testApplyBulkMutations() throws Exception {
  Table table = connection.getTable(TableName.valueOf(TABLE));

  Config config = ConfigUtils.configFromResource("/hbase/hbase-output-test.conf").getConfig("output");
  config = config.withValue("zookeeper",
      ConfigValueFactory.fromAnyRef("localhost:" + utility.getZkCluster().getClientPort()));

  HBaseOutput output = new HBaseOutput();
  output.configure(config);

  // Generate bulk mutations
  Dataset<Row> upserts = createBulkMutations(INPUT_ROWS);
  Dataset<Row> deletes = createBulkMutations(INPUT_ROWS);

  List<Tuple2<MutationType, Dataset<Row>>> bulk1 = Lists.newArrayList();
  bulk1.add(new Tuple2<>(MutationType.UPSERT, upserts));
  List<Tuple2<MutationType, Dataset<Row>>> bulk2 = Lists.newArrayList();
  bulk2.add(new Tuple2<>(MutationType.DELETE, deletes));
  List<Tuple2<MutationType, Dataset<Row>>> bulk3 = Lists.newArrayList();
  bulk3.add(new Tuple2<>(MutationType.UPSERT, upserts));
  bulk3.add(new Tuple2<>(MutationType.DELETE, deletes));

  // Run 1 should have 2000
  output.applyBulkMutations(bulk1);
  scanAndCountTable(table, INPUT_ROWS * 4);

  // Run 2 should have 0
  output.applyBulkMutations(bulk2);
  scanAndCountTable(table, 0);

  // Run 3 should have 0
  output.applyBulkMutations(bulk3);
  scanAndCountTable(table, 0);
}
Example 12
Source File: TestFlagFileRepetition.java From envelope with Apache License 2.0
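This test rewrites the repetition's flag-file path to a relative file's URI, then checks that placing the flag file triggers the repeating step.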
@Test
public void testRepeatStepPresent() throws IOException {
  Config config = ConfigUtils.configFromResource("/repetitions/repetitions-flag-config.conf");
  File relativeFlagFile = new File("flag");
  config = config.withValue("steps.repeater.repetitions.hdfsinnit.file",
      ConfigValueFactory.fromAnyRef(relativeFlagFile.toURI().toString()));

  try {
    BatchStep step = new BatchStep("testFlagRepetition");
    ValidationAssert.assertNoValidationFailures(step, config.getConfig("steps.repeater"));
    step.configure(config.getConfig("steps.repeater"));

    Set<DataStep> steps = Repetitions.get().getAndClearRepeatingSteps();
    assertTrue("Repeating steps should not be populated", steps.isEmpty());

    // Place flag file
    assertTrue(fs.createNewFile(flagFile));
    // Should _not_ be empty
    waitForResponse(300, false, 10);
    // Should immediately be empty
    waitForResponse(300, true, 1);

    // Repeat again - add the flag file back
    assertTrue(fs.createNewFile(flagFile));
    // Should _not_ be empty
    waitForResponse(300, false, 10);
    // Should immediately be empty
    waitForResponse(300, true, 1);
  } catch (Exception e) {
    System.err.println(e.getMessage());
    fail();
  }
}
Example 13
Source File: GobblinTaskRunner.java From incubator-gobblin with Apache License 2.0
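This method records the application work directory in the config before persisting it to the cluster config file.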
private Config saveConfigToFile(Config config) throws IOException {
  Config newConf = config.withValue(CLUSTER_APP_WORK_DIR,
      ConfigValueFactory.fromAnyRef(this.appWorkPath.toString()));
  ConfigUtils configUtils = new ConfigUtils(new FileUtils());
  configUtils.saveConfigToFile(newConf, CLUSTER_CONF_PATH);
  return newConf;
}
Example 14
Source File: SftpDataNodeTest.java From incubator-gobblin with Apache License 2.0
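This test builds an SFTP node config from ConfigFactory.empty(), later overrides the port, and verifies that a config missing the hostname fails node creation.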
@Test
public void testCreate() throws DataNode.DataNodeCreationException {
  // Create a SFTP DataNode with default SFTP port
  Config config = ConfigFactory.empty()
      .withValue(SftpDataNode.SFTP_HOSTNAME, ConfigValueFactory.fromAnyRef("testHost"))
      .withValue(FlowGraphConfigurationKeys.DATA_NODE_ID_KEY, ConfigValueFactory.fromAnyRef("testId"));
  SftpDataNode dataNode = new SftpDataNode(config);
  Assert.assertEquals(dataNode.getId(), "testId");
  Assert.assertEquals(dataNode.getHostName(), "testHost");
  Assert.assertEquals(dataNode.getPort().intValue(), ConfigurationKeys.SOURCE_CONN_DEFAULT_PORT);
  Assert.assertEquals(dataNode.getDefaultDatasetDescriptorPlatform(), SftpDataNode.PLATFORM);
  Assert.assertEquals(dataNode.getDefaultDatasetDescriptorClass(), FSDatasetDescriptor.class.getCanonicalName());

  config = config.withValue(SftpDataNode.SFTP_PORT, ConfigValueFactory.fromAnyRef(143));
  SftpDataNode dataNodeWithPort = new SftpDataNode(config);
  Assert.assertEquals(dataNode.getId(), "testId");
  Assert.assertEquals(dataNode.getHostName(), "testHost");
  Assert.assertEquals(dataNodeWithPort.getPort().intValue(), 143);
  Assert.assertEquals(dataNode.getDefaultDatasetDescriptorPlatform(), SftpDataNode.PLATFORM);
  Assert.assertEquals(dataNode.getDefaultDatasetDescriptorClass(), FSDatasetDescriptor.class.getCanonicalName());

  Config configMissingProps = ConfigFactory.empty()
      .withValue(FlowGraphConfigurationKeys.DATA_NODE_ID_KEY, ConfigValueFactory.fromAnyRef("testId"));
  try {
    DataNode sftpNode = new SftpDataNode(configMissingProps);
    Assert.fail("Unexpected success in creating Sftp node.");
  } catch (DataNode.DataNodeCreationException e) {
    // Expected exception.
  }
}
Example 15
Source File: AbstractPathFinder.java From incubator-gobblin with Apache License 2.0
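This helper walks the current descriptor's raw config and overwrites any placeholder entries in the output descriptor with the corresponding concrete values.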
/**
 * A helper method to make the output {@link DatasetDescriptor} of a {@link FlowEdge} "specific". More precisely,
 * we replace any "placeholder" configurations in the output {@link DatasetDescriptor} with specific configuration
 * values obtained from the input {@link DatasetDescriptor}. A placeholder configuration is one which is not
 * defined or is set to {@link DatasetDescriptorConfigKeys#DATASET_DESCRIPTOR_CONFIG_ANY}.
 *
 * Example: Consider a {@link FlowEdge} that applies retention on an input dataset. Further assume that this edge
 * is applicable to datasets of all formats. The input and output descriptors of this edge may be described using
 * the following configs:
 * inputDescriptor = Config(SimpleConfigObject({"class":"org.apache.gobblin.service.modules.dataset.FSDatasetDescriptor",
 *   "codec":"any","encrypt":{"algorithm":"any","keystore_encoding":"any","keystore_type":"any"},"format":"any",
 *   "isRetentionApplied":false,"path":"/data/encrypted/testTeam/testDataset","platform":"hdfs"}))
 *
 * outputDescriptor = Config(SimpleConfigObject({"class":"org.apache.gobblin.service.modules.dataset.FSDatasetDescriptor",
 *   "codec":"any","encrypt":{"algorithm":"any","keystore_encoding":"any","keystore_type":"any"},"format":"any",
 *   "isRetentionApplied":true,"path":"/data/encrypted/testTeam/testDataset","platform":"hdfs"}))
 *
 * Let the intermediate dataset descriptor "arriving" at this edge be described using the following config:
 * currentDescriptor = Config(SimpleConfigObject({"class":"org.apache.gobblin.service.modules.dataset.FSDatasetDescriptor",
 *   "codec":"gzip","encrypt":{"algorithm":"aes_rotating","keystore_encoding":"base64","keystore_type":"json"},"format":"json",
 *   "isRetentionApplied":false,"path":"/data/encrypted/testTeam/testDataset","platform":"hdfs"})).
 *
 * This method replaces the placeholder configs in outputDescriptor with specific values from currentDescriptor to return:
 * returnedDescriptor = Config(SimpleConfigObject({"class":"org.apache.gobblin.service.modules.dataset.FSDatasetDescriptor",
 *   "codec":"gzip","encrypt":{"algorithm":"aes_rotating","keystore_encoding":"base64","keystore_type":"json"},"format":"json",
 *   "isRetentionApplied":true,"path":"/data/encrypted/testTeam/testDataset","platform":"hdfs"})).
 *
 * @param currentDescriptor intermediate {@link DatasetDescriptor} obtained during path finding.
 * @param outputDescriptor output {@link DatasetDescriptor} of a {@link FlowEdge}.
 * @return {@link DatasetDescriptor} with placeholder configs in outputDescriptor substituted with specific values
 *         from the currentDescriptor.
 */
private DatasetDescriptor makeOutputDescriptorSpecific(DatasetDescriptor currentDescriptor,
    DatasetDescriptor outputDescriptor) throws ReflectiveOperationException {
  Config config = outputDescriptor.getRawConfig();
  for (Map.Entry<String, ConfigValue> entry : currentDescriptor.getRawConfig().entrySet()) {
    String entryValue = entry.getValue().unwrapped().toString();
    if (!isPlaceHolder(entryValue)) {
      String entryValueInOutputDescriptor = ConfigUtils.getString(config, entry.getKey(), StringUtils.EMPTY);
      if (isPlaceHolder(entryValueInOutputDescriptor)) {
        config = config.withValue(entry.getKey(), ConfigValueFactory.fromAnyRef(entryValue));
      }
    }
  }
  return GobblinConstructorUtils.invokeLongestConstructor(outputDescriptor.getClass(), config);
}
Example 16
Source File: ServiceSpecificEnvironmentConfigSupplier.java From ditto with Eclipse Public License 2.0
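A one-liner that inserts the hosting-environment config under a fixed path; note that withValue() accepts any ConfigValue, not only values built with ConfigValueFactory.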
private Config withHostingEnvironmentValue(final Config config) {
  return config.withValue(HostingEnvironment.CONFIG_PATH, getHostingEnvironmentConfig());
}
Example 17
Source File: NoopDatasetStateStore.java From incubator-gobblin with Apache License 2.0
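This factory method substitutes an empty root directory before delegating, since a no-op state store needs no real storage location.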
@Override
public DatasetStateStore<JobState.DatasetState> createStateStore(Config config) {
  // dummy root dir for noop state store
  Config config2 = config.withValue(ConfigurationKeys.STATE_STORE_ROOT_DIR_KEY,
      ConfigValueFactory.fromAnyRef(""));
  return FsDatasetStateStore.createStateStore(config2, NoopDatasetStateStore.class.getName());
}
Example 18
Source File: BaseFlowToJobSpecCompiler.java From incubator-gobblin with Apache License 2.0
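In this constructor, withValue() copies the template catalog path from one key to another by passing the ConfigValue returned by config.getValue() directly.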
public BaseFlowToJobSpecCompiler(Config config, Optional<Logger> log, boolean instrumentationEnabled) {
  this.log = log.isPresent() ? log.get() : LoggerFactory.getLogger(getClass());
  if (instrumentationEnabled) {
    this.metricContext = Instrumented.getMetricContext(ConfigUtils.configToState(config), IdentityFlowToJobSpecCompiler.class);
    this.flowCompilationSuccessFulMeter = Optional.of(this.metricContext.meter(ServiceMetricNames.FLOW_COMPILATION_SUCCESSFUL_METER));
    this.flowCompilationFailedMeter = Optional.of(this.metricContext.meter(ServiceMetricNames.FLOW_COMPILATION_FAILED_METER));
    this.flowCompilationTimer = Optional.<Timer>of(this.metricContext.timer(ServiceMetricNames.FLOW_COMPILATION_TIMER));
    this.dataAuthorizationTimer = Optional.<Timer>of(this.metricContext.timer(ServiceMetricNames.DATA_AUTHORIZATION_TIMER));
  } else {
    this.metricContext = null;
    this.flowCompilationSuccessFulMeter = Optional.absent();
    this.flowCompilationFailedMeter = Optional.absent();
    this.flowCompilationTimer = Optional.absent();
    this.dataAuthorizationTimer = Optional.absent();
  }

  this.topologySpecMap = Maps.newConcurrentMap();
  this.config = config;

  /***
   * ETL-5996
   * For multi-tenancy, the following needs to be added:
   * 1. Change singular templateCatalog to Map<URI, JobCatalogWithTemplates> to support multiple templateCatalogs
   * 2. Pick templateCatalog from JobCatalogWithTemplates based on URI, and try to resolve JobSpec using that
   */
  try {
    if (this.config.hasPath(ServiceConfigKeys.TEMPLATE_CATALOGS_FULLY_QUALIFIED_PATH_KEY)
        && StringUtils.isNotBlank(this.config.getString(ServiceConfigKeys.TEMPLATE_CATALOGS_FULLY_QUALIFIED_PATH_KEY))) {
      Config templateCatalogCfg = config.withValue(ConfigurationKeys.JOB_CONFIG_FILE_GENERAL_PATH_KEY,
          this.config.getValue(ServiceConfigKeys.TEMPLATE_CATALOGS_FULLY_QUALIFIED_PATH_KEY));
      this.templateCatalog = Optional.of(new FSJobCatalog(templateCatalogCfg));
    } else {
      this.templateCatalog = Optional.absent();
    }
  } catch (IOException e) {
    throw new RuntimeException("Could not initialize FlowCompiler because of "
        + "TemplateCatalog initialization failure", e);
  }
}
Example 19
Source File: GobblinTaskRunnerTest.java From incubator-gobblin with Apache License 2.0
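This test setup chains withValue() with resolve(), and derives several participant configs from a shared base config by overriding individual keys.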
@BeforeClass
public void setUp() throws Exception {
  this.testingZKServer = new TestingServer(-1);
  LOG.info("Testing ZK Server listening on: " + testingZKServer.getConnectString());

  URL url = GobblinTaskRunnerTest.class.getClassLoader().getResource(
      GobblinTaskRunnerTest.class.getSimpleName() + ".conf");
  Assert.assertNotNull(url, "Could not find resource " + url);

  Config config = ConfigFactory.parseURL(url)
      .withValue("gobblin.cluster.zk.connection.string",
          ConfigValueFactory.fromAnyRef(testingZKServer.getConnectString()))
      .withValue(GobblinClusterConfigurationKeys.HADOOP_CONFIG_OVERRIDES_PREFIX + "." + HADOOP_OVERRIDE_PROPERTY_NAME,
          ConfigValueFactory.fromAnyRef("value"))
      .withValue(GobblinClusterConfigurationKeys.HADOOP_CONFIG_OVERRIDES_PREFIX + "." + "fs.file.impl.disable.cache",
          ConfigValueFactory.fromAnyRef("true"))
      .resolve();

  String zkConnectionString = config.getString(GobblinClusterConfigurationKeys.ZK_CONNECTION_STRING_KEY);
  this.clusterName = config.getString(GobblinClusterConfigurationKeys.HELIX_CLUSTER_NAME_KEY);
  HelixUtils.createGobblinHelixCluster(zkConnectionString, this.clusterName);

  // Participant
  this.gobblinTaskRunner = new GobblinTaskRunner(TestHelper.TEST_APPLICATION_NAME,
      TestHelper.TEST_HELIX_INSTANCE_NAME, TestHelper.TEST_APPLICATION_ID, TestHelper.TEST_TASK_RUNNER_ID,
      config, Optional.<Path>absent());

  // Participant
  String healthCheckInstance = HelixUtils.getHelixInstanceName("HealthCheckHelixInstance", 0);
  this.gobblinTaskRunnerHealthCheck = new GobblinTaskRunner(TestHelper.TEST_APPLICATION_NAME,
      healthCheckInstance, TestHelper.TEST_APPLICATION_ID, TestHelper.TEST_TASK_RUNNER_ID,
      config.withValue(GobblinClusterConfigurationKeys.CONTAINER_EXIT_ON_HEALTH_CHECK_FAILURE_ENABLED,
          ConfigValueFactory.fromAnyRef(true)),
      Optional.<Path>absent());

  // Participant that fails to start due to metric reporter failures
  String instanceName = HelixUtils.getHelixInstanceName("MetricReporterFailureInstance", 0);
  Config metricConfig = config
      .withValue(ConfigurationKeys.METRICS_ENABLED_KEY, ConfigValueFactory.fromAnyRef(true))
      .withValue(ConfigurationKeys.METRICS_REPORTING_KAFKA_ENABLED_KEY, ConfigValueFactory.fromAnyRef(true))
      .withValue(ConfigurationKeys.METRICS_KAFKA_TOPIC_METRICS, ConfigValueFactory.fromAnyRef("metricTopic"))
      .withValue(ConfigurationKeys.GOBBLIN_TASK_METRIC_REPORTING_FAILURE_FATAL, ConfigValueFactory.fromAnyRef(true));
  this.gobblinTaskRunnerFailedReporter = new GobblinTaskRunner(TestHelper.TEST_APPLICATION_NAME,
      instanceName, TestHelper.TEST_APPLICATION_ID, "2", metricConfig, Optional.<Path>absent());

  // Participant with a partial Instance set up on Helix/ZK
  this.corruptHelixInstance = HelixUtils.getHelixInstanceName("CorruptHelixInstance", 0);
  this.corruptGobblinTaskRunner = new GobblinTaskRunner(TestHelper.TEST_APPLICATION_NAME,
      corruptHelixInstance, TestHelper.TEST_APPLICATION_ID, TestHelper.TEST_TASK_RUNNER_ID,
      config, Optional.<Path>absent());

  // Controller
  this.gobblinClusterManager = new GobblinClusterManager(TestHelper.TEST_APPLICATION_NAME,
      TestHelper.TEST_APPLICATION_ID, config, Optional.<Path>absent());
  this.gobblinClusterManager.connectHelixManager();
}
Example 20
Source File: ContextFilterFactory.java From incubator-gobblin with Apache License 2.0
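This utility stores the canonical class name of the desired ContextFilter so it can later be instantiated from the config.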
/**
 * Modify the configuration to set the {@link ContextFilter} class.
 * @param config Input {@link Config}.
 * @param klazz Class of desired {@link ContextFilter}.
 * @return Modified {@link Config}.
 */
public static Config setContextFilterClass(Config config, Class<? extends ContextFilter> klazz) {
  return config.withValue(CONTEXT_FILTER_CLASS, ConfigValueFactory.fromAnyRef(klazz.getCanonicalName()));
}