Java Code Examples for org.apache.hadoop.conf.Configuration#addResource()
The following examples show how to use
org.apache.hadoop.conf.Configuration#addResource().
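The examples exercise several of the addResource() overloads: a classpath resource name (a String), a filesystem Path, a URL, and an InputStream. As a quick orientation before the examples, here is a minimal sketch of those variants; the file names and the lookup key are hypothetical, and the defaults-handling comments reflect Configuration's documented behavior.

import java.io.InputStream;
import java.net.URL;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;

public class AddResourceSketch {
    public static void main(String[] args) throws Exception {
        // false = do not load the default resources (core-default.xml, core-site.xml)
        Configuration conf = new Configuration(false);

        // 1. Classpath resource name, resolved through the class loader
        conf.addResource("my-site.xml");                      // hypothetical resource

        // 2. Filesystem path
        conf.addResource(new Path("/etc/myapp/my-site.xml")); // hypothetical path

        // 3. Any URL the JVM can open
        conf.addResource(new URL("file:///etc/myapp/my-site.xml"));

        // 4. InputStream, parsed when properties are first read
        InputStream in = AddResourceSketch.class.getResourceAsStream("/my-site.xml");
        if (in != null) {
            conf.addResource(in);
        }

        // Resources are loaded lazily; this first get() triggers the parsing.
        System.out.println(conf.get("my.example.key", "default")); // hypothetical key
    }
}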
Example 1
Source File: BaseConfigurationFactory.java From pxf with Apache License 2.0
private void processUserResource(Configuration configuration, String serverName,
                                 String userName, File directory) {
    // add user config file as configuration resource
    try {
        Path path = Paths.get(String.format("%s/%s-user.xml", directory.toPath(), userName));
        if (Files.exists(path)) {
            Configuration userConfiguration = new Configuration(false);
            URL resourceURL = path.toUri().toURL();
            userConfiguration.addResource(resourceURL);
            LOG.debug("Adding user properties for server {} from {}", serverName, resourceURL);
            userConfiguration.forEach(entry -> configuration.set(entry.getKey(), entry.getValue()));
            configuration.set(
                String.format("%s.%s", PXF_CONFIG_RESOURCE_PATH_PROPERTY, path.getFileName().toString()),
                resourceURL.toString());
        }
    } catch (Exception e) {
        throw new RuntimeException(String.format(
            "Unable to read user configuration for user %s using server %s from %s",
            userName, serverName, directory.getAbsolutePath()), e);
    }
}
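A detail worth noting in Example 1: the user file is loaded into its own Configuration(false) and merged into the target with set(), rather than attached to the target via addResource(). Values applied with set() live in the Configuration's overlay, so they take precedence over XML resources and survive later resource reloads. A rough fragment of the same merge pattern, with hypothetical file names (imports as in the sketch above):

Configuration base = new Configuration(false);
base.addResource("server-site.xml");         // hypothetical base resource

Configuration overrides = new Configuration(false);
overrides.addResource("user-overrides.xml"); // hypothetical override file

// set() stores each value in the overlay, so it also wins over any
// resource added to 'base' later on.
overrides.forEach(e -> base.set(e.getKey(), e.getValue()));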
Example 2
Source File: ChaosMonkeyRunner.java From hbase with Apache License 2.0
public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    String[] actualArgs = args;
    if (args.length > 0 && "-c".equals(args[0])) {
        int argCount = args.length - 2;
        if (argCount < 0) {
            throw new IllegalArgumentException("Missing path for -c parameter");
        }
        // load the resource specified by the second parameter
        conf.addResource(args[1]);
        actualArgs = new String[argCount];
        System.arraycopy(args, 2, actualArgs, 0, argCount);
    }
    IntegrationTestingUtility.setUseDistributedCluster(conf);
    int ret = ToolRunner.run(conf, new ChaosMonkeyRunner(), actualArgs);
    System.exit(ret);
}
Example 3
Source File: CassandraApplicationTest.java From examples with Apache License 2.0
@Test
public void testApplication() throws IOException, Exception {
    try {
        LocalMode lma = LocalMode.newInstance();
        Configuration conf = new Configuration(false);
        conf.addResource(this.getClass().getResourceAsStream("/properties-CassandraOutputTestApp.xml"));
        conf.set("dt.operator.CassandraDataWriter.prop.store.node", "localhost");
        conf.set("dt.operator.CassandraDataWriter.prop.store.keyspace", KEYSPACE);
        conf.set("dt.operator.CassandraDataWriter.prop.tablename", TABLE_NAME);
        lma.prepareDAG(new Application(), conf);
        LocalMode.Controller lc = lma.getController();
        lc.run(10000); // runs for 10 seconds and quits

        // validate: Cassandra provides eventual consistency so not checking for exact record count.
        String recordsQuery = "SELECT * from " + KEYSPACE + "." + TABLE_NAME + ";";
        ResultSet resultSetRecords = session.execute(recordsQuery);
        Assert.assertTrue("No records were added to the table.",
            resultSetRecords.getAvailableWithoutFetching() > 0);
    } catch (ConstraintViolationException e) {
        Assert.fail("constraint violations: " + e.getConstraintViolations());
    }
}
Example 4
Source File: FileUtil.java From neo4j-mazerunner with Apache License 2.0
/**
 * Gets the HDFS file system and loads in local Hadoop configurations.
 * @return Returns a distributed FileSystem object.
 * @throws IOException
 * @throws URISyntaxException
 */
public static FileSystem getHadoopFileSystem() throws IOException, URISyntaxException {
    Configuration hadoopConfiguration = new Configuration();
    hadoopConfiguration.addResource(new Path(ConfigurationLoader.getInstance().getHadoopHdfsPath()));
    hadoopConfiguration.addResource(new Path(ConfigurationLoader.getInstance().getHadoopSitePath()));
    hadoopConfiguration.set("fs.hdfs.impl", "org.apache.hadoop.hdfs.DistributedFileSystem");
    return FileSystem.get(new URI(ConfigurationLoader.getInstance().getHadoopHdfsUri()), hadoopConfiguration);
}
Example 5
Source File: ResourceTrackerService.java From hadoop with Apache License 2.0
@Override
protected void serviceStart() throws Exception {
    super.serviceStart();
    // ResourceTrackerServer authenticates NodeManager via Kerberos if
    // security is enabled, so no secretManager.
    Configuration conf = getConfig();
    YarnRPC rpc = YarnRPC.create(conf);
    this.server = rpc.getServer(ResourceTracker.class, this, resourceTrackerAddress,
        conf, null,
        conf.getInt(YarnConfiguration.RM_RESOURCE_TRACKER_CLIENT_THREAD_COUNT,
            YarnConfiguration.DEFAULT_RM_RESOURCE_TRACKER_CLIENT_THREAD_COUNT));

    // Enable service authorization?
    if (conf.getBoolean(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHORIZATION, false)) {
        InputStream inputStream = this.rmContext.getConfigurationProvider()
            .getConfigurationInputStream(conf, YarnConfiguration.HADOOP_POLICY_CONFIGURATION_FILE);
        if (inputStream != null) {
            conf.addResource(inputStream);
        }
        refreshServiceAcls(conf, RMPolicyProvider.getInstance());
    }

    this.server.start();
    conf.updateConnectAddr(YarnConfiguration.RM_BIND_HOST,
        YarnConfiguration.RM_RESOURCE_TRACKER_ADDRESS,
        YarnConfiguration.DEFAULT_RM_RESOURCE_TRACKER_ADDRESS,
        server.getListenerAddress());
}
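Example 5 hands hadoop-policy.xml to the live Configuration as an InputStream obtained from the configuration provider. A caution for this overload: the stream is consumed lazily, on the next property load, so it should be left open after the addResource() call. A minimal fragment under that assumption (MyService and the resource location are hypothetical; the ACL key is one of the standard hadoop-policy.xml entries):

Configuration conf = new Configuration();
InputStream in = MyService.class.getResourceAsStream("/hadoop-policy.xml"); // hypothetical location
if (in != null) {
    // Parsed (and closed by Configuration) when properties are next
    // loaded, so do not close the stream here.
    conf.addResource(in);
}
String acl = conf.get("security.client.protocol.acl", "*"); // triggers the parse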
Example 6
Source File: TestSSLHttpServer.java From big-c with Apache License 2.0
@BeforeClass
public static void setup() throws Exception {
    conf = new Configuration();
    conf.setInt(HttpServer2.HTTP_MAX_THREADS, 10);

    File base = new File(BASEDIR);
    FileUtil.fullyDelete(base);
    base.mkdirs();
    keystoresDir = new File(BASEDIR).getAbsolutePath();
    sslConfDir = KeyStoreTestUtil.getClasspathDir(TestSSLHttpServer.class);

    KeyStoreTestUtil.setupSSLConfig(keystoresDir, sslConfDir, conf, false);
    Configuration sslConf = new Configuration(false);
    sslConf.addResource("ssl-server.xml");
    sslConf.addResource("ssl-client.xml");
    clientSslFactory = new SSLFactory(SSLFactory.Mode.CLIENT, sslConf);
    clientSslFactory.init();

    server = new HttpServer2.Builder()
        .setName("test")
        .addEndpoint(new URI("https://localhost"))
        .setConf(conf)
        .keyPassword(sslConf.get("ssl.server.keystore.keypassword"))
        .keyStore(sslConf.get("ssl.server.keystore.location"),
            sslConf.get("ssl.server.keystore.password"),
            sslConf.get("ssl.server.keystore.type", "jks"))
        .trustStore(sslConf.get("ssl.server.truststore.location"),
            sslConf.get("ssl.server.truststore.password"),
            sslConf.get("ssl.server.truststore.type", "jks"))
        .build();
    server.addServlet("echo", "/echo", TestHttpServer.EchoServlet.class);
    server.addServlet("longheader", "/longheader", LongHeaderServlet.class);
    server.start();
    baseUrl = new URL("https://" + NetUtils.getHostPortString(server.getConnectorAddress(0)));
    LOG.info("HTTP server started: " + baseUrl);
}
Example 7
Source File: ApplicationTest.java From attic-apex-malhar with Apache License 2.0
@Test
public void testApplication() throws IOException, Exception {
    try {
        LocalMode lma = LocalMode.newInstance();
        Configuration conf = new Configuration(false);
        conf.addResource(this.getClass().getResourceAsStream("/META-INF/properties.xml"));
        lma.prepareDAG(new Application(), conf);
        LocalMode.Controller lc = lma.getController();
        lc.run(10000); // runs for 10 seconds and quits
    } catch (ConstraintViolationException e) {
        Assert.fail("constraint violations: " + e.getConstraintViolations());
    }
}
Example 8
Source File: HBaseConfiguration.java From hbase with Apache License 2.0
public static Configuration addHbaseResources(Configuration conf) {
    conf.addResource("hbase-default.xml");
    conf.addResource("hbase-site.xml");
    checkDefaultsVersion(conf);
    return conf;
}
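Example 8 relies on resource ordering: resources are applied in the order they were added, so hbase-site.xml overrides matching keys from hbase-default.xml, except for properties the earlier file marks <final>true</final>. A minimal fragment of that ordering, with hypothetical resource names and key:

Configuration conf = new Configuration(false);
conf.addResource("app-default.xml"); // hypothetically sets some.key=default-value
conf.addResource("app-site.xml");    // hypothetically sets some.key=site-value
// Later resources win unless the earlier definition was marked final.
System.out.println(conf.get("some.key")); // "site-value"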
Example 9
Source File: TestSSLHttpServer.java From hadoop with Apache License 2.0
@BeforeClass
public static void setup() throws Exception {
    conf = new Configuration();
    conf.setInt(HttpServer2.HTTP_MAX_THREADS, 10);

    File base = new File(BASEDIR);
    FileUtil.fullyDelete(base);
    base.mkdirs();
    keystoresDir = new File(BASEDIR).getAbsolutePath();
    sslConfDir = KeyStoreTestUtil.getClasspathDir(TestSSLHttpServer.class);

    KeyStoreTestUtil.setupSSLConfig(keystoresDir, sslConfDir, conf, false);
    Configuration sslConf = new Configuration(false);
    sslConf.addResource("ssl-server.xml");
    sslConf.addResource("ssl-client.xml");
    clientSslFactory = new SSLFactory(SSLFactory.Mode.CLIENT, sslConf);
    clientSslFactory.init();

    server = new HttpServer2.Builder()
        .setName("test")
        .addEndpoint(new URI("https://localhost"))
        .setConf(conf)
        .keyPassword(sslConf.get("ssl.server.keystore.keypassword"))
        .keyStore(sslConf.get("ssl.server.keystore.location"),
            sslConf.get("ssl.server.keystore.password"),
            sslConf.get("ssl.server.keystore.type", "jks"))
        .trustStore(sslConf.get("ssl.server.truststore.location"),
            sslConf.get("ssl.server.truststore.password"),
            sslConf.get("ssl.server.truststore.type", "jks"))
        .build();
    server.addServlet("echo", "/echo", TestHttpServer.EchoServlet.class);
    server.addServlet("longheader", "/longheader", LongHeaderServlet.class);
    server.start();
    baseUrl = new URL("https://" + NetUtils.getHostPortString(server.getConnectorAddress(0)));
    LOG.info("HTTP server started: " + baseUrl);
}
Example 10
Source File: TestIndexingFilters.java From nutch-htmlunit with Apache License 2.0
/**
 * Test behaviour when defined filter does not exist.
 * @throws IndexingException
 */
public void testNonExistingIndexingFilter() throws IndexingException {
    Configuration conf = NutchConfiguration.create();
    conf.addResource("nutch-default.xml");
    conf.addResource("crawl-tests.xml");

    String class1 = "NonExistingFilter";
    String class2 = "org.apache.nutch.indexer.basic.BasicIndexingFilter";
    conf.set(IndexingFilters.INDEXINGFILTER_ORDER, class1 + " " + class2);

    IndexingFilters filters = new IndexingFilters(conf);
    filters.filter(new NutchDocument(), new ParseImpl("text",
        new ParseData(new ParseStatus(), "title", new Outlink[0], new Metadata())),
        new Text("http://www.example.com/"), new CrawlDatum(), new Inlinks());
}
Example 11
Source File: AlertGenerator.java From jumbune with GNU Lesser General Public License v3.0
/**
 * Gets the minimum parameter mandatory for container.
 *
 * @param parameter the parameter
 * @param value the value
 * @param cluster the cluster
 * @return the minimum parameter mandatory for container
 */
private int getMinimumParameterMandatoryForContainer(String parameter, int value, Cluster cluster) {
    Configuration c = new Configuration();
    String hadoopConfDir = RemotingUtil.getHadoopConfigurationDirPath(cluster);
    String localConfFilePath = ConfigurationUtil.getLocalConfigurationFilePath(cluster)
        + ExtendedConstants.YARN_SITE_XML;
    File file = new File(localConfFilePath);
    if (!file.exists()) {
        String filePath = RemotingUtil.addHadoopResource(c, cluster, hadoopConfDir,
            ExtendedConstants.YARN_SITE_XML);
        c.addResource(new Path(filePath));
    }
    c.addResource(new Path(localConfFilePath));
    return c.getInt(parameter, value);
}
Example 12
Source File: SLSRunner.java From hadoop with Apache License 2.0
public SLSRunner(boolean isSLS, String inputTraces[], String nodeFile,
                 String outputDir, Set<String> trackedApps,
                 boolean printsimulation)
        throws IOException, ClassNotFoundException {
    this.isSLS = isSLS;
    this.inputTraces = inputTraces.clone();
    this.nodeFile = nodeFile;
    this.trackedApps = trackedApps;
    this.printSimulation = printsimulation;
    metricsOutputDir = outputDir;

    nmMap = new HashMap<NodeId, NMSimulator>();
    queueAppNumMap = new HashMap<String, Integer>();
    amMap = new HashMap<String, AMSimulator>();
    amClassMap = new HashMap<String, Class>();

    // runner configuration
    conf = new Configuration(false);
    conf.addResource("sls-runner.xml");
    // runner
    int poolSize = conf.getInt(SLSConfiguration.RUNNER_POOL_SIZE,
        SLSConfiguration.RUNNER_POOL_SIZE_DEFAULT);
    SLSRunner.runner.setQueueSize(poolSize);
    // <AMType, Class> map
    for (Map.Entry e : conf) {
        String key = e.getKey().toString();
        if (key.startsWith(SLSConfiguration.AM_TYPE)) {
            String amType = key.substring(SLSConfiguration.AM_TYPE.length());
            amClassMap.put(amType, Class.forName(conf.get(key)));
        }
    }
}
Example 13
Source File: StramClientUtils.java From attic-apex-core with Apache License 2.0
public static void addDTLocalResources(Configuration conf) {
    conf.addResource(DT_DEFAULT_XML_FILE);
    if (!isDevelopmentMode()) {
        addDTSiteResources(conf, new File(StramClientUtils.getConfigDir(), StramClientUtils.DT_SITE_XML_FILE));
    }
    addDTSiteResources(conf, new File(StramClientUtils.getUserDTDirectory(), StramClientUtils.DT_SITE_XML_FILE));
}
Example 14
Source File: QueueManager.java From RDFS with Apache License 2.0
private HashMap<String, AccessControlList> getQueueAcls(Configuration conf) {
    checkDeprecation(conf);
    conf.addResource(QUEUE_ACLS_FILE_NAME);
    HashMap<String, AccessControlList> aclsMap =
        new HashMap<String, AccessControlList>();
    for (String queue : queueNames) {
        for (QueueOperation oper : QueueOperation.values()) {
            String key = toFullPropertyName(queue, oper.getAclName());
            String aclString = conf.get(key, "*");
            aclsMap.put(key, new AccessControlList(aclString));
        }
    }
    return aclsMap;
}
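Note that the conf handed to getQueueAcls() has typically been read from already; calling addResource() on it clears the cached properties and forces a full reload, so the ACL file's values become visible to the get() calls that follow. A small fragment illustrating that behavior (the key follows the mapred.queue.<name>.<acl> pattern built by toFullPropertyName above):

Configuration conf = new Configuration();
String before = conf.get("mapred.queue.default.acl-submit-job"); // likely null at this point
conf.addResource("mapred-queue-acls.xml"); // triggers a reload of all resources
String after = conf.get("mapred.queue.default.acl-submit-job");  // re-read, now from the ACL file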
Example 15
Source File: HBasePluginUtil.java From DataLink with Apache License 2.0
public static void caclRegionCount(HBaseRegionCountEvent event) {
    Configuration configuration = HBaseConfiguration.create();
    HBaseMediaSrcParameter hbaseParameter = event.getHbaseParameter();
    String tableName = event.getTableName();
    ZkMediaSrcParameter zkParameter = event.getZkParameter();
    String hosts = zkParameter.parseServersToString();
    String port = zkParameter.parsePort() + "";
    String znode = hbaseParameter.getZnodeParent();

    HBaseAdmin admin = null;
    HTable htable = null;
    int regionsCount = -1;
    try {
        configuration.set("hbase.zookeeper.quorum", hosts);
        configuration.set("hbase.zookeeper.property.clientPort", port);
        configuration.set("zookeeper.znode.parent", znode);
        if (StringUtils.isNotEmpty(hbaseParameter.getKdc())) {
            System.setProperty("java.security.krb5.realm", hbaseParameter.getRealm());
            System.setProperty("java.security.krb5.kdc", hbaseParameter.getKdc());
            configuration.addResource(new Path(hbaseParameter.getHbaseSitePath()));
            UserGroupInformation.setConfiguration(configuration);
            UserGroupInformation.loginUserFromKeytab(hbaseParameter.getLoginPrincipal(),
                hbaseParameter.getLoginKeytabPath());
        }
        admin = new HBaseAdmin(configuration);
        htable = new HTable(configuration, tableName);
        Pair<byte[][], byte[][]> regionRanges = htable.getStartEndKeys();
        regionsCount = regionRanges.getFirst().length;
        event.getCallback().onCompletion(null, new Integer(regionsCount));
    } catch (Exception e) {
        if (admin != null) {
            try {
                admin.close();
            } catch (IOException ex) {
                logger.error(e.getMessage(), e);
                event.getCallback().onCompletion(e, new Integer(-1));
            }
        }
        event.getCallback().onCompletion(e, new Integer(-1));
    }
}
Example 16
Source File: TestConfigurationFieldsBase.java From hadoop with Apache License 2.0
/**
 * Pull properties and values from filename.
 *
 * @param filename XML filename
 * @return HashMap containing <Property,Value> entries from XML file
 */
private HashMap<String,String> extractPropertiesFromXml(String filename) {
    if (filename == null) {
        return null;
    }
    // Iterate through XML file for name/value pairs
    Configuration conf = new Configuration(false);
    conf.setAllowNullValueProperties(true);
    conf.addResource(filename);

    HashMap<String,String> retVal = new HashMap<String,String>();
    Iterator<Map.Entry<String,String>> kvItr = conf.iterator();
    while (kvItr.hasNext()) {
        Map.Entry<String,String> entry = kvItr.next();
        String key = entry.getKey();
        // Ignore known xml props
        if (xmlPropsToSkipCompare != null) {
            if (xmlPropsToSkipCompare.contains(key)) {
                continue;
            }
        }
        // Ignore known xml prefixes
        boolean skipPrefix = false;
        if (xmlPrefixToSkipCompare != null) {
            for (String xmlPrefix : xmlPrefixToSkipCompare) {
                if (key.startsWith(xmlPrefix)) {
                    skipPrefix = true;
                    break;
                }
            }
        }
        if (skipPrefix) {
            continue;
        }
        if (conf.onlyKeyExists(key)) {
            retVal.put(key, null);
        } else {
            String value = conf.get(key);
            if (value != null) {
                retVal.put(key, entry.getValue());
            }
        }
        kvItr.remove();
    }
    return retVal;
}
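Example 16 uses two less common Configuration calls: setAllowNullValueProperties(true) lets the XML declare a <property> with a <name> but no <value>, and onlyKeyExists(key) reports whether a key appears only in that valueless form. A minimal fragment of the pairing, with a hypothetical file and key:

Configuration conf = new Configuration(false);
conf.setAllowNullValueProperties(true);
conf.addResource("deprecations.xml");     // hypothetical resource
if (conf.onlyKeyExists("old.key.name")) { // hypothetical key
    // the property tag exists in the XML but carries no value
}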
Example 17
Source File: ParseLogJob.java From 163-bigdate-note with GNU General Public License v3.0
public int run(String[] args) throws Exception {
    // create the job
    Configuration config = getConf();
    // add custom configuration
    // config.set("ip.file.path", args[2]);
    config.addResource("mr.xml");
    Job job = Job.getInstance(config);

    // set job parameters
    job.setJarByClass(ParseLogJob.class);
    job.setJobName("parselog");
    job.setMapperClass(LogMapper.class);
    // set the reducer class
    job.setReducerClass(LogReducer.class);
    job.setMapOutputKeyClass(LongWritable.class);
    job.setMapOutputValueClass(LogWritable.class);
    job.setOutputValueClass(Text.class);

    // add distributed cache
    job.addCacheFile(new URI(config.get("ip.file.path")));

    // add input and output paths
    FileInputFormat.addInputPath(job, new Path(args[0]));
    Path outputPath = new Path(args[1]);
    FileOutputFormat.setOutputPath(job, outputPath);

    // set compression type
    // FileOutputFormat.setCompressOutput(job, true);
    // FileOutputFormat.setOutputCompressorClass(job, LzopCodec.class);

    FileSystem fs = FileSystem.get(config);
    if (fs.exists(outputPath)) {
        fs.delete(outputPath, true);
    }

    // run the job
    if (!job.waitForCompletion(true)) {
        throw new RuntimeException(job.getJobName() + " failed!");
    }
    return 0;
}
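The custom mr.xml added in Example 17 supplies job parameters such as ip.file.path. For reference, any resource handed to addResource() is a plain Hadoop configuration XML file of <property> entries; a minimal sketch of what mr.xml might contain (the value shown is hypothetical):

<?xml version="1.0"?>
<configuration>
  <property>
    <name>ip.file.path</name>
    <!-- hypothetical value -->
    <value>hdfs:///data/ip-library.dat</value>
  </property>
</configuration>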
Example 18
Source File: AtomicFileOutputAppTest.java From examples with Apache License 2.0
@Test
public void testApplication() throws Exception {
    try {
        File targetDir = new File(TARGET_DIR);
        FileUtils.deleteDirectory(targetDir);
        FileUtils.forceMkdir(targetDir);

        // produce some test data
        KafkaTestProducer p = new KafkaTestProducer(KAFKA_TOPIC);
        String[] words = "count the words from kafka and store them in the db".split("\\s+");
        p.setMessages(Lists.newArrayList(words));
        new Thread(p).start();

        LocalMode lma = LocalMode.newInstance();
        Configuration conf = new Configuration(false);
        conf.addResource(this.getClass().getResourceAsStream("/META-INF/properties.xml"));
        conf.set("dt.operator.kafkaInput.prop.topic", KAFKA_TOPIC);
        conf.set("dt.operator.kafkaInput.prop.zookeeper",
            "localhost:" + KafkaOperatorTestBase.TEST_ZOOKEEPER_PORT[0]);
        conf.set("dt.operator.kafkaInput.prop.maxTuplesPerWindow", "1"); // consume one word per window
        conf.set("dt.operator.fileWriter.prop.filePath", TARGET_DIR);

        lma.prepareDAG(new AtomicFileOutputApp(), conf);

        LocalMode.Controller lc = lma.getController();
        lc.runAsync(); // test will terminate after results are available

        long timeout = System.currentTimeMillis() + 60000; // 60s timeout
        File outputFile = new File(TARGET_DIR, AtomicFileOutputApp.FileWriter.FILE_NAME_PREFIX);
        while (!outputFile.exists() && timeout > System.currentTimeMillis()) {
            Thread.sleep(1000);
            LOG.debug("Waiting for {}", outputFile);
        }
        Assert.assertTrue("output file exists " + AtomicFileOutputApp.FileWriter.FILE_NAME_PREFIX,
            outputFile.exists() && outputFile.isFile());

        lc.shutdown();
    } catch (ConstraintViolationException e) {
        Assert.fail("constraint violations: " + e.getConstraintViolations());
    }
}
Example 19
Source File: PureStyleSQLApplicationTest.java From attic-apex-malhar with Apache License 2.0
@Test
public void test() throws Exception {
    LocalMode lma = LocalMode.newInstance();
    Configuration conf = new Configuration(false);
    conf.addResource(this.getClass().getResourceAsStream("/META-INF/properties.xml"));
    conf.addResource(this.getClass().getResourceAsStream("/META-INF/properties-PureStyleSQLApplication.xml"));
    conf.set("broker", kafka.getBroker());
    conf.set("topic", testTopicData);
    conf.set("outputFolder", outputFolder);
    conf.set("destFileName", "out.tmp");

    PureStyleSQLApplication app = new PureStyleSQLApplication();
    lma.prepareDAG(app, conf);

    LocalMode.Controller lc = lma.getController();
    lc.runAsync();

    kafka.publish(testTopicData, Arrays.asList(
        "15/02/2016 10:15:00 +0000,1,paint1,11",
        "15/02/2016 10:16:00 +0000,2,paint2,12",
        "15/02/2016 10:17:00 +0000,3,paint3,13",
        "15/02/2016 10:18:00 +0000,4,paint4,14",
        "15/02/2016 10:19:00 +0000,5,paint5,15",
        "15/02/2016 10:10:00 +0000,6,abcde6,16"));

    Assert.assertTrue(waitTillFileIsPopulated(outputFolder, 40000));
    lc.shutdown();

    File file = new File(outputFolder);
    File file1 = new File(outputFolder + file.list()[0]);
    List<String> strings = FileUtils.readLines(file1);
    String[] actualLines = strings.toArray(new String[strings.size()]);
    String[] expectedLines = new String[]{
        "15/02/2016 10:18:00 +0000,15/02/2016 12:00:00 +0000,OILPAINT4", "",
        "15/02/2016 10:19:00 +0000,15/02/2016 12:00:00 +0000,OILPAINT5", ""};

    Assert.assertEquals(expectedLines.length, actualLines.length);
    for (int i = 0; i < expectedLines.length; i++) {
        Assert.assertEquals(expectedLines[i], actualLines[i]);
    }
}
Example 20
Source File: CapacitySchedulerConf.java From hadoop-gpu with Apache License 2.0
/**
 * Create a new ResourceManagerConf.
 * This method reads from the default configuration file mentioned in
 * {@link RM_CONF_FILE}, that must be present in the classpath of the
 * application.
 */
public CapacitySchedulerConf() {
    rmConf = new Configuration(false);
    rmConf.addResource(SCHEDULER_CONF_FILE);
    initializeDefaults();
}