Java Code Examples for org.apache.hadoop.conf.Configuration#writeXml()
The following examples show how to use
org.apache.hadoop.conf.Configuration#writeXml() .
You can vote up the examples you like or vote down the ones you don't,
and follow the link above each example to visit the original project or source file. Related API usage can be found in the sidebar.
Example 1
Source File: TestRaidFastCopy.java From RDFS with Apache License 2.0 | 6 votes |
@BeforeClass public static void setUpBeforeClass() throws Exception { conf = new Configuration(); conf.setInt("dfs.block.size", 1024); conf.setClass("fs.hdfs.impl", DistributedRaidFileSystem.class, FileSystem.class); cluster = new MiniDFSCluster(conf, 3, true, null); fs = cluster.getFileSystem(); // Writing conf to disk so that the FastCopy tool picks it up. boolean flag = false; for (String dir : dirs) { if (new File(dir).exists()) { confFile = dir + fileName; String tmpConfFile = confFile + ".tmp"; FileOutputStream out = new FileOutputStream(tmpConfFile); conf.writeXml(out); out.close(); //rename the xml (new File(tmpConfFile)).renameTo(new File(confFile)); flag = true; } } if (!flag) { throw new Exception("Could not write conf file"); } }
Example 2
Source File: TestNodeHealthService.java From RDFS with Apache License 2.0 | 6 votes |
public void testNodeHealthScriptShouldRun() throws IOException { // Node health script should not start if there is no property called // node health script path. assertFalse("Health checker should not have started", NodeHealthCheckerService.shouldRun(new Configuration())); Configuration conf = getConfForNodeHealthScript(); // Node health script should not start if the node health script does not // exists assertFalse("Node health script should start", NodeHealthCheckerService .shouldRun(conf)); // Create script path. conf.writeXml(new FileOutputStream(nodeHealthConfigFile)); writeNodeHealthScriptFile("", false); // Node health script should not start if the node health script is not // executable. assertFalse("Node health script should start", NodeHealthCheckerService .shouldRun(conf)); writeNodeHealthScriptFile("", true); assertTrue("Node health script should start", NodeHealthCheckerService .shouldRun(conf)); }
Example 3
Source File: HFileOutputFormat3.java From kylin with Apache License 2.0 | 6 votes |
/**
 * Serializes the HConnection configuration to a temp xml file and registers
 * it in the job's "tmpfiles" so it is shipped to the cluster.
 *
 * @param job            job whose configuration is updated
 * @param hConnectionConf configuration to serialize
 * @param tempDir        directory to create the temp file in
 * @return the temp file holding the serialized configuration
 * @throws IOException if the file cannot be written
 */
public static File configureHConnection(Job job, Configuration hConnectionConf, File tempDir)
    throws IOException {
  File tempFile = new File(tempDir, "HConfiguration-" + System.currentTimeMillis() + ".xml");
  tempFile.deleteOnExit();
  // try-with-resources closes the stream even when writeXml throws
  // (the original leaked the FileOutputStream on that path).
  try (FileOutputStream os = new FileOutputStream(tempFile)) {
    hConnectionConf.writeXml(os);
  }
  String tmpFiles = job.getConfiguration().get("tmpfiles", null);
  if (tmpFiles == null) {
    tmpFiles = fixWindowsPath("file://" + tempFile.getAbsolutePath());
  } else {
    tmpFiles += "," + fixWindowsPath("file://" + tempFile.getAbsolutePath());
  }
  job.getConfiguration().set("tmpfiles", tmpFiles);
  LOG.info("A temporary file " + tempFile.getAbsolutePath()
      + " is created for storing hconnection related configuration!!!");
  job.getConfiguration().set(BULKLOAD_HCONNECTION_CONF_KEY, tempFile.getName());
  return tempFile;
}
Example 4
Source File: YarnTestBase.java From flink with Apache License 2.0 | 5 votes |
/**
 * Serializes the given yarn configuration into a yarn-site.xml file inside
 * {@code targetFolder} and remembers the file in {@code yarnSiteXML}.
 *
 * @param yarnConf     configuration to serialize
 * @param targetFolder directory the yarn-site.xml is created in
 * @throws IOException if the file cannot be written
 */
public static void writeYarnSiteConfigXML(Configuration yarnConf, File targetFolder) throws IOException {
  yarnSiteXML = new File(targetFolder, "/yarn-site.xml");
  try (FileWriter out = new FileWriter(yarnSiteXML)) {
    yarnConf.writeXml(out);
    out.flush();
  }
}
Example 5
Source File: TonyFetcherTest.java From dr-elephant with Apache License 2.0 | 5 votes |
/**
 * Creates a temporary TonY conf directory containing a tony-site file that
 * points the history locations at the test directories.
 *
 * @throws IOException if the conf file cannot be written
 */
private static void setupTestTonyConfDir() throws IOException {
  Configuration testTonyConf = new Configuration(false);
  testTonyConf.set(TonyConfigurationKeys.TONY_HISTORY_INTERMEDIATE, _intermediateDir.getPath());
  testTonyConf.set(TonyConfigurationKeys.TONY_HISTORY_FINISHED, _finishedDir.getPath());
  File confDir = Files.createTempDir();
  _tonyConfDir = confDir.getPath();
  File tonySiteFile = new File(confDir, Constants.TONY_SITE_CONF);
  // Close the stream deterministically; the original handed writeXml an
  // unclosed FileOutputStream and leaked the descriptor.
  try (FileOutputStream out = new FileOutputStream(tonySiteFile)) {
    testTonyConf.writeXml(out);
  }
}
Example 6
Source File: KeyStoreTestUtil.java From hadoop with Apache License 2.0 | 5 votes |
/**
 * Saves configuration to a file.
 *
 * @param file File to save
 * @param conf Configuration contents to write to file
 * @throws IOException if there is an I/O error saving the file
 */
public static void saveConfig(File file, Configuration conf)
    throws IOException {
  // try-with-resources closes the writer on every path and, unlike the
  // original explicit finally, does not let a failing close() mask the
  // original write exception.
  try (Writer writer = new FileWriter(file)) {
    conf.writeXml(writer);
  }
}
Example 7
Source File: CsvBulkImportUtilTest.java From phoenix with Apache License 2.0 | 5 votes |
@Test public void testInitCsvImportJob() throws IOException { Configuration conf = new Configuration(); char delimiter = '\001'; char quote = '\002'; char escape = '!'; CsvBulkImportUtil.initCsvImportJob(conf, delimiter, quote, escape, null, null); // Serialize and deserialize the config to ensure that there aren't any issues // with non-printable characters as delimiters File tempFile = File.createTempFile("test-config", ".xml"); FileOutputStream fileOutputStream = new FileOutputStream(tempFile); conf.writeXml(fileOutputStream); fileOutputStream.close(); Configuration deserialized = new Configuration(); deserialized.addResource(new FileInputStream(tempFile)); assertEquals(Character.valueOf('\001'), CsvBulkImportUtil.getCharacter(deserialized, CsvToKeyValueMapper.FIELD_DELIMITER_CONFKEY)); assertEquals(Character.valueOf('\002'), CsvBulkImportUtil.getCharacter(deserialized, CsvToKeyValueMapper.QUOTE_CHAR_CONFKEY)); assertEquals(Character.valueOf('!'), CsvBulkImportUtil.getCharacter(deserialized, CsvToKeyValueMapper.ESCAPE_CHAR_CONFKEY)); assertNull(deserialized.get(CsvToKeyValueMapper.ARRAY_DELIMITER_CONFKEY)); tempFile.delete(); }
Example 8
Source File: TestWriteConfigurationToDFS.java From hadoop with Apache License 2.0 | 5 votes |
// Regression test: writing a Configuration that holds a very large value
// (~500KB) to an HDFS stream must complete without corrupting the xml.
@Test(timeout=60000) public void testWriteConf() throws Exception {
  Configuration conf = new HdfsConfiguration();
  conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 4096);
  System.out.println("Setting conf in: " + System.identityHashCode(conf));
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
  FileSystem fs = null;
  OutputStream os = null;
  try {
    fs = cluster.getFileSystem();
    Path filePath = new Path("/testWriteConf.xml");
    os = fs.create(filePath);
    // Build an oversized property value to stress the xml writer.
    StringBuilder longString = new StringBuilder();
    for (int i = 0; i < 100000; i++) {
      longString.append("hello");
    } // 500KB
    conf.set("foobar", longString.toString());
    conf.writeXml(os);
    os.close();
    // Null out after a successful close so the cleanup in the finally
    // block does not attempt to close the same resource twice.
    os = null;
    fs.close();
    fs = null;
  } finally {
    // Closes whatever is still open (no-op for nulls), then tears down
    // the minicluster unconditionally.
    IOUtils.cleanup(null, os, fs);
    cluster.shutdown();
  }
}
Example 9
Source File: UtilsForTests.java From RDFS with Apache License 2.0 | 5 votes |
/**
 * Copies the given properties into a fresh Configuration and serializes it
 * as xml into {@code configFile}.
 *
 * @param confProps  properties to copy into the configuration
 * @param configFile destination file for the serialized xml
 * @throws IOException if the file cannot be written
 */
static void setUpConfigFile(Properties confProps, File configFile)
    throws IOException {
  Configuration config = new Configuration(false);
  // Populate the configuration fully before opening the output file.
  for (Enumeration<?> e = confProps.propertyNames(); e.hasMoreElements();) {
    String key = (String) e.nextElement();
    config.set(key, confProps.getProperty(key));
  }
  // Open the stream only once the config is built, and close it on all
  // paths (the original opened it up-front and leaked it on exception).
  try (FileOutputStream fos = new FileOutputStream(configFile)) {
    config.writeXml(fos);
  }
}
Example 10
Source File: UtilsForTests.java From big-c with Apache License 2.0 | 5 votes |
/**
 * Copies the given properties into a fresh Configuration and serializes it
 * as xml into {@code configFile}.
 *
 * @param confProps  properties to copy into the configuration
 * @param configFile destination file for the serialized xml
 * @throws IOException if the file cannot be written
 */
static void setUpConfigFile(Properties confProps, File configFile)
    throws IOException {
  Configuration config = new Configuration(false);
  // Populate the configuration fully before opening the output file.
  for (Enumeration<?> e = confProps.propertyNames(); e.hasMoreElements();) {
    String key = (String) e.nextElement();
    config.set(key, confProps.getProperty(key));
  }
  // Open the stream only once the config is built, and close it on all
  // paths (the original opened it up-front and leaked it on exception).
  try (FileOutputStream fos = new FileOutputStream(configFile)) {
    config.writeXml(fos);
  }
}
Example 11
Source File: JobSubmitter.java From big-c with Apache License 2.0 | 5 votes |
private void writeConf(Configuration conf, Path jobFile) throws IOException { // Write job file to JobTracker's fs FSDataOutputStream out = FileSystem.create(jtFs, jobFile, new FsPermission(JobSubmissionFiles.JOB_FILE_PERMISSION)); try { conf.writeXml(out); } finally { out.close(); } }
Example 12
Source File: TestFileSystemAccessService.java From big-c with Apache License 2.0 | 5 votes |
/**
 * Serializes the given hadoop configuration into an hdfs-site.xml inside
 * the test directory.
 *
 * @param hadoopConf configuration to serialize
 * @throws Exception if the file cannot be written
 */
private void createHadoopConf(Configuration hadoopConf) throws Exception {
  String dir = TestDirHelper.getTestDir().getAbsolutePath();
  File hdfsSite = new File(dir, "hdfs-site.xml");
  // try-with-resources closes the stream even when writeXml throws
  // (the original leaked the stream on that path).
  try (OutputStream os = new FileOutputStream(hdfsSite)) {
    hadoopConf.writeXml(os);
  }
}
Example 13
Source File: KeyStoreTestUtil.java From hbase with Apache License 2.0 | 5 votes |
/**
 * Saves configuration to a file.
 *
 * @param file File to save
 * @param conf Configuration contents to write to file
 * @throws IOException if there is an I/O error saving the file
 */
public static void saveConfig(File file, Configuration conf)
    throws IOException {
  // try-with-resources closes the writer on every path and, unlike the
  // original explicit finally, does not let a failing close() mask the
  // original write exception.
  try (Writer writer = new FileWriter(file)) {
    conf.writeXml(writer);
  }
}
Example 14
Source File: CopyMapper.java From big-c with Apache License 2.0 | 5 votes |
/**
 * Initialize SSL Config if same is set in conf
 *
 * @throws IOException - If any
 */
private void initializeSSLConf(Context context) throws IOException {
  LOG.info("Initializing SSL configuration");

  // Task-local scratch directory; the materialized ssl config file is
  // written here so the copy can pick it up later.
  String workDir = conf.get(JobContext.JOB_LOCAL_DIR) + "/work";
  Path[] cacheFiles = context.getLocalCacheFiles();

  Configuration sslConfig = new Configuration(false);
  String sslConfFileName = conf.get(DistCpConstants.CONF_LABEL_SSL_CONF);
  Path sslClient = findCacheFile(cacheFiles, sslConfFileName);
  if (sslClient == null) {
    // Best-effort: without the client config in the cache we skip SSL
    // setup entirely rather than failing the task.
    LOG.warn("SSL Client config file not found. Was looking for " +
        sslConfFileName + " in " + Arrays.toString(cacheFiles));
    return;
  }
  sslConfig.addResource(sslClient);

  // Rewrite the truststore/keystore locations so they point at the
  // localized copies from the distributed cache instead of the
  // submitter-side paths.
  String trustStoreFile = conf.get("ssl.client.truststore.location");
  Path trustStorePath = findCacheFile(cacheFiles, trustStoreFile);
  sslConfig.set("ssl.client.truststore.location", trustStorePath.toString());

  String keyStoreFile = conf.get("ssl.client.keystore.location");
  Path keyStorePath = findCacheFile(cacheFiles, keyStoreFile);
  sslConfig.set("ssl.client.keystore.location", keyStorePath.toString());

  try {
    OutputStream out = new FileOutputStream(workDir + "/" + sslConfFileName);
    try {
      sslConfig.writeXml(out);
    } finally {
      out.close();
    }
    conf.set(DistCpConstants.CONF_LABEL_SSL_KEYSTORE, sslConfFileName);
  } catch (IOException e) {
    // Deliberately swallowed: falling back to the default ssl-client.xml
    // on the classpath is acceptable, per the warning below.
    LOG.warn("Unable to write out the ssl configuration. " +
        "Will fall back to default ssl-client.xml in class path, if there is one", e);
  }
}
Example 15
Source File: YarnTestBase.java From Flink-CEPplus with Apache License 2.0 | 5 votes |
/**
 * Serializes the given yarn configuration into a fresh yarn-site.xml under
 * a new temporary folder.
 *
 * @param yarnConf configuration to serialize
 * @return the newly written yarn-site.xml file
 * @throws IOException if the file cannot be written
 */
public static File writeYarnSiteConfigXML(Configuration yarnConf) throws IOException {
  tmp.create();
  String folder = tmp.newFolder().getAbsolutePath();
  File siteFile = new File(folder + "/yarn-site.xml");
  try (FileWriter out = new FileWriter(siteFile)) {
    yarnConf.writeXml(out);
    out.flush();
  }
  return siteFile;
}
Example 16
Source File: IgniteHadoopFileSystemAbstractSelfTest.java From ignite with Apache License 2.0 | 4 votes |
/** {@inheritDoc} */
@Override protected void beforeTestsStarted() throws Exception {
  // Build the secondary file system configuration with a small block size
  // and persist it where the secondary nodes expect to find it.
  Configuration cfg = configurationSecondary(SECONDARY_AUTHORITY);
  cfg.setInt("fs.igfs.block.size", 1024);

  File cfgFile = new File(U.getIgniteHome() + SECONDARY_CFG_PATH);
  try (FileOutputStream out = new FileOutputStream(cfgFile)) {
    cfg.writeXml(out);
  }

  startNodes();
}
Example 17
Source File: TestJobHistoryEventHandler.java From big-c with Apache License 2.0 | 4 votes |
// Verifies that job history is written to the default file system captured
// at init time (the minicluster), not to whatever fs.defaultFS says at
// event-handling time.
@Test (timeout=50000)
public void testDefaultFsIsUsedForHistory() throws Exception {
  // Create default configuration pointing to the minicluster
  Configuration conf = new Configuration();
  conf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY,
      dfsCluster.getURI().toString());
  // Persist it as core-site so later Configuration loads see the
  // minicluster as the default fs.
  FileOutputStream os = new FileOutputStream(coreSitePath);
  conf.writeXml(os);
  os.close();
  // simulate execution under a non-default namenode
  conf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, "file:///");
  TestParams t = new TestParams();
  conf.set(MRJobConfig.MR_AM_STAGING_DIR, t.dfsWorkDir);
  JHEvenHandlerForTest realJheh =
      new JHEvenHandlerForTest(t.mockAppContext, 0, false);
  JHEvenHandlerForTest jheh = spy(realJheh);
  jheh.init(conf);
  try {
    jheh.start();
    handleEvent(jheh, new JobHistoryEvent(t.jobId, new AMStartedEvent(
        t.appAttemptId, 200, t.containerId, "nmhost", 3000, 4000, -1)));
    handleEvent(jheh, new JobHistoryEvent(t.jobId, new JobFinishedEvent(
        TypeConverter.fromYarn(t.jobId), 0, 0, 0, 0, 0, new Counters(),
        new Counters(), new Counters())));
    // If we got here then event handler worked but we don't know with which
    // file system. Now we check that history stuff was written to minicluster
    FileSystem dfsFileSystem = dfsCluster.getFileSystem();
    assertTrue("Minicluster contains some history files",
        dfsFileSystem.globStatus(new Path(t.dfsWorkDir + "/*")).length != 0);
    FileSystem localFileSystem = LocalFileSystem.get(conf);
    assertFalse("No history directory on non-default file system",
        localFileSystem.exists(new Path(t.dfsWorkDir)));
  } finally {
    jheh.stop();
  }
}
Example 18
Source File: BaseMapReduceIT.java From datacollector with Apache License 2.0 | 4 votes |
/**
 * Serializes the given configuration as xml into {@code outputFile}.
 *
 * @param conf       configuration to serialize
 * @param outputFile destination file
 * @throws Exception if the file cannot be written
 */
private static void writeConfiguration(Configuration conf, File outputFile) throws Exception {
  // try-with-resources closes the stream even when writeXml throws
  // (the original leaked the FileOutputStream on that path).
  try (FileOutputStream outputStream = new FileOutputStream(outputFile)) {
    conf.writeXml(outputStream);
  }
}
Example 19
Source File: SpliceTestYarnPlatform.java From spliceengine with GNU Affero General Public License v3.0 | 4 votes |
// Starts a MiniYARNCluster with the requested node count (as the yarn user,
// secure or not) and publishes the resulting server config on the classpath
// so yarn clients can read it.
public void start(int nodeCount) throws Exception {
  if (yarnCluster == null) {
    LOG.info("Starting up YARN cluster with " + nodeCount +
        " nodes. Server yarn-site.xml is: " + yarnSiteConfigURL);

    // Run the cluster as the yarn principal (keytab login when secure,
    // a remote user stub otherwise).
    UserGroupInformation ugi;
    if (secure)
      ugi = UserGroupInformation.loginUserFromKeytabAndReturnUGI(
          "yarn/[email protected]", keytab);
    else
      ugi = UserGroupInformation.createRemoteUser("yarn");
    UserGroupInformation.setLoginUser(ugi);

    ugi.doAs(new PrivilegedExceptionAction<Void>() {
      @Override
      public Void run() throws Exception {
        yarnCluster = new MiniYARNClusterSplice(
            SpliceTestYarnPlatform.class.getSimpleName(), nodeCount, 1, 1);
        yarnCluster.init(conf);
        yarnCluster.start();
        return null;
      }
    });

    // Block until at least one NodeManager has registered with the RM.
    NodeManager nm = getNodeManager();
    waitForNMToRegister(nm);

    // save the server config to classpath so yarn clients can read it
    Configuration yarnClusterConfig = yarnCluster.getConfig();
    yarnClusterConfig.set("yarn.application.classpath",
        new File(yarnSiteConfigURL.getPath()).getParent());
    yarnClusterConfig.set("fs.s3a.impl",
        "com.splicemachine.fs.s3.PrestoS3FileSystem");

    //write the document to a buffer (not directly to the file, as that
    //can cause the file being written to get read -which will then fail.
    ByteArrayOutputStream bytesOut = new ByteArrayOutputStream();
    yarnClusterConfig.writeXml(bytesOut);
    bytesOut.close();

    //write the bytes to the file in the classpath
    OutputStream os = new FileOutputStream(new File(yarnSiteConfigURL.getPath()));
    os.write(bytesOut.toByteArray());
    os.close();
  }
  LOG.info("YARN cluster started.");
}
Example 20
Source File: OozieConfigUtil.java From hadoop-mini-clusters with Apache License 2.0 | 4 votes |
/**
 * Serializes the given configuration as xml to {@code outputLocation},
 * creating parent directories as needed.
 *
 * @param configuration  configuration to serialize
 * @param outputLocation path of the xml file to write
 * @throws IOException if the file cannot be written
 */
public void writeXml(Configuration configuration, String outputLocation) throws IOException {
  File output = new File(outputLocation);
  // mkdirs returns false when the directory already exists, so its return
  // value alone is not an error signal; a missing directory surfaces as an
  // IOException from the stream below.
  new File(output.getParent()).mkdirs();
  // try-with-resources closes the stream deterministically; the original
  // passed an unclosed FileOutputStream to writeXml and leaked it.
  try (FileOutputStream out = new FileOutputStream(output)) {
    configuration.writeXml(out);
  }
}