Java Code Examples for org.apache.hadoop.hdfs.server.namenode.NameNode#format()
The following examples show how to use org.apache.hadoop.hdfs.server.namenode.NameNode#format().
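Before the individual examples, here is a minimal sketch of the call pattern they all share: point the name directory at a scratch location, set the default file system URI, and invoke NameNode.format(Configuration). The path and port are placeholders, and the pre-2.x key "dfs.name.dir" is assumed (the newer examples below use DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY instead); treat this as an illustration, not code from any of the projects listed.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.server.namenode.NameNode;

public class FormatSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    // Placeholder scratch directory; format() initializes it as the name dir.
    conf.set("dfs.name.dir", "/tmp/format-sketch/name");
    // Placeholder address; the tests below bind real host:port pairs the same way.
    FileSystem.setDefaultUri(conf, "hdfs://localhost:0");
    NameNode.format(conf);
  }
}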
Example 1
Source File: TestHDFSServerPorts.java From RDFS with Apache License 2.0
/**
 * Start the name-node.
 */
public NameNode startNameNode() throws IOException {
  String dataDir = System.getProperty("test.build.data");
  hdfsDir = new File(dataDir, "dfs");
  if (hdfsDir.exists() && !FileUtil.fullyDelete(hdfsDir)) {
    throw new IOException("Could not delete hdfs directory '" + hdfsDir + "'");
  }
  config = new Configuration();
  config.set("dfs.name.dir", new File(hdfsDir, "name1").getPath());
  FileSystem.setDefaultUri(config, "hdfs://" + NAME_NODE_HOST + "0");
  config.set("dfs.http.address", NAME_NODE_HTTP_HOST + "0");
  NameNode.format(config);

  String[] args = new String[] {};
  // NameNode will modify config with the ports it bound to
  return NameNode.createNameNode(args, config);
}
Example 2
Source File: TestNNThroughputBenchmark.java From RDFS with Apache License 2.0
/**
 * This test runs all benchmarks defined in {@link NNThroughputBenchmark}.
 * @throws Exception
 */
public static void main(String[] arg) throws Exception {
  // make the configuration before benchmark
  Configuration conf = new Configuration();
  FileSystem.setDefaultUri(conf, "hdfs://0.0.0.0:" + 9000);
  conf.set("dfs.http.address", "0.0.0.0:0");
  Random rand = new Random();
  String dir = "/tmp/testNN" + rand.nextInt(Integer.MAX_VALUE);
  conf.set("dfs.name.dir", dir);
  conf.set("dfs.name.edits.dir", dir);
  conf.set("dfs.namenode.support.allowformat", "true");
  //conf.set("fs.default.name", "hdfs://0.0.0.0:9000");
  NameNode.format(conf);

  // create the first benchmark
  NNThroughputBenchmark.runBenchmark(conf, Arrays.asList(arg));
}
Example 3
Source File: TestHDFSServerPorts.java From hadoop-gpu with Apache License 2.0
/**
 * Start the name-node.
 */
public NameNode startNameNode() throws IOException {
  String dataDir = System.getProperty("test.build.data");
  hdfsDir = new File(dataDir, "dfs");
  if (hdfsDir.exists() && !FileUtil.fullyDelete(hdfsDir)) {
    throw new IOException("Could not delete hdfs directory '" + hdfsDir + "'");
  }
  config = new Configuration();
  config.set("dfs.name.dir", new File(hdfsDir, "name1").getPath());
  FileSystem.setDefaultUri(config, "hdfs://" + NAME_NODE_HOST + "0");
  config.set("dfs.http.address", NAME_NODE_HTTP_HOST + "0");
  NameNode.format(config);

  String[] args = new String[] {};
  // NameNode will modify config with the ports it bound to
  return NameNode.createNameNode(args, config);
}
Example 4
Source File: DFSTestUtil.java From hadoop with Apache License 2.0
/**
 * when formatting a namenode - we must provide clusterid.
 * @param conf
 * @throws IOException
 */
public static void formatNameNode(Configuration conf) throws IOException {
  String clusterId = StartupOption.FORMAT.getClusterId();
  if (clusterId == null || clusterId.isEmpty())
    StartupOption.FORMAT.setClusterId("testClusterID");
  // Use a copy of conf as it can be altered by namenode during format.
  NameNode.format(new Configuration(conf));
}
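A hypothetical caller of this helper (the scratch path below is made up for illustration) would set the name directory first and let the helper supply the test cluster ID instead of calling NameNode.format directly:

Configuration conf = new Configuration();
// Illustrative scratch path, not taken from the project.
conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, "/tmp/dfs-test/name");
DFSTestUtil.formatNameNode(conf);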
Example 5
Source File: TestNNWithQJM.java From hadoop with Apache License 2.0
@Test (timeout = 30000)
public void testMismatchedNNIsRejected() throws Exception {
  conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
      MiniDFSCluster.getBaseDirectory() + "/TestNNWithQJM/image");
  String defaultEditsDir = conf.get(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY);
  conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY,
      mjc.getQuorumJournalURI("myjournal").toString());

  // Start a NN, so the storage is formatted -- both on-disk
  // and QJM.
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
      .numDataNodes(0)
      .manageNameDfsDirs(false)
      .build();
  cluster.shutdown();

  // Reformat just the on-disk portion
  Configuration onDiskOnly = new Configuration(conf);
  onDiskOnly.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY, defaultEditsDir);
  NameNode.format(onDiskOnly);

  // Start the NN - should fail because the JNs are still formatted
  // with the old namespace ID.
  try {
    cluster = new MiniDFSCluster.Builder(conf)
        .numDataNodes(0)
        .manageNameDfsDirs(false)
        .format(false)
        .build();
    fail("New NN with different namespace should have been rejected");
  } catch (IOException ioe) {
    GenericTestUtils.assertExceptionContains(
        "Unable to start log segment 1: too few journals", ioe);
  }
}
Example 6
Source File: DFSTestUtil.java From big-c with Apache License 2.0
/**
 * when formatting a namenode - we must provide clusterid.
 * @param conf
 * @throws IOException
 */
public static void formatNameNode(Configuration conf) throws IOException {
  String clusterId = StartupOption.FORMAT.getClusterId();
  if (clusterId == null || clusterId.isEmpty())
    StartupOption.FORMAT.setClusterId("testClusterID");
  // Use a copy of conf as it can be altered by namenode during format.
  NameNode.format(new Configuration(conf));
}
Example 7
Source File: TestNNWithQJM.java From big-c with Apache License 2.0
@Test (timeout = 30000)
public void testMismatchedNNIsRejected() throws Exception {
  conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
      MiniDFSCluster.getBaseDirectory() + "/TestNNWithQJM/image");
  String defaultEditsDir = conf.get(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY);
  conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY,
      mjc.getQuorumJournalURI("myjournal").toString());

  // Start a NN, so the storage is formatted -- both on-disk
  // and QJM.
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
      .numDataNodes(0)
      .manageNameDfsDirs(false)
      .build();
  cluster.shutdown();

  // Reformat just the on-disk portion
  Configuration onDiskOnly = new Configuration(conf);
  onDiskOnly.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY, defaultEditsDir);
  NameNode.format(onDiskOnly);

  // Start the NN - should fail because the JNs are still formatted
  // with the old namespace ID.
  try {
    cluster = new MiniDFSCluster.Builder(conf)
        .numDataNodes(0)
        .manageNameDfsDirs(false)
        .format(false)
        .build();
    fail("New NN with different namespace should have been rejected");
  } catch (IOException ioe) {
    GenericTestUtils.assertExceptionContains(
        "Unable to start log segment 1: too few journals", ioe);
  }
}
Example 8
Source File: TestDatanodeUpgrade.java From RDFS with Apache License 2.0
@After
public void tearDown() throws Exception {
  cluster.finalizeCluster(conf);
  cluster.shutdown();
  NameNode.format(conf);
  cluster.formatDataNodeDirs();
  lock.unlock();
}
Example 9
Source File: TestParallelRBW.java From RDFS with Apache License 2.0
@After
public void tearDown() throws Exception {
  cluster.finalizeCluster(conf);
  cluster.shutdown();
  NameNode.format(conf);
  cluster.formatDataNodeDirs();
  lock.unlock();
}
Example 10
Source File: TestHDFSServerPorts.java From RDFS with Apache License 2.0
/**
 * Verify name-node port usage.
 */
public void testNameNodePorts() throws Exception {
  NameNode nn = null;
  try {
    nn = startNameNode();

    // start another namenode on the same port
    Configuration conf2 = new Configuration(config);
    conf2.set("dfs.name.dir", new File(hdfsDir, "name2").getPath());
    NameNode.format(conf2);
    boolean started = canStartNameNode(conf2);
    assertFalse(started); // should fail

    // start on a different main port
    FileSystem.setDefaultUri(conf2, "hdfs://" + NAME_NODE_HOST + "0");
    started = canStartNameNode(conf2);
    assertFalse(started); // should fail again

    // reset conf2 since NameNode modifies it
    FileSystem.setDefaultUri(conf2, "hdfs://" + NAME_NODE_HOST + "0");

    // different http port
    conf2.set("dfs.http.address", NAME_NODE_HTTP_HOST + "0");
    started = canStartNameNode(conf2);
    assertTrue(started); // should start now
  } finally {
    stopNameNode(nn);
  }
}
Example 11
Source File: TestHDFSServerPorts.java From hadoop-gpu with Apache License 2.0
/**
 * Verify name-node port usage.
 */
public void testNameNodePorts() throws Exception {
  NameNode nn = null;
  try {
    nn = startNameNode();

    // start another namenode on the same port
    Configuration conf2 = new Configuration(config);
    conf2.set("dfs.name.dir", new File(hdfsDir, "name2").getPath());
    NameNode.format(conf2);
    boolean started = canStartNameNode(conf2);
    assertFalse(started); // should fail

    // start on a different main port
    FileSystem.setDefaultUri(conf2, "hdfs://" + NAME_NODE_HOST + "0");
    started = canStartNameNode(conf2);
    assertFalse(started); // should fail again

    // reset conf2 since NameNode modifies it
    FileSystem.setDefaultUri(conf2, "hdfs://" + NAME_NODE_HOST + "0");

    // different http port
    conf2.set("dfs.http.address", NAME_NODE_HTTP_HOST + "0");
    started = canStartNameNode(conf2);
    assertTrue(started); // should start now
  } finally {
    stopNameNode(nn);
  }
}
Example 12
Source File: MiniDFSCluster.java From hadoop-gpu with Apache License 2.0
/**
 * NOTE: if possible, the other constructors that don't have nameNode port
 * parameter should be used as they will ensure that the servers use free ports.
 * <p>
 * Modify the config and start up the servers.
 *
 * @param nameNodePort suggestion for which rpc port to use. caller should
 *          use getNameNodePort() to get the actual port used.
 * @param conf the base configuration to use in starting the servers. This
 *          will be modified as necessary.
 * @param numDataNodes Number of DataNodes to start; may be zero
 * @param format if true, format the NameNode and DataNodes before starting up
 * @param manageNameDfsDirs if true, the data directories for servers will be
 *          created and dfs.name.dir and dfs.data.dir will be set in the conf
 * @param manageDataDfsDirs if true, the data directories for datanodes will
 *          be created and dfs.data.dir set to same in the conf
 * @param operation the operation with which to start the servers. If null
 *          or StartupOption.FORMAT, then StartupOption.REGULAR will be used.
 * @param racks array of strings indicating the rack that each DataNode is on
 * @param hosts array of strings indicating the hostnames of each DataNode
 * @param simulatedCapacities array of capacities of the simulated data nodes
 */
public MiniDFSCluster(int nameNodePort,
                      Configuration conf,
                      int numDataNodes,
                      boolean format,
                      boolean manageNameDfsDirs,
                      boolean manageDataDfsDirs,
                      StartupOption operation,
                      String[] racks, String hosts[],
                      long[] simulatedCapacities) throws IOException {
  this.conf = conf;
  try {
    UserGroupInformation.setCurrentUser(UnixUserGroupInformation.login(conf));
  } catch (LoginException e) {
    IOException ioe = new IOException();
    ioe.initCause(e);
    throw ioe;
  }
  base_dir = new File(System.getProperty("test.build.data", "build/test/data"), "dfs/");
  data_dir = new File(base_dir, "data");

  // Setup the NameNode configuration
  FileSystem.setDefaultUri(conf, "hdfs://localhost:" + Integer.toString(nameNodePort));
  conf.set("dfs.http.address", "127.0.0.1:0");
  if (manageNameDfsDirs) {
    conf.set("dfs.name.dir", new File(base_dir, "name1").getPath() + "," +
             new File(base_dir, "name2").getPath());
    conf.set("fs.checkpoint.dir", new File(base_dir, "namesecondary1").getPath() + "," +
             new File(base_dir, "namesecondary2").getPath());
  }

  int replication = conf.getInt("dfs.replication", 3);
  conf.setInt("dfs.replication", Math.min(replication, numDataNodes));
  conf.setInt("dfs.safemode.extension", 0);
  conf.setInt("dfs.namenode.decommission.interval", 3); // 3 second

  // Format and clean out DataNode directories
  if (format) {
    if (data_dir.exists() && !FileUtil.fullyDelete(data_dir)) {
      throw new IOException("Cannot remove data directory: " + data_dir);
    }
    NameNode.format(conf);
  }

  // Start the NameNode
  String[] args = (operation == null ||
                   operation == StartupOption.FORMAT ||
                   operation == StartupOption.REGULAR) ?
    new String[] {} : new String[] {operation.getName()};
  conf.setClass("topology.node.switch.mapping.impl", StaticMapping.class,
                DNSToSwitchMapping.class);
  nameNode = NameNode.createNameNode(args, conf);

  // Start the DataNodes
  startDataNodes(conf, numDataNodes, manageDataDfsDirs,
                 operation, racks, hosts, simulatedCapacities);
  waitClusterUp();
}
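As a usage sketch (not taken from the project), a test might invoke this constructor with port 0 so the NameNode binds a free RPC port, then read the actual port back with getNameNodePort():

Configuration conf = new Configuration();
// 0 lets the NameNode pick a free RPC port, per the javadoc above.
MiniDFSCluster cluster = new MiniDFSCluster(0, conf, 1, /*format*/ true,
    /*manageNameDfsDirs*/ true, /*manageDataDfsDirs*/ true,
    /*operation*/ null, /*racks*/ null, /*hosts*/ null,
    /*simulatedCapacities*/ null);
try {
  int rpcPort = cluster.getNameNodePort(); // the port actually bound
  FileSystem fs = cluster.getFileSystem();
  // ... exercise the cluster ...
} finally {
  cluster.shutdown();
}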
Example 13
Source File: UpgradeUtilities.java From hadoop-gpu with Apache License 2.0
/**
 * Initialize the data structures used by this class.
 * IMPORTANT NOTE: This method must be called once before calling
 * any other public method on this class.
 * <p>
 * Creates a singleton master populated storage
 * directory for a Namenode (contains edits, fsimage,
 * version, and time files) and a Datanode (contains version and
 * block files). This can be a lengthy operation.
 */
public static void initialize() throws Exception {
  createEmptyDirs(new String[] {TEST_ROOT_DIR.toString()});
  Configuration config = new Configuration();
  config.set("dfs.name.dir", namenodeStorage.toString());
  config.set("dfs.data.dir", datanodeStorage.toString());
  MiniDFSCluster cluster = null;
  try {
    // format data-node
    createEmptyDirs(new String[] {datanodeStorage.toString()});

    // format and start NameNode and start DataNode
    NameNode.format(config);
    cluster = new MiniDFSCluster(config, 1, StartupOption.REGULAR);

    NameNode namenode = cluster.getNameNode();
    namenodeStorageNamespaceID = namenode.versionRequest().getNamespaceID();
    namenodeStorageFsscTime = namenode.versionRequest().getCTime();

    FileSystem fs = FileSystem.get(config);
    Path baseDir = new Path("/TestUpgrade");
    fs.mkdirs(baseDir);

    // write some files
    int bufferSize = 4096;
    byte[] buffer = new byte[bufferSize];
    for (int i = 0; i < bufferSize; i++)
      buffer[i] = (byte)('0' + i % 50);
    writeFile(fs, new Path(baseDir, "file1"), buffer, bufferSize);
    writeFile(fs, new Path(baseDir, "file2"), buffer, bufferSize);

    // save image
    namenode.getFSImage().saveFSImage();
    namenode.getFSImage().getEditLog().open();

    // write more files
    writeFile(fs, new Path(baseDir, "file3"), buffer, bufferSize);
    writeFile(fs, new Path(baseDir, "file4"), buffer, bufferSize);
  } finally {
    // shutdown
    if (cluster != null) cluster.shutdown();
    FileUtil.fullyDelete(new File(namenodeStorage, "in_use.lock"));
    FileUtil.fullyDelete(new File(datanodeStorage, "in_use.lock"));
  }
  namenodeStorageChecksum = checksumContents(
      NAME_NODE, new File(namenodeStorage, "current"));
  datanodeStorageChecksum = checksumContents(
      DATA_NODE, new File(datanodeStorage, "current"));
}