Java Code Examples for org.apache.hadoop.hdfs.server.namenode.NameNode#createNameNode()
The following examples show how to use
org.apache.hadoop.hdfs.server.namenode.NameNode#createNameNode().
You can go to the original project or source file by following the links above each example.
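All of the examples below follow the same basic pattern: build a Configuration, format the name directories, then call NameNode.createNameNode(argv, conf) — with an empty argv for a regular start, or with a StartupOption name (for example BACKUP) to start in another mode. The following minimal sketch of that pattern is not taken from any of the projects below; the storage directory and addresses are placeholder assumptions, and it uses the older-style configuration keys and NameNode.format(conf) call that appear in the hadoop-gpu and RDFS examples.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.server.namenode.NameNode;

public class CreateNameNodeSketch {
  public static void main(String[] argv) throws IOException {
    Configuration conf = new Configuration();
    // Placeholder test directory; any empty, writable path works.
    conf.set("dfs.name.dir", "/tmp/createNameNode-example/name");
    // Port 0 asks for any free port; as the examples below note, the
    // NameNode writes the addresses it actually bound back into conf.
    FileSystem.setDefaultUri(conf, "hdfs://127.0.0.1:0");
    conf.set("dfs.http.address", "127.0.0.1:0");

    NameNode.format(conf); // format the name directory before the first start

    // An empty argument array starts the NameNode with StartupOption.REGULAR.
    NameNode nn = NameNode.createNameNode(new String[] {}, conf);
    try {
      System.out.println("NameNode started at " + conf.get("fs.default.name"));
    } finally {
      if (nn != null) {
        nn.stop();
      }
    }
  }
}

In the newer hadoop and big-c examples, the same steps appear with DFSConfigKeys constants (for example DFS_NAMENODE_NAME_DIR_KEY) and DFSTestUtil.formatNameNode(conf) in place of the raw key names and NameNode.format(conf).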
Example 1
Source File: MiniDFSCluster.java From hadoop with Apache License 2.0
/**
 * Restart the namenode at a given index. Optionally wait for the cluster
 * to become active.
 */
public synchronized void restartNameNode(int nnIndex, boolean waitActive,
    String... args) throws IOException {
  String nameserviceId = nameNodes[nnIndex].nameserviceId;
  String nnId = nameNodes[nnIndex].nnId;
  StartupOption startOpt = nameNodes[nnIndex].startOpt;
  Configuration conf = nameNodes[nnIndex].conf;
  shutdownNameNode(nnIndex);

  if (args.length != 0) {
    startOpt = null;
  } else {
    args = createArgs(startOpt);
  }

  NameNode nn = NameNode.createNameNode(args, conf);
  nameNodes[nnIndex] = new NameNodeInfo(nn, nameserviceId, nnId, startOpt,
      conf);
  if (waitActive) {
    waitClusterUp();
    LOG.info("Restarted the namenode");
    waitActive();
  }
}
Example 2
Source File: TestHDFSServerPorts.java From hadoop-gpu with Apache License 2.0
/**
 * Start the name-node.
 */
public NameNode startNameNode() throws IOException {
  String dataDir = System.getProperty("test.build.data");
  hdfsDir = new File(dataDir, "dfs");
  if (hdfsDir.exists() && !FileUtil.fullyDelete(hdfsDir)) {
    throw new IOException("Could not delete hdfs directory '" + hdfsDir + "'");
  }
  config = new Configuration();
  config.set("dfs.name.dir", new File(hdfsDir, "name1").getPath());
  FileSystem.setDefaultUri(config, "hdfs://" + NAME_NODE_HOST + "0");
  config.set("dfs.http.address", NAME_NODE_HTTP_HOST + "0");
  NameNode.format(config);

  String[] args = new String[] {};
  // NameNode will modify config with the ports it bound to
  return NameNode.createNameNode(args, config);
}
Example 3
Source File: NNThroughputBenchmark.java From RDFS with Apache License 2.0
NNThroughputBenchmark(Configuration conf) throws IOException, LoginException {
  config = conf;
  ugi = UnixUserGroupInformation.login(config);
  UserGroupInformation.setCurrentUser(ugi);

  // We do not need many handlers, since each thread simulates a handler
  // by calling name-node methods directly
  config.setInt("dfs.namenode.handler.count", 1);
  // set exclude file
  config.set("dfs.hosts.exclude", "${hadoop.tmp.dir}/dfs/hosts/exclude");
  File excludeFile = new File(config.get("dfs.hosts.exclude", "exclude"));
  if (!excludeFile.exists()) {
    if (!excludeFile.getParentFile().mkdirs())
      throw new IOException("NNThroughputBenchmark: cannot mkdir " + excludeFile);
  }
  new FileOutputStream(excludeFile).close();
  // Start the NameNode
  String[] argv = new String[] {};
  nameNode = NameNode.createNameNode(argv, config);
  // create the result file
  res = new PrintStream(new FileOutputStream("/tmp/nnbenchmark.out"));
}
Example 4
Source File: TestHDFSServerPorts.java From RDFS with Apache License 2.0
/**
 * Start the name-node.
 */
public NameNode startNameNode() throws IOException {
  String dataDir = System.getProperty("test.build.data");
  hdfsDir = new File(dataDir, "dfs");
  if (hdfsDir.exists() && !FileUtil.fullyDelete(hdfsDir)) {
    throw new IOException("Could not delete hdfs directory '" + hdfsDir + "'");
  }
  config = new Configuration();
  config.set("dfs.name.dir", new File(hdfsDir, "name1").getPath());
  FileSystem.setDefaultUri(config, "hdfs://" + NAME_NODE_HOST + "0");
  config.set("dfs.http.address", NAME_NODE_HTTP_HOST + "0");
  NameNode.format(config);

  String[] args = new String[] {};
  // NameNode will modify config with the ports it bound to
  return NameNode.createNameNode(args, config);
}
Example 5
Source File: TestHDFSServerPorts.java From big-c with Apache License 2.0
/**
 * Start the BackupNode
 */
public BackupNode startBackupNode(Configuration conf) throws IOException {
  // Set up testing environment directories
  hdfsDir = new File(TEST_DATA_DIR, "backupNode");
  if (hdfsDir.exists() && !FileUtil.fullyDelete(hdfsDir)) {
    throw new IOException("Could not delete hdfs directory '" + hdfsDir + "'");
  }
  File currDir = new File(hdfsDir, "name2");
  File currDir2 = new File(currDir, "current");
  File currDir3 = new File(currDir, "image");
  assertTrue(currDir.mkdirs());
  assertTrue(currDir2.mkdirs());
  assertTrue(currDir3.mkdirs());

  conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
      fileAsURI(new File(hdfsDir, "name2")).toString());
  conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY,
      "${" + DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY + "}");

  // Start BackupNode
  String[] args = new String[] { StartupOption.BACKUP.getName() };
  BackupNode bu = (BackupNode) NameNode.createNameNode(args, conf);

  return bu;
}
Example 6
Source File: TestHDFSServerPorts.java From big-c with Apache License 2.0
/**
 * Start the namenode.
 */
public NameNode startNameNode(boolean withService) throws IOException {
  hdfsDir = new File(TEST_DATA_DIR, "dfs");
  if (hdfsDir.exists() && !FileUtil.fullyDelete(hdfsDir)) {
    throw new IOException("Could not delete hdfs directory '" + hdfsDir + "'");
  }
  config = new HdfsConfiguration();
  config.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
      fileAsURI(new File(hdfsDir, "name1")).toString());
  FileSystem.setDefaultUri(config, "hdfs://" + THIS_HOST);
  if (withService) {
    NameNode.setServiceAddress(config, THIS_HOST);
  }
  config.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, THIS_HOST);
  DFSTestUtil.formatNameNode(config);

  String[] args = new String[] {};
  // NameNode will modify config with the ports it bound to
  return NameNode.createNameNode(args, config);
}
Example 7
Source File: MiniDFSCluster.java From big-c with Apache License 2.0
/**
 * Restart the namenode at a given index. Optionally wait for the cluster
 * to become active.
 */
public synchronized void restartNameNode(int nnIndex, boolean waitActive,
    String... args) throws IOException {
  String nameserviceId = nameNodes[nnIndex].nameserviceId;
  String nnId = nameNodes[nnIndex].nnId;
  StartupOption startOpt = nameNodes[nnIndex].startOpt;
  Configuration conf = nameNodes[nnIndex].conf;
  shutdownNameNode(nnIndex);

  if (args.length != 0) {
    startOpt = null;
  } else {
    args = createArgs(startOpt);
  }

  NameNode nn = NameNode.createNameNode(args, conf);
  nameNodes[nnIndex] = new NameNodeInfo(nn, nameserviceId, nnId, startOpt,
      conf);
  if (waitActive) {
    waitClusterUp();
    LOG.info("Restarted the namenode");
    waitActive();
  }
}
Example 8
Source File: TestHDFSServerPorts.java From hadoop with Apache License 2.0
/**
 * Start the BackupNode
 */
public BackupNode startBackupNode(Configuration conf) throws IOException {
  // Set up testing environment directories
  hdfsDir = new File(TEST_DATA_DIR, "backupNode");
  if (hdfsDir.exists() && !FileUtil.fullyDelete(hdfsDir)) {
    throw new IOException("Could not delete hdfs directory '" + hdfsDir + "'");
  }
  File currDir = new File(hdfsDir, "name2");
  File currDir2 = new File(currDir, "current");
  File currDir3 = new File(currDir, "image");
  assertTrue(currDir.mkdirs());
  assertTrue(currDir2.mkdirs());
  assertTrue(currDir3.mkdirs());

  conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
      fileAsURI(new File(hdfsDir, "name2")).toString());
  conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY,
      "${" + DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY + "}");

  // Start BackupNode
  String[] args = new String[] { StartupOption.BACKUP.getName() };
  BackupNode bu = (BackupNode) NameNode.createNameNode(args, conf);

  return bu;
}
Example 9
Source File: TestHDFSServerPorts.java From hadoop with Apache License 2.0
/**
 * Start the namenode.
 */
public NameNode startNameNode(boolean withService) throws IOException {
  hdfsDir = new File(TEST_DATA_DIR, "dfs");
  if (hdfsDir.exists() && !FileUtil.fullyDelete(hdfsDir)) {
    throw new IOException("Could not delete hdfs directory '" + hdfsDir + "'");
  }
  config = new HdfsConfiguration();
  config.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
      fileAsURI(new File(hdfsDir, "name1")).toString());
  FileSystem.setDefaultUri(config, "hdfs://" + THIS_HOST);
  if (withService) {
    NameNode.setServiceAddress(config, THIS_HOST);
  }
  config.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, THIS_HOST);
  DFSTestUtil.formatNameNode(config);

  String[] args = new String[] {};
  // NameNode will modify config with the ports it bound to
  return NameNode.createNameNode(args, config);
}
Example 10
Source File: TestHDFSServerPorts.java From hadoop with Apache License 2.0
/**
 * Check whether the namenode can be started.
 */
private boolean canStartNameNode(Configuration conf) throws IOException {
  NameNode nn2 = null;
  try {
    nn2 = NameNode.createNameNode(new String[]{}, conf);
  } catch (IOException e) {
    if (e instanceof java.net.BindException)
      return false;
    throw e;
  } finally {
    stopNameNode(nn2);
  }
  return true;
}
Example 11
Source File: TestHDFSServerPorts.java From big-c with Apache License 2.0
/**
 * Check whether the namenode can be started.
 */
private boolean canStartNameNode(Configuration conf) throws IOException {
  NameNode nn2 = null;
  try {
    nn2 = NameNode.createNameNode(new String[]{}, conf);
  } catch (IOException e) {
    if (e instanceof java.net.BindException)
      return false;
    throw e;
  } finally {
    stopNameNode(nn2);
  }
  return true;
}
Example 12
Source File: MiniDFSCluster.java From RDFS with Apache License 2.0
public synchronized void restartNameNode(int nnIndex, String[] argv,
    boolean waitActive) throws IOException {
  shutdownNameNode(nnIndex);
  Configuration conf = nameNodes[nnIndex].conf;
  NameNode nn = NameNode.createNameNode(argv, conf);
  nameNodes[nnIndex] = new NameNodeInfo(nn, conf);
  if (!waitActive) {
    return;
  }
  waitClusterUp();
  System.out.println("Restarted the namenode");
  int failedCount = 0;
  while (true) {
    try {
      waitActive();
      break;
    } catch (IOException e) {
      failedCount++;
      // Cached RPC connection to namenode, if any, is expected to fail once
      if (failedCount > 5) {
        System.out.println("Tried waitActive() " + failedCount
            + " time(s) and failed, giving up. "
            + StringUtils.stringifyException(e));
        throw e;
      }
    }
  }
  System.out.println("Cluster is active");
}
Example 13
Source File: TestHDFSServerPorts.java From RDFS with Apache License 2.0
/**
 * Check whether the name-node can be started.
 */
private boolean canStartNameNode(Configuration conf) throws IOException {
  NameNode nn2 = null;
  try {
    nn2 = NameNode.createNameNode(new String[]{}, conf);
  } catch (IOException e) {
    if (e instanceof java.net.BindException)
      return false;
    throw e;
  }
  stopNameNode(nn2);
  return true;
}
Example 14
Source File: TestHDFSServerPorts.java From hadoop-gpu with Apache License 2.0
/**
 * Check whether the name-node can be started.
 */
private boolean canStartNameNode(Configuration conf) throws IOException {
  NameNode nn2 = null;
  try {
    nn2 = NameNode.createNameNode(new String[]{}, conf);
  } catch (IOException e) {
    if (e instanceof java.net.BindException)
      return false;
    throw e;
  }
  stopNameNode(nn2);
  return true;
}
Example 15
Source File: MiniDFSCluster.java From big-c with Apache License 2.0
private void createNameNode(int nnIndex, Configuration conf,
    int numDataNodes, boolean format, StartupOption operation,
    String clusterId, String nameserviceId, String nnId)
    throws IOException {
  // Format and clean out DataNode directories
  if (format) {
    DFSTestUtil.formatNameNode(conf);
  }
  if (operation == StartupOption.UPGRADE) {
    operation.setClusterId(clusterId);
  }

  // Start the NameNode after saving the default file system.
  String originalDefaultFs = conf.get(FS_DEFAULT_NAME_KEY);
  String[] args = createArgs(operation);
  NameNode nn = NameNode.createNameNode(args, conf);
  if (operation == StartupOption.RECOVER) {
    return;
  }

  // After the NN has started, set back the bound ports into
  // the conf
  conf.set(DFSUtil.addKeySuffixes(DFS_NAMENODE_RPC_ADDRESS_KEY,
      nameserviceId, nnId), nn.getNameNodeAddressHostPortString());
  if (nn.getHttpAddress() != null) {
    conf.set(DFSUtil.addKeySuffixes(DFS_NAMENODE_HTTP_ADDRESS_KEY,
        nameserviceId, nnId), NetUtils.getHostPortString(nn.getHttpAddress()));
  }
  if (nn.getHttpsAddress() != null) {
    conf.set(DFSUtil.addKeySuffixes(DFS_NAMENODE_HTTPS_ADDRESS_KEY,
        nameserviceId, nnId), NetUtils.getHostPortString(nn.getHttpsAddress()));
  }

  DFSUtil.setGenericConf(conf, nameserviceId, nnId,
      DFS_NAMENODE_HTTP_ADDRESS_KEY);
  nameNodes[nnIndex] = new NameNodeInfo(nn, nameserviceId, nnId, operation,
      new Configuration(conf));
  // Restore the default fs name
  if (originalDefaultFs == null) {
    conf.set(FS_DEFAULT_NAME_KEY, "");
  } else {
    conf.set(FS_DEFAULT_NAME_KEY, originalDefaultFs);
  }
}
Example 16
Source File: MiniDFSCluster.java From hadoop-gpu with Apache License 2.0
/**
 * NOTE: if possible, the other constructors that don't have nameNode port
 * parameter should be used as they will ensure that the servers use free ports.
 * <p>
 * Modify the config and start up the servers.
 *
 * @param nameNodePort suggestion for which rpc port to use. caller should
 *          use getNameNodePort() to get the actual port used.
 * @param conf the base configuration to use in starting the servers. This
 *          will be modified as necessary.
 * @param numDataNodes Number of DataNodes to start; may be zero
 * @param format if true, format the NameNode and DataNodes before starting up
 * @param manageNameDfsDirs if true, the data directories for servers will be
 *          created and dfs.name.dir and dfs.data.dir will be set in the conf
 * @param manageDataDfsDirs if true, the data directories for datanodes will
 *          be created and dfs.data.dir set to same in the conf
 * @param operation the operation with which to start the servers. If null
 *          or StartupOption.FORMAT, then StartupOption.REGULAR will be used.
 * @param racks array of strings indicating the rack that each DataNode is on
 * @param hosts array of strings indicating the hostnames of each DataNode
 * @param simulatedCapacities array of capacities of the simulated data nodes
 */
public MiniDFSCluster(int nameNodePort,
                      Configuration conf,
                      int numDataNodes,
                      boolean format,
                      boolean manageNameDfsDirs,
                      boolean manageDataDfsDirs,
                      StartupOption operation,
                      String[] racks, String hosts[],
                      long[] simulatedCapacities) throws IOException {
  this.conf = conf;
  try {
    UserGroupInformation.setCurrentUser(UnixUserGroupInformation.login(conf));
  } catch (LoginException e) {
    IOException ioe = new IOException();
    ioe.initCause(e);
    throw ioe;
  }
  base_dir = new File(System.getProperty("test.build.data", "build/test/data"),
      "dfs/");
  data_dir = new File(base_dir, "data");

  // Setup the NameNode configuration
  FileSystem.setDefaultUri(conf, "hdfs://localhost:" + Integer.toString(nameNodePort));
  conf.set("dfs.http.address", "127.0.0.1:0");
  if (manageNameDfsDirs) {
    conf.set("dfs.name.dir", new File(base_dir, "name1").getPath() + "," +
        new File(base_dir, "name2").getPath());
    conf.set("fs.checkpoint.dir", new File(base_dir, "namesecondary1").getPath() +
        "," + new File(base_dir, "namesecondary2").getPath());
  }

  int replication = conf.getInt("dfs.replication", 3);
  conf.setInt("dfs.replication", Math.min(replication, numDataNodes));
  conf.setInt("dfs.safemode.extension", 0);
  conf.setInt("dfs.namenode.decommission.interval", 3); // 3 second

  // Format and clean out DataNode directories
  if (format) {
    if (data_dir.exists() && !FileUtil.fullyDelete(data_dir)) {
      throw new IOException("Cannot remove data directory: " + data_dir);
    }
    NameNode.format(conf);
  }

  // Start the NameNode
  String[] args = (operation == null ||
                   operation == StartupOption.FORMAT ||
                   operation == StartupOption.REGULAR) ?
      new String[] {} : new String[] { operation.getName() };
  conf.setClass("topology.node.switch.mapping.impl",
                StaticMapping.class, DNSToSwitchMapping.class);
  nameNode = NameNode.createNameNode(args, conf);

  // Start the DataNodes
  startDataNodes(conf, numDataNodes, manageDataDfsDirs,
                 operation, racks, hosts, simulatedCapacities);
  waitClusterUp();
}
Example 17
Source File: MiniDFSCluster.java From hadoop with Apache License 2.0
private void createNameNode(int nnIndex, Configuration conf,
    int numDataNodes, boolean format, StartupOption operation,
    String clusterId, String nameserviceId, String nnId)
    throws IOException {
  // Format and clean out DataNode directories
  if (format) {
    DFSTestUtil.formatNameNode(conf);
  }
  if (operation == StartupOption.UPGRADE) {
    operation.setClusterId(clusterId);
  }

  // Start the NameNode after saving the default file system.
  String originalDefaultFs = conf.get(FS_DEFAULT_NAME_KEY);
  String[] args = createArgs(operation);
  NameNode nn = NameNode.createNameNode(args, conf);
  if (operation == StartupOption.RECOVER) {
    return;
  }

  // After the NN has started, set back the bound ports into
  // the conf
  conf.set(DFSUtil.addKeySuffixes(DFS_NAMENODE_RPC_ADDRESS_KEY,
      nameserviceId, nnId), nn.getNameNodeAddressHostPortString());
  if (nn.getHttpAddress() != null) {
    conf.set(DFSUtil.addKeySuffixes(DFS_NAMENODE_HTTP_ADDRESS_KEY,
        nameserviceId, nnId), NetUtils.getHostPortString(nn.getHttpAddress()));
  }
  if (nn.getHttpsAddress() != null) {
    conf.set(DFSUtil.addKeySuffixes(DFS_NAMENODE_HTTPS_ADDRESS_KEY,
        nameserviceId, nnId), NetUtils.getHostPortString(nn.getHttpsAddress()));
  }

  DFSUtil.setGenericConf(conf, nameserviceId, nnId,
      DFS_NAMENODE_HTTP_ADDRESS_KEY);
  nameNodes[nnIndex] = new NameNodeInfo(nn, nameserviceId, nnId, operation,
      new Configuration(conf));
  // Restore the default fs name
  if (originalDefaultFs == null) {
    conf.set(FS_DEFAULT_NAME_KEY, "");
  } else {
    conf.set(FS_DEFAULT_NAME_KEY, originalDefaultFs);
  }
}