org.apache.hadoop.hdfs.server.namenode.NameNode Java Examples
The following examples show how to use
org.apache.hadoop.hdfs.server.namenode.NameNode.
Each example notes the project and source file it was taken from.
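Before the examples, here is a minimal, self-contained sketch of the pattern most of them share: spinning up a MiniDFSCluster in a test and grabbing a handle to its NameNode. This sketch is not taken from any project below; it assumes the Hadoop 2.x test artifacts (the hadoop-hdfs test jar), where MiniDFSCluster.Builder, getNameNode(), and isInSafeMode() are available.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.server.namenode.NameNode;

public class NameNodeQuickStart {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Start a single-datanode mini cluster; this formats a fresh namespace.
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
        .numDataNodes(1).build();
    try {
      cluster.waitActive(); // block until the NameNode is up and reachable
      NameNode nn = cluster.getNameNode();
      System.out.println("NameNode in safe mode: " + nn.isInSafeMode());
    } finally {
      cluster.shutdown();
    }
  }
}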
Example #1
Source File: TestDFSHAAdminMiniCluster.java (from hadoop, Apache License 2.0)
@Test
public void testStateTransition() throws Exception {
  NameNode nnode1 = cluster.getNameNode(0);
  assertTrue(nnode1.isStandbyState());
  assertEquals(0, runTool("-transitionToActive", "nn1"));
  assertFalse(nnode1.isStandbyState());
  assertEquals(0, runTool("-transitionToStandby", "nn1"));
  assertTrue(nnode1.isStandbyState());

  NameNode nnode2 = cluster.getNameNode(1);
  assertTrue(nnode2.isStandbyState());
  assertEquals(0, runTool("-transitionToActive", "nn2"));
  assertFalse(nnode2.isStandbyState());
  assertEquals(0, runTool("-transitionToStandby", "nn2"));
  assertTrue(nnode2.isStandbyState());
}
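For reference, the runTool() calls above drive DFSHAAdmin programmatically; from a shell the same transitions are issued with the haadmin CLI, e.g. hdfs haadmin -transitionToActive nn1 and hdfs haadmin -transitionToStandby nn1.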
Example #2
Source File: TestHAStateTransitions.java (from hadoop, Apache License 2.0)
private static void createEmptyInProgressEditLog(MiniDFSCluster cluster,
    NameNode nn, boolean writeHeader) throws IOException {
  long txid = nn.getNamesystem().getEditLog().getLastWrittenTxId();
  URI sharedEditsUri = cluster.getSharedEditsDir(0, 1);
  File sharedEditsDir = new File(sharedEditsUri.getPath());
  StorageDirectory storageDir = new StorageDirectory(sharedEditsDir);
  File inProgressFile = NameNodeAdapter.getInProgressEditsFile(storageDir,
      txid + 1);
  assertTrue("Failed to create in-progress edits file",
      inProgressFile.createNewFile());

  if (writeHeader) {
    DataOutputStream out = new DataOutputStream(new FileOutputStream(
        inProgressFile));
    EditLogFileOutputStream.writeHeader(
        NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION, out);
    out.close();
  }
}
Example #3
Source File: DelegationTokenSecretManager.java (from hadoop, Apache License 2.0)
private synchronized void saveAllKeys(DataOutputStream out, String sdPath)
    throws IOException {
  StartupProgress prog = NameNode.getStartupProgress();
  Step step = new Step(StepType.DELEGATION_KEYS, sdPath);
  prog.beginStep(Phase.SAVING_CHECKPOINT, step);
  prog.setTotal(Phase.SAVING_CHECKPOINT, step, currentTokens.size());
  Counter counter = prog.getCounter(Phase.SAVING_CHECKPOINT, step);
  out.writeInt(allKeys.size());
  Iterator<Integer> iter = allKeys.keySet().iterator();
  while (iter.hasNext()) {
    Integer key = iter.next();
    allKeys.get(key).write(out);
    counter.increment();
  }
  prog.endStep(Phase.SAVING_CHECKPOINT, step);
}
Example #4
Source File: TestHDFSServerPorts.java (from RDFS, Apache License 2.0)
/**
 * Verify secondary name-node port usage.
 */
public void testSecondaryNodePorts() throws Exception {
  NameNode nn = null;
  try {
    nn = startNameNode();

    // bind http server to the same port as name-node
    Configuration conf2 = new Configuration(config);
    conf2.set("dfs.secondary.http.address",
        config.get("dfs.http.address"));
    SecondaryNameNode.LOG.info("= Starting 1 on: "
        + conf2.get("dfs.secondary.http.address"));
    boolean started = canStartSecondaryNode(conf2);
    assertFalse(started); // should fail

    // bind http server to a different port
    conf2.set("dfs.secondary.http.address", NAME_NODE_HTTP_HOST + "0");
    SecondaryNameNode.LOG.info("= Starting 2 on: "
        + conf2.get("dfs.secondary.http.address"));
    started = canStartSecondaryNode(conf2);
    assertTrue(started); // should start now
  } finally {
    stopNameNode(nn);
  }
}
Example #5
Source File: DelegationTokenSecretManager.java (from big-c, Apache License 2.0)
/**
 * Private helper method to save delegation tokens in the fsimage.
 */
private synchronized void saveCurrentTokens(DataOutputStream out,
    String sdPath) throws IOException {
  StartupProgress prog = NameNode.getStartupProgress();
  Step step = new Step(StepType.DELEGATION_TOKENS, sdPath);
  prog.beginStep(Phase.SAVING_CHECKPOINT, step);
  prog.setTotal(Phase.SAVING_CHECKPOINT, step, currentTokens.size());
  Counter counter = prog.getCounter(Phase.SAVING_CHECKPOINT, step);
  out.writeInt(currentTokens.size());
  Iterator<DelegationTokenIdentifier> iter = currentTokens.keySet()
      .iterator();
  while (iter.hasNext()) {
    DelegationTokenIdentifier id = iter.next();
    id.write(out);
    DelegationTokenInformation info = currentTokens.get(id);
    out.writeLong(info.getRenewDate());
    counter.increment();
  }
  prog.endStep(Phase.SAVING_CHECKPOINT, step);
}
Example #6
Source File: IPFailoverProxyProvider.java (from big-c, Apache License 2.0)
@Override
public synchronized ProxyInfo<T> getProxy() {
  // Create a non-HA proxy if not already created.
  if (nnProxyInfo == null) {
    try {
      // Create a proxy that is not wrapped in RetryProxy
      InetSocketAddress nnAddr = NameNode.getAddress(nameNodeUri);
      nnProxyInfo = new ProxyInfo<T>(NameNodeProxies.createNonHAProxy(
          conf, nnAddr, xface, UserGroupInformation.getCurrentUser(),
          false).getProxy(), nnAddr.toString());
    } catch (IOException ioe) {
      throw new RuntimeException(ioe);
    }
  }
  return nnProxyInfo;
}
Example #7
Source File: TestDecommission.java (from RDFS, Apache License 2.0)
private void verifyStats(NameNode namenode, FSNamesystem fsn,
    DatanodeInfo node, boolean decommissioning)
    throws InterruptedException, IOException {
  // Do the stats check over 10 iterations
  for (int i = 0; i < 10; i++) {
    long[] newStats = namenode.getStats();

    // For decommissioning nodes, ensure capacity of the DN is no longer
    // counted. Only used space of the DN is counted in cluster capacity.
    assertEquals(newStats[0],
        decommissioning ? node.getDfsUsed() : node.getCapacity());

    // Ensure cluster used capacity is counted for both normal and
    // decommissioning nodes.
    assertEquals(newStats[1], node.getDfsUsed());

    // For decommissioning nodes, remaining space from the DN is not counted.
    assertEquals(newStats[2], decommissioning ? 0 : node.getRemaining());

    // Ensure transceiver count is the same as that of the DN.
    assertEquals(fsn.getTotalLoad(), node.getXceiverCount());
    Thread.sleep(HEARTBEAT_INTERVAL * 1000); // Sleep heart beat interval
  }
}
Example #8
Source File: IPFailoverProxyProvider.java (from hadoop, Apache License 2.0)
@Override
public synchronized ProxyInfo<T> getProxy() {
  // Create a non-HA proxy if not already created.
  if (nnProxyInfo == null) {
    try {
      // Create a proxy that is not wrapped in RetryProxy
      InetSocketAddress nnAddr = NameNode.getAddress(nameNodeUri);
      nnProxyInfo = new ProxyInfo<T>(NameNodeProxies.createNonHAProxy(
          conf, nnAddr, xface, UserGroupInformation.getCurrentUser(),
          false).getProxy(), nnAddr.toString());
    } catch (IOException ioe) {
      throw new RuntimeException(ioe);
    }
  }
  return nnProxyInfo;
}
Example #9
Source File: FastCopySetupUtil.java (from RDFS, Apache License 2.0)
public void testFastCopyMultiple(boolean hardlink) throws Exception {
  // Create a source file.
  String src = "/testFastCopyMultipleSrc" + hardlink;
  generateRandomFile(fs, src, FILESIZE);
  String destination = "/testFastCopyMultipleDestination" + hardlink;
  FastCopy fastCopy = new FastCopy(conf);

  List<FastFileCopyRequest> requests = new ArrayList<FastFileCopyRequest>();
  for (int i = 0; i < COPIES; i++) {
    requests.add(new FastFileCopyRequest(src, destination + i, fs, fs));
  }

  NameNode namenode = cluster.getNameNode();
  try {
    fastCopy.copy(requests);
    for (FastFileCopyRequest r : requests) {
      assertTrue(verifyCopiedFile(r.getSrc(), r.getDestination(),
          namenode, namenode, fs, fs, hardlink));
      verifyFileStatus(r.getDestination(), namenode, fastCopy);
    }
  } catch (Exception e) {
    LOG.error("Fast Copy failed with exception : ", e);
    fail("Fast Copy failed");
  } finally {
    fastCopy.shutdown();
  }
  assertTrue(pass);
}
Example #10
Source File: NameNodeMetrics.java (from RDFS, Apache License 2.0)
public NameNodeMetrics(Configuration conf, NameNode nameNode) {
  String sessionId = conf.get("session.id");
  // Initiate Java VM metrics
  JvmMetrics.init("NameNode", sessionId);

  // Now the MBean for the name node - this also registers the MBean
  // (the class name misspelling below is as in the original source)
  namenodeActivityMBean = new NameNodeActivtyMBean(registry);

  // Create a record for NameNode metrics
  MetricsContext metricsContext = MetricsUtil.getContext("dfs");
  metricsRecord = MetricsUtil.createRecord(metricsContext, "namenode");
  metricsRecord.setTag("sessionId", sessionId);
  metricsContext.registerUpdater(this);
  log.info("Initializing NameNodeMetrics using context object:"
      + metricsContext.getClass().getName());
}
Example #11
Source File: BootstrapStandby.java (from hadoop, Apache License 2.0)
@Override
public int run(String[] args) throws Exception {
  parseArgs(args);
  parseConfAndFindOtherNN();
  NameNode.checkAllowFormat(conf);

  InetSocketAddress myAddr = NameNode.getAddress(conf);
  SecurityUtil.login(conf, DFS_NAMENODE_KEYTAB_FILE_KEY,
      DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY, myAddr.getHostName());

  return SecurityUtil.doAsLoginUserOrFatal(new PrivilegedAction<Integer>() {
    @Override
    public Integer run() {
      try {
        return doRun();
      } catch (IOException e) {
        throw new RuntimeException(e);
      }
    }
  });
}
Example #12
Source File: TestHAStateTransitions.java (from big-c, Apache License 2.0)
private static void createEmptyInProgressEditLog(MiniDFSCluster cluster,
    NameNode nn, boolean writeHeader) throws IOException {
  long txid = nn.getNamesystem().getEditLog().getLastWrittenTxId();
  URI sharedEditsUri = cluster.getSharedEditsDir(0, 1);
  File sharedEditsDir = new File(sharedEditsUri.getPath());
  StorageDirectory storageDir = new StorageDirectory(sharedEditsDir);
  File inProgressFile = NameNodeAdapter.getInProgressEditsFile(storageDir,
      txid + 1);
  assertTrue("Failed to create in-progress edits file",
      inProgressFile.createNewFile());

  if (writeHeader) {
    DataOutputStream out = new DataOutputStream(new FileOutputStream(
        inProgressFile));
    EditLogFileOutputStream.writeHeader(
        NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION, out);
    out.close();
  }
}
Example #13
Source File: DatanodeManager.java (from hadoop, Apache License 2.0)
/**
 * Remove a datanode.
 * @throws UnregisteredNodeException
 */
public void removeDatanode(final DatanodeID node)
    throws UnregisteredNodeException {
  namesystem.writeLock();
  try {
    final DatanodeDescriptor descriptor = getDatanode(node);
    if (descriptor != null) {
      removeDatanode(descriptor);
    } else {
      NameNode.stateChangeLog.warn("BLOCK* removeDatanode: "
          + node + " does not exist");
    }
  } finally {
    namesystem.writeUnlock();
  }
}
Example #14
Source File: TestHDFSServerPorts.java (from hadoop-gpu, Apache License 2.0)
/**
 * Start the name-node.
 */
public NameNode startNameNode() throws IOException {
  String dataDir = System.getProperty("test.build.data");
  hdfsDir = new File(dataDir, "dfs");
  if (hdfsDir.exists() && !FileUtil.fullyDelete(hdfsDir)) {
    throw new IOException("Could not delete hdfs directory '" + hdfsDir + "'");
  }
  config = new Configuration();
  config.set("dfs.name.dir", new File(hdfsDir, "name1").getPath());
  FileSystem.setDefaultUri(config, "hdfs://" + NAME_NODE_HOST + "0");
  config.set("dfs.http.address", NAME_NODE_HTTP_HOST + "0");
  NameNode.format(config);

  String[] args = new String[] {};
  // NameNode will modify config with the ports it bound to
  return NameNode.createNameNode(args, config);
}
Example #15
Source File: TestDFSClientFailover.java (from hadoop, Apache License 2.0)
/**
 * Make sure that client failover works when an active NN dies and the
 * standby takes over.
 */
@Test
public void testDfsClientFailover() throws IOException, URISyntaxException {
  FileSystem fs = HATestUtil.configureFailoverFs(cluster, conf);

  DFSTestUtil.createFile(fs, TEST_FILE, FILE_LENGTH_TO_VERIFY, (short) 1, 1L);

  assertEquals(fs.getFileStatus(TEST_FILE).getLen(), FILE_LENGTH_TO_VERIFY);
  cluster.shutdownNameNode(0);
  cluster.transitionToActive(1);
  assertEquals(fs.getFileStatus(TEST_FILE).getLen(), FILE_LENGTH_TO_VERIFY);

  // Check that it functions even if the URL becomes canonicalized
  // to include a port number.
  Path withPort = new Path("hdfs://" + HATestUtil.getLogicalHostname(cluster)
      + ":" + NameNode.DEFAULT_PORT + "/" + TEST_FILE.toUri().getPath());
  FileSystem fs2 = withPort.getFileSystem(fs.getConf());
  assertTrue(fs2.exists(withPort));

  fs.close();
}
Example #16
Source File: AvatarDataNode.java (from RDFS, Apache License 2.0)
private static List<InetSocketAddress> getDatanodeProtocolAddresses(
    Configuration conf, Collection<String> serviceIds) throws IOException {
  // Use default address as fall back
  String defaultAddress;
  try {
    defaultAddress = conf.get(FileSystem.FS_DEFAULT_NAME_KEY);
    if (defaultAddress != null) {
      Configuration newConf = new Configuration(conf);
      newConf.set(FileSystem.FS_DEFAULT_NAME_KEY, defaultAddress);
      defaultAddress = NameNode.getHostPortString(
          NameNode.getAddress(newConf));
    }
  } catch (IllegalArgumentException e) {
    defaultAddress = null;
  }

  List<InetSocketAddress> addressList = DFSUtil.getAddresses(conf,
      serviceIds, defaultAddress,
      NameNode.DATANODE_PROTOCOL_ADDRESS,
      FSConstants.DFS_NAMENODE_RPC_ADDRESS_KEY);
  if (addressList == null) {
    throw new IOException("Incorrect configuration: namenode address "
        + FSConstants.DFS_NAMENODE_RPC_ADDRESS_KEY + " is not configured.");
  }
  return addressList;
}
Example #17
Source File: TestDFSShellGenericOptions.java (from hadoop, Apache License 2.0)
private void execute(String[] args, String namenode) {
  FsShell shell = new FsShell();
  FileSystem fs = null;
  try {
    ToolRunner.run(shell, args);
    fs = FileSystem.get(NameNode.getUri(NameNode.getAddress(namenode)),
        shell.getConf());
    assertTrue("Directory does not get created",
        fs.isDirectory(new Path("/data")));
    fs.delete(new Path("/data"), true);
  } catch (Exception e) {
    System.err.println(e.getMessage());
    e.printStackTrace();
  } finally {
    if (fs != null) {
      try {
        fs.close();
      } catch (IOException ignored) {
      }
    }
  }
}
Example #18
Source File: MiniDFSCluster.java (from RDFS, Apache License 2.0)
/**
 * Returns true if the NameNode is running and is out of safe mode,
 * or if waiting for safe mode is disabled.
 */
public boolean isNameNodeUp(int nnIndex) {
  NameNode nn = nameNodes[nnIndex].nameNode;
  if (nn == null) {
    return false;
  }
  try {
    long[] sizes = nn.getStats();
    boolean isUp = false;
    synchronized (this) {
      isUp = ((!nn.isInSafeMode() || !waitSafeMode) && sizes[0] != 0);
    }
    return isUp;
  } catch (IOException ie) {
    return false;
  }
}
Example #19
Source File: TestDFSShellGenericOptions.java (from big-c, Apache License 2.0)
private void execute(String[] args, String namenode) {
  FsShell shell = new FsShell();
  FileSystem fs = null;
  try {
    ToolRunner.run(shell, args);
    fs = FileSystem.get(NameNode.getUri(NameNode.getAddress(namenode)),
        shell.getConf());
    assertTrue("Directory does not get created",
        fs.isDirectory(new Path("/data")));
    fs.delete(new Path("/data"), true);
  } catch (Exception e) {
    System.err.println(e.getMessage());
    e.printStackTrace();
  } finally {
    if (fs != null) {
      try {
        fs.close();
      } catch (IOException ignored) {
      }
    }
  }
}
Example #20
Source File: TestHDFSServerPorts.java (from RDFS, Apache License 2.0)
/**
 * Start the name-node.
 */
public NameNode startNameNode() throws IOException {
  String dataDir = System.getProperty("test.build.data");
  hdfsDir = new File(dataDir, "dfs");
  if (hdfsDir.exists() && !FileUtil.fullyDelete(hdfsDir)) {
    throw new IOException("Could not delete hdfs directory '" + hdfsDir + "'");
  }
  config = new Configuration();
  config.set("dfs.name.dir", new File(hdfsDir, "name1").getPath());
  FileSystem.setDefaultUri(config, "hdfs://" + NAME_NODE_HOST + "0");
  config.set("dfs.http.address", NAME_NODE_HTTP_HOST + "0");
  NameNode.format(config);

  String[] args = new String[] {};
  // NameNode will modify config with the ports it bound to
  return NameNode.createNameNode(args, config);
}
Example #21
Source File: DatanodeManager.java (from hadoop, Apache License 2.0)
/** Remove a dead datanode. */
void removeDeadDatanode(final DatanodeID nodeID) {
  synchronized (datanodeMap) {
    DatanodeDescriptor d;
    try {
      d = getDatanode(nodeID);
    } catch (IOException e) {
      d = null;
    }
    if (d != null && isDatanodeDead(d)) {
      NameNode.stateChangeLog.info(
          "BLOCK* removeDeadDatanode: lost heartbeat from " + d);
      removeDatanode(d);
    }
  }
}
Example #22
Source File: DistributedFileSystem.java (from RDFS, Apache License 2.0)
/** Permit paths which explicitly specify the default port. */
protected void checkPath(Path path) {
  URI thisUri = this.getUri();
  URI thatUri = path.toUri();
  String thatAuthority = thatUri.getAuthority();
  if (thatUri.getScheme() != null
      && thatUri.getScheme().equalsIgnoreCase(thisUri.getScheme())
      && thatUri.getPort() == NameNode.DEFAULT_PORT
      && thisUri.getPort() == -1
      && thatAuthority.substring(0, thatAuthority.indexOf(":"))
          .equalsIgnoreCase(thisUri.getAuthority())) {
    return;
  }
  super.checkPath(path);
}
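To make the intent concrete, here is a hedged illustration; the host name is a placeholder and 8020 is assumed as the value of NameNode.DEFAULT_PORT in this era of Hadoop. With the override above, a DistributedFileSystem created from a port-less URI accepts both forms of the same path, whereas the base FileSystem.checkPath() would reject the explicit-port form.

import org.apache.hadoop.fs.Path;

public class CheckPathIllustration {
  public static void main(String[] args) {
    // Hypothetical URIs for illustration only.
    Path noPort = new Path("hdfs://nn.example.com/user/alice/data");
    Path withPort = new Path("hdfs://nn.example.com:8020/user/alice/data");
    // A file system whose own URI is hdfs://nn.example.com (no port) treats
    // both as valid, because checkPath() special-cases an explicit port
    // equal to the default.
    System.out.println(noPort + " and " + withPort + " resolve to the same fs");
  }
}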
Example #23
Source File: MiniQJMHACluster.java (from big-c, Apache License 2.0)
private MiniQJMHACluster(Builder builder) throws IOException {
  this.conf = builder.conf;
  int retryCount = 0;
  while (true) {
    try {
      basePort = 10000 + RANDOM.nextInt(1000) * 4;

      // start 3 journal nodes
      journalCluster = new MiniJournalCluster.Builder(conf).format(true)
          .build();
      URI journalURI = journalCluster.getQuorumJournalURI(NAMESERVICE);

      // start cluster with 2 NameNodes
      MiniDFSNNTopology topology = createDefaultTopology(basePort);
      initHAConf(journalURI, builder.conf);

      // First start up the NNs just to format the namespace. The
      // MiniDFSCluster has no way to just format the NameNodes without
      // also starting them.
      cluster = builder.dfsBuilder.nnTopology(topology)
          .manageNameDfsSharedDirs(false).build();
      cluster.waitActive();
      cluster.shutdownNameNodes();

      // initialize the journal nodes
      Configuration confNN0 = cluster.getConfiguration(0);
      NameNode.initializeSharedEdits(confNN0, true);

      cluster.getNameNodeInfos()[0].setStartOpt(builder.startOpt);
      cluster.getNameNodeInfos()[1].setStartOpt(builder.startOpt);

      // restart the cluster
      cluster.restartNameNodes();
      ++retryCount;
      break;
    } catch (BindException e) {
      LOG.info("MiniQJMHACluster port conflicts, retried " + retryCount
          + " times");
    }
  }
}
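A note on the design here: basePort is chosen at random, so a BindException from a port collision is expected occasionally; the loop simply logs the retry count and rebuilds the whole journal-and-NameNode cluster on a freshly chosen port range until the bind succeeds.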
Example #24
Source File: DFSUtil.java (from RDFS, Apache License 2.0)
/**
 * Returns the list of InetSocketAddresses corresponding to namenodes from
 * the configuration.
 * @param suffix 0 or 1, indicating whether this is AN0 or AN1
 * @param conf configuration
 * @param serviceIds collection of service ids
 * @param keys set of configuration keys
 * @return list of InetSocketAddress
 * @throws IOException on error
 */
public static List<InetSocketAddress> getRPCAddresses(String suffix,
    Configuration conf, Collection<String> serviceIds, String... keys)
    throws IOException {
  // Use default address as fall back
  String defaultAddress = null;
  try {
    defaultAddress = conf.get(FileSystem.FS_DEFAULT_NAME_KEY + suffix);
    if (defaultAddress != null) {
      Configuration newConf = new Configuration(conf);
      newConf.set(FileSystem.FS_DEFAULT_NAME_KEY, defaultAddress);
      defaultAddress = NameNode.getHostPortString(
          NameNode.getAddress(newConf));
    }
  } catch (IllegalArgumentException e) {
    defaultAddress = null;
  }

  for (int i = 0; i < keys.length; i++) {
    keys[i] += suffix;
  }

  List<InetSocketAddress> addressList = DFSUtil.getAddresses(conf,
      serviceIds, defaultAddress, keys);
  if (addressList == null) {
    String keyStr = "";
    for (String key : keys) {
      keyStr += key + " ";
    }
    throw new IOException("Incorrect configuration: namenode address "
        + keyStr + " is not configured.");
  }
  return addressList;
}
Example #25
Source File: DFSTestUtil.java (from hadoop, Apache License 2.0)
public static void setNameNodeLogLevel(Level level) {
  GenericTestUtils.setLogLevel(FSNamesystem.LOG, level);
  GenericTestUtils.setLogLevel(BlockManager.LOG, level);
  GenericTestUtils.setLogLevel(LeaseManager.LOG, level);
  GenericTestUtils.setLogLevel(NameNode.LOG, level);
  GenericTestUtils.setLogLevel(NameNode.stateChangeLog, level);
  GenericTestUtils.setLogLevel(NameNode.blockStateChangeLog, level);
}
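A short usage sketch: a test that exercises the NameNode might call this helper once during setup. The JUnit wiring and class name below are illustrative assumptions, not from the Hadoop tree; DFSTestUtil itself lives in org.apache.hadoop.hdfs.

import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.log4j.Level;
import org.junit.Before;

public class MyNameNodeTest {
  @Before
  public void raiseNameNodeLogging() {
    // Turn all NameNode-side loggers up to DEBUG so block state changes
    // and lease activity show up in the test output.
    DFSTestUtil.setNameNodeLogLevel(Level.DEBUG);
  }
}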
Example #26
Source File: ActiveState.java (from big-c, Apache License 2.0)
@Override
public void setState(HAContext context, HAState s)
    throws ServiceFailedException {
  if (s == NameNode.STANDBY_STATE) {
    setStateInternal(context, s);
    return;
  }
  super.setState(context, s);
}
Example #27
Source File: TestReplicationPolicyWithNodeGroup.java (from big-c, Apache License 2.0)
@Before
public void setUp() throws Exception {
  FileSystem.setDefaultUri(CONF, "hdfs://localhost:0");
  CONF.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, "0.0.0.0:0");

  // Set properties to make HDFS aware of NodeGroup.
  CONF.set(DFSConfigKeys.DFS_BLOCK_REPLICATOR_CLASSNAME_KEY,
      BlockPlacementPolicyWithNodeGroup.class.getName());
  CONF.set(CommonConfigurationKeysPublic.NET_TOPOLOGY_IMPL_KEY,
      NetworkTopologyWithNodeGroup.class.getName());
  CONF.setBoolean(
      DFSConfigKeys.DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_WRITE_KEY, true);

  File baseDir = PathUtils.getTestDir(TestReplicationPolicyWithNodeGroup.class);
  CONF.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
      new File(baseDir, "name").getPath());

  DFSTestUtil.formatNameNode(CONF);
  namenode = new NameNode(CONF);
  final BlockManager bm = namenode.getNamesystem().getBlockManager();
  replicator = bm.getBlockPlacementPolicy();
  cluster = bm.getDatanodeManager().getNetworkTopology();

  // construct network topology
  for (int i = 0; i < NUM_OF_DATANODES; i++) {
    cluster.add(dataNodes[i]);
  }
  setupDataNodeCapacity();
}
Example #28
Source File: MiniDFSCluster.java (from RDFS, Apache License 2.0)
/**
 * Add a namenode to cluster and start it. Configuration of datanodes
 * in the cluster is refreshed to register with the new namenode.
 * @return newly started namenode
 */
public NameNode addNameNode(Configuration conf, int namenodePort)
    throws IOException {
  if (!federation) {
    throw new IOException("cannot add namenode to non-federated cluster");
  }
  int nnIndex = nameNodes.length;
  int numNameNodes = nameNodes.length + 1;
  NameNodeInfo[] newlist = new NameNodeInfo[numNameNodes];
  System.arraycopy(nameNodes, 0, newlist, 0, nameNodes.length);
  nameNodes = newlist;

  String nameserviceId = NAMESERVICE_ID_PREFIX + getNSId();
  String nameserviceIds = conf.get(FSConstants.DFS_FEDERATION_NAMESERVICES);
  nameserviceIds += "," + nameserviceId;
  conf.set(FSConstants.DFS_FEDERATION_NAMESERVICES, nameserviceIds);

  initFederatedNamenodeAddress(conf, nameserviceId, namenodePort);
  createFederatedNameNode(nnIndex, conf, numDataNodes, true, true, null,
      nameserviceId);

  // Refresh datanodes with the newly started namenode
  for (DataNodeProperties dn : dataNodes) {
    DataNode datanode = dn.datanode;
    datanode.refreshNamenodes(conf);
  }

  // Wait for new namenode to get registrations from all the datanodes
  waitActive(true, nnIndex);
  return nameNodes[nnIndex].nameNode;
}
Example #29
Source File: TestFiPipelines.java (from big-c, Apache License 2.0)
private static void initLoggers() {
  ((Log4JLogger) NameNode.stateChangeLog).getLogger().setLevel(Level.ALL);
  ((Log4JLogger) LogFactory.getLog(FSNamesystem.class)).getLogger()
      .setLevel(Level.ALL);
  ((Log4JLogger) DataNode.LOG).getLogger().setLevel(Level.ALL);
  ((Log4JLogger) TestFiPipelines.LOG).getLogger().setLevel(Level.ALL);
  ((Log4JLogger) DFSClient.LOG).getLogger().setLevel(Level.ALL);
  ((Log4JLogger) FiTestUtil.LOG).getLogger().setLevel(Level.ALL);
  ((Log4JLogger) BlockReceiverAspects.LOG).getLogger().setLevel(Level.ALL);
  ((Log4JLogger) DFSClientAspects.LOG).getLogger().setLevel(Level.ALL);
}
Example #30
Source File: TestFileAppend4.java (from RDFS, Apache License 2.0)
/**
 * Test that a file is not considered complete when it only has in-progress
 * blocks. This ensures that when a block is appended to, it is converted
 * back into the right kind of "in progress" state.
 */
public void testNotPrematurelyComplete() throws Exception {
  LOG.info("START");
  cluster = new MiniDFSCluster(conf, 3, true, null);
  FileSystem fs1 = cluster.getFileSystem();
  try {
    int halfBlock = (int) BLOCK_SIZE / 2;
    short rep = 3; // replication
    assertTrue(BLOCK_SIZE % 4 == 0);

    file1 = new Path("/delayedReceiveBlock");

    // write 1/2 block & close
    stm = fs1.create(file1, true, (int) BLOCK_SIZE * 2, rep, BLOCK_SIZE);
    AppendTestUtil.write(stm, 0, halfBlock);
    stm.close();

    NameNode nn = cluster.getNameNode();

    LOG.info("======== Appending");
    stm = fs1.append(file1);
    LOG.info("======== Writing");
    AppendTestUtil.write(stm, 0, halfBlock / 2);

    LOG.info("======== Checking progress");
    assertFalse(NameNodeAdapter.checkFileProgress(nn.namesystem,
        "/delayedReceiveBlock", true));

    LOG.info("======== Closing");
    stm.close();
  } catch (Throwable e) {
    e.printStackTrace();
    throw new IOException(e);
  } finally {
    LOG.info("======== Cleaning up");
    fs1.close();
    cluster.shutdown();
  }
}