Java Code Examples for org.apache.hadoop.hdfs.protocol.DatanodeInfo#isDecommissionInProgress()
The following examples show how to use org.apache.hadoop.hdfs.protocol.DatanodeInfo#isDecommissionInProgress(), which reports whether a datanode is currently in the process of being decommissioned. The project and source file for each example are noted above its code.
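Before the project examples, here is a minimal, self-contained sketch of the call in context: it lists the cluster's datanodes and prints the decommissioning state of each. This is an illustration only, not taken from any project below; the class name is hypothetical, and it assumes an HDFS cluster reachable through the default loaded Configuration.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;

// Minimal usage sketch (hypothetical class, not from the examples below).
public class DecommissionProgressReport {
  public static void main(String[] args) throws Exception {
    // Assumes HDFS is the default filesystem in the loaded configuration.
    FileSystem fs = FileSystem.get(new Configuration());
    DistributedFileSystem dfs = (DistributedFileSystem) fs;
    for (DatanodeInfo dn : dfs.getDataNodeStats()) {
      if (dn.isDecommissionInProgress()) {
        System.out.println(dn.getName() + " is being decommissioned");
      } else if (dn.isDecommissioned()) {
        System.out.println(dn.getName() + " is fully decommissioned");
      }
    }
  }
}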
Example 1
Source File: Dispatcher.java (from hadoop, Apache License 2.0)
private boolean shouldIgnore(DatanodeInfo dn) {
  // ignore decommissioned nodes
  final boolean decommissioned = dn.isDecommissioned();
  // ignore decommissioning nodes
  final boolean decommissioning = dn.isDecommissionInProgress();
  // ignore nodes in exclude list
  final boolean excluded = Util.isExcluded(excludedNodes, dn);
  // ignore nodes not in the include list (if include list is not empty)
  final boolean notIncluded = !Util.isIncluded(includedNodes, dn);

  if (decommissioned || decommissioning || excluded || notIncluded) {
    if (LOG.isTraceEnabled()) {
      LOG.trace("Excluding datanode " + dn
          + ": " + decommissioned + ", " + decommissioning
          + ", " + excluded + ", " + notIncluded);
    }
    return true;
  }
  return false;
}
Example 2
Source File: Dispatcher.java (from big-c, Apache License 2.0)
private boolean shouldIgnore(DatanodeInfo dn) {
  // ignore decommissioned nodes
  final boolean decommissioned = dn.isDecommissioned();
  // ignore decommissioning nodes
  final boolean decommissioning = dn.isDecommissionInProgress();
  // ignore nodes in exclude list
  final boolean excluded = Util.isExcluded(excludedNodes, dn);
  // ignore nodes not in the include list (if include list is not empty)
  final boolean notIncluded = !Util.isIncluded(includedNodes, dn);

  if (decommissioned || decommissioning || excluded || notIncluded) {
    if (LOG.isTraceEnabled()) {
      LOG.trace("Excluding datanode " + dn
          + ": " + decommissioned + ", " + decommissioning
          + ", " + excluded + ", " + notIncluded);
    }
    return true;
  }
  return false;
}
Example 3
Source File: DatanodeBenThread.java (from RDFS, Apache License 2.0)
private static Set<DatanodeInfo> getValidDatanodes(JobConf nameNodeConf,
    DatanodeBenRunTimeConstants rtc) throws IOException {
  FileSystem fs = FileSystem.get(nameNodeConf);
  DistributedFileSystem dfs = getDFS(fs);
  HashMap<String, ArrayList<Path>> nsPickLists =
      rtc.pickLists.get(nameNodeConf.get(FileSystem.FS_DEFAULT_NAME_KEY));
  DatanodeInfo[] dnStats = dfs.getLiveDataNodeStats();
  Set<DatanodeInfo> validDatanodes = new HashSet<DatanodeInfo>();
  for (DatanodeInfo dn : dnStats) {
    if (dn.isDecommissioned() || dn.isDecommissionInProgress()) {
      continue;
    }
    if (!nsPickLists.containsKey(dn.getHostName())) {
      continue;
    }
    validDatanodes.add(dn);
  }
  return validDatanodes;
}
Example 4
Source File: TestDecommission.java (from hadoop-gpu, Apache License 2.0)
private boolean checkNodeState(FileSystem filesys, String node,
    NodeState state) throws IOException {
  DistributedFileSystem dfs = (DistributedFileSystem) filesys;
  boolean done = false;
  boolean foundNode = false;
  DatanodeInfo[] datanodes = dfs.getDataNodeStats();
  for (int i = 0; i < datanodes.length; i++) {
    DatanodeInfo dn = datanodes[i];
    if (dn.getName().equals(node)) {
      if (state == NodeState.DECOMMISSIONED) {
        done = dn.isDecommissioned();
      } else if (state == NodeState.DECOMMISSION_INPROGRESS) {
        done = dn.isDecommissionInProgress();
      } else {
        done = (!dn.isDecommissionInProgress() && !dn.isDecommissioned());
      }
      System.out.println(dn.getDatanodeReport());
      foundNode = true;
    }
  }
  if (!foundNode) {
    throw new IOException("Could not find node: " + node);
  }
  return done;
}
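A helper like this is typically driven from a polling loop that waits for the node to reach the desired state. A minimal sketch of such a loop follows; the waitNodeState name and the one-second interval are assumptions for illustration, not taken from the file above.

// Hypothetical wait loop around the checkNodeState() helper shown above.
private void waitNodeState(FileSystem filesys, String node, NodeState state)
    throws IOException {
  while (!checkNodeState(filesys, node, state)) {
    System.out.println("Waiting for node " + node + " to reach " + state);
    try {
      Thread.sleep(1000); // poll once per second (arbitrary interval)
    } catch (InterruptedException e) {
      // retry the check after an interrupted sleep
    }
  }
}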
Example 5
Source File: Decoder.java (from RDFS, Apache License 2.0)
boolean isBlockDecom(LocatedBlock block) {
  // Copy this block iff all good copies are being decommissioned
  boolean allDecommissioning = true;
  for (DatanodeInfo i : block.getLocations()) {
    allDecommissioning &= i.isDecommissionInProgress();
  }
  if (allDecommissioning) {
    return true;
  }
  return false;
}
Example 6
Source File: BlockReconstructor.java (from RDFS, Apache License 2.0)
boolean isBlockDecom(LocatedBlock block) {
  // Copy this block iff all good copies are being decommissioned
  boolean allDecommissioning = true;
  for (DatanodeInfo i : block.getLocations()) {
    allDecommissioning &= i.isDecommissionInProgress();
  }
  if (allDecommissioning) {
    return true;
  }
  return false;
}
Example 7
Source File: BlockReconstructor.java (from RDFS, Apache License 2.0)
List<LocatedBlockWithMetaInfo> lostBlocksInFile(DistributedFileSystem fs,
    String uriPath, FileStatus stat) throws IOException {
  List<LocatedBlockWithMetaInfo> decommissioning =
      new LinkedList<LocatedBlockWithMetaInfo>();
  VersionedLocatedBlocks locatedBlocks;
  int namespaceId = 0;
  int methodFingerprint = 0;
  if (DFSClient.isMetaInfoSuppoted(fs.getClient().namenodeProtocolProxy)) {
    LocatedBlocksWithMetaInfo lbksm = fs.getClient().namenode
        .openAndFetchMetaInfo(uriPath, 0, stat.getLen());
    namespaceId = lbksm.getNamespaceID();
    locatedBlocks = lbksm;
    methodFingerprint = lbksm.getMethodFingerPrint();
    fs.getClient().getNewNameNodeIfNeeded(methodFingerprint);
  } else {
    locatedBlocks = fs.getClient().namenode.open(uriPath, 0, stat.getLen());
  }
  final int dataTransferVersion = locatedBlocks.getDataProtocolVersion();
  for (LocatedBlock b : locatedBlocks.getLocatedBlocks()) {
    if (b.isCorrupt()
        || (b.getLocations().length == 0 && b.getBlockSize() > 0)) {
      // If corrupt, this block is the responsibility of the
      // CorruptBlockReconstructor
      continue;
    }
    // Copy this block iff all good copies are being decommissioned
    boolean allDecommissioning = true;
    for (DatanodeInfo i : b.getLocations()) {
      allDecommissioning &= i.isDecommissionInProgress();
    }
    if (allDecommissioning) {
      decommissioning.add(new LocatedBlockWithMetaInfo(b.getBlock(),
          b.getLocations(), b.getStartOffset(), dataTransferVersion,
          namespaceId, methodFingerprint));
    }
  }
  return decommissioning;
}