Java Code Examples for org.apache.hadoop.hdfs.server.namenode.INode#isFile()
The following examples show how to use org.apache.hadoop.hdfs.server.namenode.INode#isFile().
You can go to the original project or source file by following the links above each example.
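Across these examples, isFile() serves as a type guard before the asFile() downcast (and likewise isDirectory() before asDirectory()). Below is a minimal sketch of that pattern; the visitINode method name and the placeholder handling comments are hypothetical, for illustration only.

// A minimal sketch, assuming only the INode type-check/downcast methods
// used in the examples above. The method name visitINode is hypothetical.
private void visitINode(INode node) {
  if (node.isFile()) {
    // Downcast via asFile() only after isFile() has confirmed the type.
    INodeFile file = node.asFile();
    // ... file-specific handling, e.g. updating a blocks map ...
  } else if (node.isDirectory()) {
    INodeDirectory dir = node.asDirectory();
    // ... directory-specific handling, e.g. iterating over children ...
  }
}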
Example 1
Source File: SnapshotFSImageFormat.java From hadoop with Apache License 2.0
/**
 * Load the deleted list from the fsimage.
 *
 * @param parent The directory that the deleted list belongs to.
 * @param createdList The created list associated with the deleted list in
 *                    the same Diff.
 * @param in The {@link DataInput} to read.
 * @param loader The {@link Loader} instance.
 * @return The deleted list.
 */
private static List<INode> loadDeletedList(INodeDirectory parent,
    List<INode> createdList, DataInput in, FSImageFormat.Loader loader)
    throws IOException {
  int deletedSize = in.readInt();
  List<INode> deletedList = new ArrayList<INode>(deletedSize);
  for (int i = 0; i < deletedSize; i++) {
    final INode deleted = loader.loadINodeWithLocalName(true, in, true);
    deletedList.add(deleted);
    // set parent: the parent field of an INode in the deleted list is not
    // useful, but set the parent here to be consistent with the original
    // fsdir tree.
    deleted.setParent(parent);
    if (deleted.isFile()) {
      loader.updateBlocksMap(deleted.asFile());
    }
  }
  return deletedList;
}
Example 2
Source File: SnapshotFSImageFormat.java From big-c with Apache License 2.0
/**
 * Load the deleted list from the fsimage.
 *
 * @param parent The directory that the deleted list belongs to.
 * @param createdList The created list associated with the deleted list in
 *                    the same Diff.
 * @param in The {@link DataInput} to read.
 * @param loader The {@link Loader} instance.
 * @return The deleted list.
 */
private static List<INode> loadDeletedList(INodeDirectory parent,
    List<INode> createdList, DataInput in, FSImageFormat.Loader loader)
    throws IOException {
  int deletedSize = in.readInt();
  List<INode> deletedList = new ArrayList<INode>(deletedSize);
  for (int i = 0; i < deletedSize; i++) {
    final INode deleted = loader.loadINodeWithLocalName(true, in, true);
    deletedList.add(deleted);
    // set parent: the parent field of an INode in the deleted list is not
    // useful, but set the parent here to be consistent with the original
    // fsdir tree.
    deleted.setParent(parent);
    if (deleted.isFile()) {
      loader.updateBlocksMap(deleted.asFile());
    }
  }
  return deletedList;
}
Example 3
Source File: RangerHdfsAuthorizer.java From ranger with Apache License 2.0
private void buildRequestContext(final INode inode) {
  if (inode.isFile()) {
    String fileName = inode.getLocalName();

    RangerAccessRequestUtil.setTokenInContext(getContext(), RangerHdfsAuthorizer.KEY_FILENAME, fileName);

    int lastExtensionSeparatorIndex = fileName.lastIndexOf(RangerHdfsPlugin.getFileNameExtensionSeparator());

    if (lastExtensionSeparatorIndex != -1) {
      String baseFileName = fileName.substring(0, lastExtensionSeparatorIndex);

      RangerAccessRequestUtil.setTokenInContext(getContext(), RangerHdfsAuthorizer.KEY_BASE_FILENAME, baseFileName);
    }
  }
}
Example 4
Source File: DirectoryWithSnapshotFeature.java From hadoop with Apache License 2.0
/**
 * Destroy a subtree under a DstReference node.
 */
public static void destroyDstSubtree(
    final BlockStoragePolicySuite bsps, INode inode, final int snapshot,
    final int prior, final BlocksMapUpdateInfo collectedBlocks,
    final List<INode> removedINodes) throws QuotaExceededException {
  Preconditions.checkArgument(prior != Snapshot.NO_SNAPSHOT_ID);
  if (inode.isReference()) {
    if (inode instanceof INodeReference.WithName
        && snapshot != Snapshot.CURRENT_STATE_ID) {
      // this inode has been renamed before the deletion of the DstReference
      // subtree
      inode.cleanSubtree(bsps, snapshot, prior, collectedBlocks, removedINodes);
    } else {
      // for DstReference node, continue this process to its subtree
      destroyDstSubtree(bsps, inode.asReference().getReferredINode(), snapshot,
          prior, collectedBlocks, removedINodes);
    }
  } else if (inode.isFile()) {
    inode.cleanSubtree(bsps, snapshot, prior, collectedBlocks, removedINodes);
  } else if (inode.isDirectory()) {
    Map<INode, INode> excludedNodes = null;
    INodeDirectory dir = inode.asDirectory();
    DirectoryWithSnapshotFeature sf = dir.getDirectoryWithSnapshotFeature();
    if (sf != null) {
      DirectoryDiffList diffList = sf.getDiffs();
      DirectoryDiff priorDiff = diffList.getDiffById(prior);
      if (priorDiff != null && priorDiff.getSnapshotId() == prior) {
        List<INode> dList = priorDiff.diff.getList(ListType.DELETED);
        excludedNodes = cloneDiffList(dList);
      }

      if (snapshot != Snapshot.CURRENT_STATE_ID) {
        diffList.deleteSnapshotDiff(bsps, snapshot, prior, dir,
            collectedBlocks, removedINodes);
      }

      priorDiff = diffList.getDiffById(prior);
      if (priorDiff != null && priorDiff.getSnapshotId() == prior) {
        priorDiff.diff.destroyCreatedList(bsps, dir, collectedBlocks,
            removedINodes);
      }
    }
    for (INode child : inode.asDirectory().getChildrenList(prior)) {
      if (excludedNodes != null && excludedNodes.containsKey(child)) {
        continue;
      }
      destroyDstSubtree(bsps, child, snapshot, prior, collectedBlocks,
          removedINodes);
    }
  }
}
Example 5
Source File: DirectoryWithSnapshotFeature.java From hadoop with Apache License 2.0
/**
 * Clean an inode while we move it from the deleted list of post to the
 * deleted list of prior.
 * @param bsps The block storage policy suite.
 * @param inode The inode to clean.
 * @param post The post snapshot.
 * @param prior The id of the prior snapshot.
 * @param collectedBlocks Used to collect blocks for later deletion.
 * @return Quota usage update.
 */
private static QuotaCounts cleanDeletedINode(
    final BlockStoragePolicySuite bsps, INode inode, final int post,
    final int prior, final BlocksMapUpdateInfo collectedBlocks,
    final List<INode> removedINodes) {
  QuotaCounts counts = new QuotaCounts.Builder().build();
  Deque<INode> queue = new ArrayDeque<INode>();
  queue.addLast(inode);
  while (!queue.isEmpty()) {
    INode topNode = queue.pollFirst();
    if (topNode instanceof INodeReference.WithName) {
      INodeReference.WithName wn = (INodeReference.WithName) topNode;
      if (wn.getLastSnapshotId() >= post) {
        INodeReference.WithCount wc =
            (INodeReference.WithCount) wn.getReferredINode();
        if (wc.getLastWithName() == wn && wc.getParentReference() == null) {
          // this wn is the last wn inside of the wc, also the dstRef node has
          // been deleted. In this case, we should treat the referred file/dir
          // as normal case
          queue.add(wc.getReferredINode());
        } else {
          wn.cleanSubtree(bsps, post, prior, collectedBlocks, removedINodes);
        }
      }
      // For DstReference node, since the node is not in the created list of
      // prior, we should treat it as regular file/dir
    } else if (topNode.isFile() && topNode.asFile().isWithSnapshot()) {
      INodeFile file = topNode.asFile();
      counts.add(file.getDiffs().deleteSnapshotDiff(bsps, post, prior, file,
          collectedBlocks, removedINodes));
    } else if (topNode.isDirectory()) {
      INodeDirectory dir = topNode.asDirectory();
      ChildrenDiff priorChildrenDiff = null;
      DirectoryWithSnapshotFeature sf = dir.getDirectoryWithSnapshotFeature();
      if (sf != null) {
        // delete files/dirs created after prior. Note that these
        // files/dirs, along with inode, were deleted right after post.
        DirectoryDiff priorDiff = sf.getDiffs().getDiffById(prior);
        if (priorDiff != null && priorDiff.getSnapshotId() == prior) {
          priorChildrenDiff = priorDiff.getChildrenDiff();
          counts.add(priorChildrenDiff.destroyCreatedList(bsps, dir,
              collectedBlocks, removedINodes));
        }
      }

      for (INode child : dir.getChildrenList(prior)) {
        if (priorChildrenDiff != null && priorChildrenDiff.search(
            ListType.DELETED, child.getLocalNameBytes()) != null) {
          continue;
        }
        queue.addLast(child);
      }
    }
  }
  return counts;
}
Example 6
Source File: FSImageFormatPBSnapshot.java From hadoop with Apache License 2.0
private void addToDeletedList(INode dnode, INodeDirectory parent) {
  dnode.setParent(parent);
  if (dnode.isFile()) {
    updateBlocksMap(dnode.asFile(), fsn.getBlockManager());
  }
}
Example 7
Source File: CacheReplicationMonitor.java From hadoop with Apache License 2.0
/**
 * Scan all CacheDirectives. Use the information to figure out
 * what cache replication factor each block should have.
 */
private void rescanCacheDirectives() {
  FSDirectory fsDir = namesystem.getFSDirectory();
  final long now = new Date().getTime();
  for (CacheDirective directive : cacheManager.getCacheDirectives()) {
    scannedDirectives++;
    // Skip processing this entry if it has expired
    if (directive.getExpiryTime() > 0 && directive.getExpiryTime() <= now) {
      LOG.debug("Directive {}: the directive expired at {} (now = {})",
          directive.getId(), directive.getExpiryTime(), now);
      continue;
    }
    String path = directive.getPath();
    INode node;
    try {
      node = fsDir.getINode(path);
    } catch (UnresolvedLinkException e) {
      // We don't cache through symlinks
      LOG.debug("Directive {}: got UnresolvedLinkException while resolving "
          + "path {}", directive.getId(), path);
      continue;
    }
    if (node == null) {
      LOG.debug("Directive {}: No inode found at {}", directive.getId(),
          path);
    } else if (node.isDirectory()) {
      INodeDirectory dir = node.asDirectory();
      ReadOnlyList<INode> children = dir
          .getChildrenList(Snapshot.CURRENT_STATE_ID);
      for (INode child : children) {
        if (child.isFile()) {
          rescanFile(directive, child.asFile());
        }
      }
    } else if (node.isFile()) {
      rescanFile(directive, node.asFile());
    } else {
      LOG.debug("Directive {}: ignoring non-directive, non-file inode {} ",
          directive.getId(), node);
    }
  }
}
Example 8
Source File: DirectoryWithSnapshotFeature.java From big-c with Apache License 2.0
/**
 * Destroy a subtree under a DstReference node.
 */
public static void destroyDstSubtree(
    final BlockStoragePolicySuite bsps, INode inode, final int snapshot,
    final int prior, final BlocksMapUpdateInfo collectedBlocks,
    final List<INode> removedINodes) throws QuotaExceededException {
  Preconditions.checkArgument(prior != Snapshot.NO_SNAPSHOT_ID);
  if (inode.isReference()) {
    if (inode instanceof INodeReference.WithName
        && snapshot != Snapshot.CURRENT_STATE_ID) {
      // this inode has been renamed before the deletion of the DstReference
      // subtree
      inode.cleanSubtree(bsps, snapshot, prior, collectedBlocks, removedINodes);
    } else {
      // for DstReference node, continue this process to its subtree
      destroyDstSubtree(bsps, inode.asReference().getReferredINode(), snapshot,
          prior, collectedBlocks, removedINodes);
    }
  } else if (inode.isFile()) {
    inode.cleanSubtree(bsps, snapshot, prior, collectedBlocks, removedINodes);
  } else if (inode.isDirectory()) {
    Map<INode, INode> excludedNodes = null;
    INodeDirectory dir = inode.asDirectory();
    DirectoryWithSnapshotFeature sf = dir.getDirectoryWithSnapshotFeature();
    if (sf != null) {
      DirectoryDiffList diffList = sf.getDiffs();
      DirectoryDiff priorDiff = diffList.getDiffById(prior);
      if (priorDiff != null && priorDiff.getSnapshotId() == prior) {
        List<INode> dList = priorDiff.diff.getList(ListType.DELETED);
        excludedNodes = cloneDiffList(dList);
      }

      if (snapshot != Snapshot.CURRENT_STATE_ID) {
        diffList.deleteSnapshotDiff(bsps, snapshot, prior, dir,
            collectedBlocks, removedINodes);
      }

      priorDiff = diffList.getDiffById(prior);
      if (priorDiff != null && priorDiff.getSnapshotId() == prior) {
        priorDiff.diff.destroyCreatedList(bsps, dir, collectedBlocks,
            removedINodes);
      }
    }
    for (INode child : inode.asDirectory().getChildrenList(prior)) {
      if (excludedNodes != null && excludedNodes.containsKey(child)) {
        continue;
      }
      destroyDstSubtree(bsps, child, snapshot, prior, collectedBlocks,
          removedINodes);
    }
  }
}
Example 9
Source File: DirectoryWithSnapshotFeature.java From big-c with Apache License 2.0
/**
 * Clean an inode while we move it from the deleted list of post to the
 * deleted list of prior.
 * @param bsps The block storage policy suite.
 * @param inode The inode to clean.
 * @param post The post snapshot.
 * @param prior The id of the prior snapshot.
 * @param collectedBlocks Used to collect blocks for later deletion.
 * @return Quota usage update.
 */
private static QuotaCounts cleanDeletedINode(
    final BlockStoragePolicySuite bsps, INode inode, final int post,
    final int prior, final BlocksMapUpdateInfo collectedBlocks,
    final List<INode> removedINodes) {
  QuotaCounts counts = new QuotaCounts.Builder().build();
  Deque<INode> queue = new ArrayDeque<INode>();
  queue.addLast(inode);
  while (!queue.isEmpty()) {
    INode topNode = queue.pollFirst();
    if (topNode instanceof INodeReference.WithName) {
      INodeReference.WithName wn = (INodeReference.WithName) topNode;
      if (wn.getLastSnapshotId() >= post) {
        wn.cleanSubtree(bsps, post, prior, collectedBlocks, removedINodes);
      }
      // For DstReference node, since the node is not in the created list of
      // prior, we should treat it as regular file/dir
    } else if (topNode.isFile() && topNode.asFile().isWithSnapshot()) {
      INodeFile file = topNode.asFile();
      counts.add(file.getDiffs().deleteSnapshotDiff(bsps, post, prior, file,
          collectedBlocks, removedINodes));
    } else if (topNode.isDirectory()) {
      INodeDirectory dir = topNode.asDirectory();
      ChildrenDiff priorChildrenDiff = null;
      DirectoryWithSnapshotFeature sf = dir.getDirectoryWithSnapshotFeature();
      if (sf != null) {
        // delete files/dirs created after prior. Note that these
        // files/dirs, along with inode, were deleted right after post.
        DirectoryDiff priorDiff = sf.getDiffs().getDiffById(prior);
        if (priorDiff != null && priorDiff.getSnapshotId() == prior) {
          priorChildrenDiff = priorDiff.getChildrenDiff();
          counts.add(priorChildrenDiff.destroyCreatedList(bsps, dir,
              collectedBlocks, removedINodes));
        }
      }

      for (INode child : dir.getChildrenList(prior)) {
        if (priorChildrenDiff != null && priorChildrenDiff.search(
            ListType.DELETED, child.getLocalNameBytes()) != null) {
          continue;
        }
        queue.addLast(child);
      }
    }
  }
  return counts;
}
Example 10
Source File: FSImageFormatPBSnapshot.java From big-c with Apache License 2.0
private void addToDeletedList(INode dnode, INodeDirectory parent) {
  dnode.setParent(parent);
  if (dnode.isFile()) {
    updateBlocksMap(dnode.asFile(), fsn.getBlockManager());
  }
}
Example 11
Source File: CacheReplicationMonitor.java From big-c with Apache License 2.0
/**
 * Scan all CacheDirectives. Use the information to figure out
 * what cache replication factor each block should have.
 */
private void rescanCacheDirectives() {
  FSDirectory fsDir = namesystem.getFSDirectory();
  final long now = new Date().getTime();
  for (CacheDirective directive : cacheManager.getCacheDirectives()) {
    scannedDirectives++;
    // Skip processing this entry if it has expired
    if (directive.getExpiryTime() > 0 && directive.getExpiryTime() <= now) {
      LOG.debug("Directive {}: the directive expired at {} (now = {})",
          directive.getId(), directive.getExpiryTime(), now);
      continue;
    }
    String path = directive.getPath();
    INode node;
    try {
      node = fsDir.getINode(path);
    } catch (UnresolvedLinkException e) {
      // We don't cache through symlinks
      LOG.debug("Directive {}: got UnresolvedLinkException while resolving "
          + "path {}", directive.getId(), path);
      continue;
    }
    if (node == null) {
      LOG.debug("Directive {}: No inode found at {}", directive.getId(),
          path);
    } else if (node.isDirectory()) {
      INodeDirectory dir = node.asDirectory();
      ReadOnlyList<INode> children = dir
          .getChildrenList(Snapshot.CURRENT_STATE_ID);
      for (INode child : children) {
        if (child.isFile()) {
          rescanFile(directive, child.asFile());
        }
      }
    } else if (node.isFile()) {
      rescanFile(directive, node.asFile());
    } else {
      LOG.debug("Directive {}: ignoring non-directive, non-file inode {} ",
          directive.getId(), node);
    }
  }
}
Example 12
Source File: RangerHdfsAuthorizer.java From ranger with Apache License 2.0
private AuthzStatus traverseOnlyCheck(INode inode, INodeAttributes[] inodeAttrs, String path, byte[][] components,
                                      INode parent, INode ancestor, int ancestorIndex,
                                      String user, Set<String> groups, RangerHdfsPlugin plugin,
                                      RangerHdfsAuditHandler auditHandler) {
  if (LOG.isDebugEnabled()) {
    LOG.debug("==> RangerAccessControlEnforcer.traverseOnlyCheck("
        + "path=" + path + ", user=" + user + ", groups=" + groups + ")");
  }

  final AuthzStatus ret;

  INode           nodeToCheck      = inode;
  INodeAttributes nodeAttribs      = inodeAttrs.length > 0 ? inodeAttrs[inodeAttrs.length - 1] : null;
  boolean         skipAuditOnAllow = false;
  String          resourcePath     = path;

  if (nodeToCheck == null || nodeToCheck.isFile()) {
    skipAuditOnAllow = true;

    if (parent != null) {
      nodeToCheck  = parent;
      nodeAttribs  = inodeAttrs.length > 1 ? inodeAttrs[inodeAttrs.length - 2] : null;
      resourcePath = inodeAttrs.length > 0 ? DFSUtil.byteArray2PathString(components, 0, inodeAttrs.length - 1) : HDFS_ROOT_FOLDER_PATH;
    } else if (ancestor != null) {
      nodeToCheck  = ancestor;
      nodeAttribs  = inodeAttrs.length > ancestorIndex ? inodeAttrs[ancestorIndex] : null;
      resourcePath = nodeAttribs != null ? DFSUtil.byteArray2PathString(components, 0, ancestorIndex + 1) : HDFS_ROOT_FOLDER_PATH;
    }
  }

  if (nodeToCheck != null) {
    if (resourcePath.length() > 1) {
      if (resourcePath.endsWith(HDFS_ROOT_FOLDER_PATH)) {
        resourcePath = resourcePath.substring(0, resourcePath.length() - 1);
      }
    }
    ret = isAccessAllowedForTraversal(nodeToCheck, nodeAttribs, resourcePath, user, groups, plugin, auditHandler, skipAuditOnAllow);
  } else {
    ret = AuthzStatus.ALLOW;
  }

  if (LOG.isDebugEnabled()) {
    LOG.debug("<== RangerAccessControlEnforcer.traverseOnlyCheck("
        + "path=" + path + ", resourcePath=" + resourcePath + ", user=" + user + ", groups=" + groups + ") : " + ret);
  }

  return ret;
}
Example 13
Source File: RangerHdfsAuthorizer.java From ranger with Apache License 2.0
private AuthzStatus checkDefaultEnforcer(String fsOwner, String superGroup, UserGroupInformation ugi,
                                         INodeAttributes[] inodeAttrs, INode[] inodes, byte[][] pathByNameArr,
                                         int snapshotId, String path, int ancestorIndex, boolean doCheckOwner,
                                         FsAction ancestorAccess, FsAction parentAccess, FsAction access,
                                         FsAction subAccess, boolean ignoreEmptyDir,
                                         boolean isTraverseOnlyCheck, INode ancestor,
                                         INode parent, INode inode, RangerHdfsAuditHandler auditHandler)
    throws AccessControlException {
  if (LOG.isDebugEnabled()) {
    LOG.debug("==> RangerAccessControlEnforcer.checkDefaultEnforcer("
        + "fsOwner=" + fsOwner + "; superGroup=" + superGroup
        + ", inodesCount=" + (inodes != null ? inodes.length : 0)
        + ", snapshotId=" + snapshotId + ", path=" + path + ", ancestorIndex=" + ancestorIndex
        + ", doCheckOwner=" + doCheckOwner + ", ancestorAccess=" + ancestorAccess
        + ", parentAccess=" + parentAccess + ", access=" + access + ", subAccess=" + subAccess
        + ", ignoreEmptyDir=" + ignoreEmptyDir + ", isTraverseOnlyCheck=" + isTraverseOnlyCheck
        + ",ancestor=" + (ancestor == null ? null : ancestor.getFullPathName())
        + ", parent=" + (parent == null ? null : parent.getFullPathName())
        + ", inode=" + (inode == null ? null : inode.getFullPathName()) + ")");
  }

  AuthzStatus authzStatus = AuthzStatus.NOT_DETERMINED;

  if (rangerPlugin.isHadoopAuthEnabled() && defaultEnforcer != null) {
    RangerPerfTracer hadoopAuthPerf = null;

    if (RangerPerfTracer.isPerfTraceEnabled(PERF_HDFSAUTH_REQUEST_LOG)) {
      hadoopAuthPerf = RangerPerfTracer.getPerfTracer(PERF_HDFSAUTH_REQUEST_LOG,
          "RangerAccessControlEnforcer.checkDefaultEnforcer(path=" + path + ")");
    }

    try {
      defaultEnforcer.checkPermission(fsOwner, superGroup, ugi, inodeAttrs, inodes,
          pathByNameArr, snapshotId, path, ancestorIndex, doCheckOwner,
          ancestorAccess, parentAccess, access, subAccess, ignoreEmptyDir);

      authzStatus = AuthzStatus.ALLOW;
    } finally {
      if (auditHandler != null) {
        INode    nodeChecked = inode;
        FsAction action      = access;

        if (isTraverseOnlyCheck) {
          if (nodeChecked == null || nodeChecked.isFile()) {
            if (parent != null) {
              nodeChecked = parent;
            } else if (ancestor != null) {
              nodeChecked = ancestor;
            }
          }

          action = FsAction.EXECUTE;
        } else if (action == null || action == FsAction.NONE) {
          if (parentAccess != null && parentAccess != FsAction.NONE) {
            nodeChecked = parent;
            action      = parentAccess;
          } else if (ancestorAccess != null && ancestorAccess != FsAction.NONE) {
            nodeChecked = ancestor;
            action      = ancestorAccess;
          } else if (subAccess != null && subAccess != FsAction.NONE) {
            action = subAccess;
          }
        }

        String pathChecked = nodeChecked != null ? nodeChecked.getFullPathName() : path;

        auditHandler.logHadoopEvent(pathChecked, action, authzStatus == AuthzStatus.ALLOW);
      }

      RangerPerfTracer.log(hadoopAuthPerf);
    }
  }

  LOG.debug("<== RangerAccessControlEnforcer.checkDefaultEnforcer("
      + "fsOwner=" + fsOwner + "; superGroup=" + superGroup
      + ", inodesCount=" + (inodes != null ? inodes.length : 0)
      + ", snapshotId=" + snapshotId + ", path=" + path + ", ancestorIndex=" + ancestorIndex
      + ", doCheckOwner=" + doCheckOwner + ", ancestorAccess=" + ancestorAccess
      + ", parentAccess=" + parentAccess + ", access=" + access + ", subAccess=" + subAccess
      + ", ignoreEmptyDir=" + ignoreEmptyDir + ", isTraverseOnlyCheck=" + isTraverseOnlyCheck
      + ",ancestor=" + (ancestor == null ? null : ancestor.getFullPathName())
      + ", parent=" + (parent == null ? null : parent.getFullPathName())
      + ", inode=" + (inode == null ? null : inode.getFullPathName()) + ") : " + authzStatus);

  return authzStatus;
}