org.apache.hadoop.hdfs.server.namenode.INodeAttributes Java Examples
The following examples show how to use
org.apache.hadoop.hdfs.server.namenode.INodeAttributes.
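In HDFS, INodeAttributes instances typically reach user code through an INodeAttributeProvider, the NameNode extension point that external authorizers such as Apache Ranger (Examples #3 through #11 below) implement to substitute their own owner, group, permission, or ACL view of an inode. Before the examples, here is a minimal pass-through provider sketch; the class name is hypothetical, and its method bodies simply keep the default attributes, mirroring Examples #5 and #6.

import org.apache.hadoop.hdfs.server.namenode.INodeAttributeProvider;
import org.apache.hadoop.hdfs.server.namenode.INodeAttributes;

// Minimal sketch (hypothetical class, not taken from any project below):
// a provider that leaves the default HDFS attributes untouched.
public class PassThroughAttributeProvider extends INodeAttributeProvider {
  @Override
  public void start() {
    // no resources to initialize in this sketch
  }

  @Override
  public void stop() {
    // nothing to release
  }

  @Override
  public INodeAttributes getAttributes(String[] pathElements, INodeAttributes inode) {
    return inode; // keep the inode's own attributes
  }
}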
Example #1
Source File: SnapshotFSImageFormat.java From hadoop with Apache License 2.0
/**
 * Save SnapshotDiff list for an INodeDirectoryWithSnapshot.
 * @param diffs The SnapshotDiff list that belongs to the directory.
 * @param out The {@link DataOutput} to write to.
 * @param referenceMap The {@link ReferenceMap} used to record INode references.
 */
private static <N extends INode, A extends INodeAttributes, D extends AbstractINodeDiff<N, A, D>>
    void saveINodeDiffs(final AbstractINodeDiffList<N, A, D> diffs,
        final DataOutput out, ReferenceMap referenceMap) throws IOException {
  // Record the diffs in reversed order, so that we can find the correct
  // reference for INodes in the created list when loading the FSImage
  if (diffs == null) {
    out.writeInt(-1); // no diffs
  } else {
    final List<D> list = diffs.asList();
    final int size = list.size();
    out.writeInt(size);
    for (int i = size - 1; i >= 0; i--) {
      list.get(i).write(out, referenceMap);
    }
  }
}
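The write format above implies a matching read side: a length prefix of -1 means no diff list, otherwise exactly size entries follow, newest first. Below is a minimal sketch of such a loader; the DiffLoader callback is a hypothetical stand-in for Hadoop's actual FSImage diff-loading code, not part of the API.

import java.io.DataInput;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

final class SnapshotDiffReadSketch {
  // Hypothetical callback standing in for Hadoop's per-diff deserialization.
  @FunctionalInterface
  interface DiffLoader<D> {
    D load(DataInput in) throws IOException;
  }

  // Minimal sketch of reading back the format written by saveINodeDiffs.
  static <D> List<D> loadINodeDiffs(DataInput in, DiffLoader<D> loader) throws IOException {
    final int size = in.readInt();
    if (size == -1) {
      return null; // saveINodeDiffs wrote -1 for "no diffs"
    }
    final List<D> diffs = new ArrayList<>(size);
    for (int i = 0; i < size; i++) {
      diffs.add(loader.load(in)); // entries were written in reversed (newest-first) order
    }
    return diffs;
  }
}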
Example #2
Source File: SnapshotFSImageFormat.java From big-c with Apache License 2.0
/**
 * Save SnapshotDiff list for an INodeDirectoryWithSnapshot.
 * @param diffs The SnapshotDiff list that belongs to the directory.
 * @param out The {@link DataOutput} to write to.
 * @param referenceMap The {@link ReferenceMap} used to record INode references.
 */
private static <N extends INode, A extends INodeAttributes, D extends AbstractINodeDiff<N, A, D>>
    void saveINodeDiffs(final AbstractINodeDiffList<N, A, D> diffs,
        final DataOutput out, ReferenceMap referenceMap) throws IOException {
  // Record the diffs in reversed order, so that we can find the correct
  // reference for INodes in the created list when loading the FSImage
  if (diffs == null) {
    out.writeInt(-1); // no diffs
  } else {
    final List<D> list = diffs.asList();
    final int size = list.size();
    out.writeInt(size);
    for (int i = size - 1; i >= 0; i--) {
      list.get(i).write(out, referenceMap);
    }
  }
}
Example #3
Source File: RangerHdfsAuthorizer.java From ranger with Apache License 2.0
@Override
public INodeAttributes getAttributes(String fullPath, INodeAttributes inode) {
  if (LOG.isDebugEnabled()) {
    LOG.debug("==> RangerHdfsAuthorizer.getAttributes(" + fullPath + ")");
  }

  INodeAttributes ret = null;

  try {
    activatePluginClassLoader();

    ret = rangerHdfsAuthorizerImpl.getAttributes(fullPath, inode); // delegate to the plugin implementation
  } finally {
    deactivatePluginClassLoader();
  }

  if (LOG.isDebugEnabled()) {
    LOG.debug("<== RangerHdfsAuthorizer.getAttributes(" + fullPath + "): " + ret);
  }

  return ret;
}
Example #4
Source File: RangerHdfsAuthorizer.java From ranger with Apache License 2.0
@Override
public INodeAttributes getAttributes(String[] pathElements, INodeAttributes inode) {
  if (LOG.isDebugEnabled()) {
    LOG.debug("==> RangerHdfsAuthorizer.getAttributes(pathElementsCount=" + (pathElements == null ? 0 : pathElements.length) + ")");
  }

  INodeAttributes ret = null;

  try {
    activatePluginClassLoader();

    ret = rangerHdfsAuthorizerImpl.getAttributes(pathElements, inode);
  } finally {
    deactivatePluginClassLoader();
  }

  if (LOG.isDebugEnabled()) {
    LOG.debug("<== RangerHdfsAuthorizer.getAttributes(pathElementsCount=" + (pathElements == null ? 0 : pathElements.length) + "): " + ret);
  }

  return ret;
}
Example #5
Source File: RangerHdfsAuthorizer.java From ranger with Apache License 2.0
@Override
public INodeAttributes getAttributes(String fullPath, INodeAttributes inode) {
  if (LOG.isDebugEnabled()) {
    LOG.debug("==> RangerHdfsAuthorizer.getAttributes(" + fullPath + ")");
  }

  INodeAttributes ret = inode; // return default attributes

  if (LOG.isDebugEnabled()) {
    LOG.debug("<== RangerHdfsAuthorizer.getAttributes(" + fullPath + "): " + ret);
  }

  return ret;
}
Example #6
Source File: RangerHdfsAuthorizer.java From ranger with Apache License 2.0
@Override
public INodeAttributes getAttributes(String[] pathElements, INodeAttributes inode) {
  if (LOG.isDebugEnabled()) {
    LOG.debug("==> RangerHdfsAuthorizer.getAttributes(pathElementsCount=" + (pathElements == null ? 0 : pathElements.length) + ")");
  }

  INodeAttributes ret = inode; // return default attributes

  if (LOG.isDebugEnabled()) {
    LOG.debug("<== RangerHdfsAuthorizer.getAttributes(pathElementsCount=" + (pathElements == null ? 0 : pathElements.length) + "): " + ret);
  }

  return ret;
}
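Whichever variant is used, the provider only takes effect once the NameNode is pointed at it. With a standard Ranger HDFS plugin install this is done in hdfs-site.xml via the dfs.namenode.inode.attributes.provider.class setting; the programmatic form below is just an illustration of that wiring, not how it is deployed in practice.

import org.apache.hadoop.conf.Configuration;

public class ProviderWiringSketch {
  public static void main(String[] args) {
    // Illustrative only: in a real deployment this property lives in
    // hdfs-site.xml. The key is the actual HDFS setting for plugging an
    // INodeAttributeProvider into the NameNode.
    Configuration conf = new Configuration();
    conf.set("dfs.namenode.inode.attributes.provider.class",
        "org.apache.ranger.authorization.hadoop.RangerHdfsAuthorizer");
    System.out.println(conf.get("dfs.namenode.inode.attributes.provider.class"));
  }
}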
Example #7
Source File: RangerHdfsAuthorizer.java From ranger with Apache License 2.0
private AuthzStatus isAccessAllowedForTraversal(INode inode, INodeAttributes inodeAttribs, String path, String user, Set<String> groups, RangerHdfsPlugin plugin, RangerHdfsAuditHandler auditHandler, boolean skipAuditOnAllow) {
  final AuthzStatus ret;
  String pathOwner = inodeAttribs != null ? inodeAttribs.getUserName() : null;
  FsAction access = FsAction.EXECUTE;

  if (pathOwner == null) {
    pathOwner = inode.getUserName();
  }

  if (RangerHadoopConstants.HDFS_ROOT_FOLDER_PATH_ALT.equals(path)) {
    path = HDFS_ROOT_FOLDER_PATH;
  }

  if (LOG.isDebugEnabled()) {
    LOG.debug("==> RangerAccessControlEnforcer.isAccessAllowedForTraversal(" + path + ", " + access + ", " + user + ", " + skipAuditOnAllow + ")");
  }

  RangerHdfsAccessRequest request = new RangerHdfsAccessRequest(inode, path, pathOwner, access, EXECUTE_ACCCESS_TYPE, user, groups);

  RangerAccessResult result = plugin.isAccessAllowed(request, null);

  if (result != null && result.getIsAccessDetermined() && !result.getIsAllowed()) {
    ret = AuthzStatus.DENY;
  } else {
    ret = AuthzStatus.ALLOW;
  }

  if (ret == AuthzStatus.DENY || (!skipAuditOnAllow && result != null && result.getIsAccessDetermined())) {
    auditHandler.processResult(result);
  }

  if (LOG.isDebugEnabled()) {
    LOG.debug("<== RangerAccessControlEnforcer.isAccessAllowedForTraversal(" + path + ", " + access + ", " + user + ", " + skipAuditOnAllow + "): " + ret);
  }

  return ret;
}
Example #8
Source File: RangerHdfsAuthorizer.java From ranger with Apache License 2.0
private AuthzStatus traverseOnlyCheck(INode inode, INodeAttributes[] inodeAttrs, String path, byte[][] components, INode parent, INode ancestor, int ancestorIndex,
                                      String user, Set<String> groups, RangerHdfsPlugin plugin, RangerHdfsAuditHandler auditHandler) {
  if (LOG.isDebugEnabled()) {
    LOG.debug("==> RangerAccessControlEnforcer.traverseOnlyCheck(path=" + path + ", user=" + user + ", groups=" + groups + ")");
  }

  final AuthzStatus ret;

  INode nodeToCheck = inode;
  INodeAttributes nodeAttribs = inodeAttrs.length > 0 ? inodeAttrs[inodeAttrs.length - 1] : null;
  boolean skipAuditOnAllow = false;
  String resourcePath = path;

  if (nodeToCheck == null || nodeToCheck.isFile()) {
    // the target inode is absent or a file: run the traversal check against
    // the deepest available directory (parent, else ancestor) instead
    skipAuditOnAllow = true;

    if (parent != null) {
      nodeToCheck = parent;
      nodeAttribs = inodeAttrs.length > 1 ? inodeAttrs[inodeAttrs.length - 2] : null;
      resourcePath = inodeAttrs.length > 0 ? DFSUtil.byteArray2PathString(components, 0, inodeAttrs.length - 1) : HDFS_ROOT_FOLDER_PATH;
    } else if (ancestor != null) {
      nodeToCheck = ancestor;
      nodeAttribs = inodeAttrs.length > ancestorIndex ? inodeAttrs[ancestorIndex] : null;
      resourcePath = nodeAttribs != null ? DFSUtil.byteArray2PathString(components, 0, ancestorIndex + 1) : HDFS_ROOT_FOLDER_PATH;
    }
  }

  if (nodeToCheck != null) {
    // strip a trailing separator so the resource path matches policy format
    if (resourcePath.length() > 1) {
      if (resourcePath.endsWith(HDFS_ROOT_FOLDER_PATH)) {
        resourcePath = resourcePath.substring(0, resourcePath.length() - 1);
      }
    }

    ret = isAccessAllowedForTraversal(nodeToCheck, nodeAttribs, resourcePath, user, groups, plugin, auditHandler, skipAuditOnAllow);
  } else {
    ret = AuthzStatus.ALLOW;
  }

  if (LOG.isDebugEnabled()) {
    LOG.debug("<== RangerAccessControlEnforcer.traverseOnlyCheck(path=" + path + ", resourcePath=" + resourcePath + ", user=" + user + ", groups=" + groups + ") : " + ret);
  }

  return ret;
}
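The resourcePath above is rebuilt from the raw byte[][] path components with DFSUtil.byteArray2PathString(components, offset, length). A hypothetical stand-in for that helper shows the shape of the conversion: the first component is the empty root, so joining with '/' yields an absolute path.

import java.nio.charset.StandardCharsets;

final class PathJoinSketch {
  // Hypothetical stand-in for DFSUtil.byteArray2PathString(components, offset, length):
  // joins `length` components starting at `offset` with '/'.
  // e.g. {"", "a", "b"} with offset 0, length 3 -> "/a/b"
  static String byteArray2PathString(byte[][] components, int offset, int length) {
    StringBuilder sb = new StringBuilder();
    for (int i = offset; i < offset + length; i++) {
      if (i > offset) {
        sb.append('/');
      }
      sb.append(new String(components[i], StandardCharsets.UTF_8));
    }
    // a root-only path comes out empty, since the root component is empty
    return sb.length() == 0 ? "/" : sb.toString();
  }
}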
Example #9
Source File: RangerHdfsAuthorizer.java From ranger with Apache License 2.0
private AuthzStatus checkDefaultEnforcer(String fsOwner, String superGroup, UserGroupInformation ugi,
                                         INodeAttributes[] inodeAttrs, INode[] inodes, byte[][] pathByNameArr,
                                         int snapshotId, String path, int ancestorIndex, boolean doCheckOwner,
                                         FsAction ancestorAccess, FsAction parentAccess, FsAction access,
                                         FsAction subAccess, boolean ignoreEmptyDir, boolean isTraverseOnlyCheck,
                                         INode ancestor, INode parent, INode inode,
                                         RangerHdfsAuditHandler auditHandler) throws AccessControlException {
  if (LOG.isDebugEnabled()) {
    LOG.debug("==> RangerAccessControlEnforcer.checkDefaultEnforcer(fsOwner=" + fsOwner + "; superGroup=" + superGroup
        + ", inodesCount=" + (inodes != null ? inodes.length : 0) + ", snapshotId=" + snapshotId
        + ", path=" + path + ", ancestorIndex=" + ancestorIndex + ", doCheckOwner=" + doCheckOwner
        + ", ancestorAccess=" + ancestorAccess + ", parentAccess=" + parentAccess + ", access=" + access
        + ", subAccess=" + subAccess + ", ignoreEmptyDir=" + ignoreEmptyDir
        + ", isTraverseOnlyCheck=" + isTraverseOnlyCheck
        + ", ancestor=" + (ancestor == null ? null : ancestor.getFullPathName())
        + ", parent=" + (parent == null ? null : parent.getFullPathName())
        + ", inode=" + (inode == null ? null : inode.getFullPathName()) + ")");
  }

  AuthzStatus authzStatus = AuthzStatus.NOT_DETERMINED;

  if (rangerPlugin.isHadoopAuthEnabled() && defaultEnforcer != null) {
    RangerPerfTracer hadoopAuthPerf = null;

    if (RangerPerfTracer.isPerfTraceEnabled(PERF_HDFSAUTH_REQUEST_LOG)) {
      hadoopAuthPerf = RangerPerfTracer.getPerfTracer(PERF_HDFSAUTH_REQUEST_LOG, "RangerAccessControlEnforcer.checkDefaultEnforcer(path=" + path + ")");
    }

    try {
      // checkPermission() throws AccessControlException on deny
      defaultEnforcer.checkPermission(fsOwner, superGroup, ugi, inodeAttrs, inodes, pathByNameArr, snapshotId,
          path, ancestorIndex, doCheckOwner, ancestorAccess, parentAccess, access, subAccess, ignoreEmptyDir);

      authzStatus = AuthzStatus.ALLOW;
    } finally {
      if (auditHandler != null) {
        INode nodeChecked = inode;
        FsAction action = access;

        if (isTraverseOnlyCheck) {
          if (nodeChecked == null || nodeChecked.isFile()) {
            if (parent != null) {
              nodeChecked = parent;
            } else if (ancestor != null) {
              nodeChecked = ancestor;
            }
          }

          action = FsAction.EXECUTE;
        } else if (action == null || action == FsAction.NONE) {
          if (parentAccess != null && parentAccess != FsAction.NONE) {
            nodeChecked = parent;
            action = parentAccess;
          } else if (ancestorAccess != null && ancestorAccess != FsAction.NONE) {
            nodeChecked = ancestor;
            action = ancestorAccess;
          } else if (subAccess != null && subAccess != FsAction.NONE) {
            action = subAccess;
          }
        }

        String pathChecked = nodeChecked != null ? nodeChecked.getFullPathName() : path;

        auditHandler.logHadoopEvent(pathChecked, action, authzStatus == AuthzStatus.ALLOW);
      }

      RangerPerfTracer.log(hadoopAuthPerf);
    }
  }

  if (LOG.isDebugEnabled()) {
    LOG.debug("<== RangerAccessControlEnforcer.checkDefaultEnforcer(fsOwner=" + fsOwner + "; superGroup=" + superGroup
        + ", inodesCount=" + (inodes != null ? inodes.length : 0) + ", snapshotId=" + snapshotId
        + ", path=" + path + ", ancestorIndex=" + ancestorIndex + ", doCheckOwner=" + doCheckOwner
        + ", ancestorAccess=" + ancestorAccess + ", parentAccess=" + parentAccess + ", access=" + access
        + ", subAccess=" + subAccess + ", ignoreEmptyDir=" + ignoreEmptyDir
        + ", isTraverseOnlyCheck=" + isTraverseOnlyCheck
        + ", ancestor=" + (ancestor == null ? null : ancestor.getFullPathName())
        + ", parent=" + (parent == null ? null : parent.getFullPathName())
        + ", inode=" + (inode == null ? null : inode.getFullPathName()) + ") : " + authzStatus);
  }

  return authzStatus;
}
Example #10
Source File: RangerHdfsAuthorizer.java From ranger with Apache License 2.0
private AuthzStatus isAccessAllowed(INode inode, INodeAttributes inodeAttribs, String path, FsAction access, String user, Set<String> groups, RangerHdfsPlugin plugin, RangerHdfsAuditHandler auditHandler) {
  AuthzStatus ret = null;
  String pathOwner = inodeAttribs != null ? inodeAttribs.getUserName() : null;

  if (pathOwner == null && inode != null) {
    pathOwner = inode.getUserName();
  }

  if (RangerHadoopConstants.HDFS_ROOT_FOLDER_PATH_ALT.equals(path)) {
    path = HDFS_ROOT_FOLDER_PATH;
  }

  if (LOG.isDebugEnabled()) {
    LOG.debug("==> RangerAccessControlEnforcer.isAccessAllowed(" + path + ", " + access + ", " + user + ")");
  }

  Set<String> accessTypes = access2ActionListMapper.get(access);

  if (accessTypes == null) {
    LOG.warn("RangerAccessControlEnforcer.isAccessAllowed(" + path + ", " + access + ", " + user + "): no Ranger accessType found for " + access);

    accessTypes = access2ActionListMapper.get(FsAction.NONE);
  }

  for (String accessType : accessTypes) {
    RangerHdfsAccessRequest request = new RangerHdfsAccessRequest(inode, path, pathOwner, access, accessType, user, groups);

    RangerAccessResult result = plugin.isAccessAllowed(request, auditHandler);

    if (result == null || !result.getIsAccessDetermined()) {
      ret = AuthzStatus.NOT_DETERMINED;
      // don't break yet; a subsequent accessType could be denied
    } else if (!result.getIsAllowed()) { // explicit deny
      ret = AuthzStatus.DENY;
      break;
    } else { // allowed
      if (!AuthzStatus.NOT_DETERMINED.equals(ret)) { // set to ALLOW only if there was no NOT_DETERMINED earlier
        ret = AuthzStatus.ALLOW;
      }
    }
  }

  if (ret == null) {
    ret = AuthzStatus.NOT_DETERMINED;
  }

  if (LOG.isDebugEnabled()) {
    LOG.debug("<== RangerAccessControlEnforcer.isAccessAllowed(" + path + ", " + access + ", " + user + "): " + ret);
  }

  return ret;
}
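The access2ActionListMapper lookup above resolves one FsAction into the set of Ranger access types that each get checked in the loop. A minimal sketch of how such a map could be populated follows; the concrete read/write/execute mapping is an assumption modeled on the vocabulary this code uses, not a verbatim copy of Ranger's initialization.

import java.util.Arrays;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import org.apache.hadoop.fs.permission.FsAction;

final class AccessMapSketch {
  // Assumed mapping: each FsAction resolves to the Ranger access types that
  // must all be allowed; compound actions map to multiple types.
  static Map<FsAction, Set<String>> buildAccess2ActionListMapper() {
    Map<FsAction, Set<String>> map = new HashMap<>();
    map.put(FsAction.NONE, new HashSet<>());
    map.put(FsAction.READ, new HashSet<>(Arrays.asList("read")));
    map.put(FsAction.WRITE, new HashSet<>(Arrays.asList("write")));
    map.put(FsAction.EXECUTE, new HashSet<>(Arrays.asList("execute")));
    map.put(FsAction.READ_WRITE, new HashSet<>(Arrays.asList("read", "write")));
    map.put(FsAction.READ_EXECUTE, new HashSet<>(Arrays.asList("read", "execute")));
    map.put(FsAction.WRITE_EXECUTE, new HashSet<>(Arrays.asList("write", "execute")));
    map.put(FsAction.ALL, new HashSet<>(Arrays.asList("read", "write", "execute")));
    return map;
  }
}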
Example #11
Source File: RangerHdfsAuthorizer.java From ranger with Apache License 2.0
private AuthzStatus isAccessAllowedForHierarchy(INode inode, INodeAttributes inodeAttribs, String path, FsAction access, String user, Set<String> groups, RangerHdfsPlugin plugin) {
  AuthzStatus ret = null;
  String pathOwner = inodeAttribs != null ? inodeAttribs.getUserName() : null;

  if (pathOwner == null && inode != null) {
    pathOwner = inode.getUserName();
  }

  if (RangerHadoopConstants.HDFS_ROOT_FOLDER_PATH_ALT.equals(path)) {
    path = HDFS_ROOT_FOLDER_PATH;
  }

  if (LOG.isDebugEnabled()) {
    LOG.debug("==> RangerAccessControlEnforcer.isAccessAllowedForHierarchy(" + path + ", " + access + ", " + user + ")");
  }

  if (path != null) {
    Set<String> accessTypes = access2ActionListMapper.get(access);

    if (accessTypes == null) {
      LOG.warn("RangerAccessControlEnforcer.isAccessAllowedForHierarchy(" + path + ", " + access + ", " + user + "): no Ranger accessType found for " + access);

      accessTypes = access2ActionListMapper.get(FsAction.NONE);
    }

    String subDirPath = path;

    if (subDirPath.charAt(subDirPath.length() - 1) != Path.SEPARATOR_CHAR) {
      subDirPath = subDirPath + Character.toString(Path.SEPARATOR_CHAR);
    }

    subDirPath = subDirPath + rangerPlugin.getRandomizedWildcardPathName();

    for (String accessType : accessTypes) {
      RangerHdfsAccessRequest request = new RangerHdfsAccessRequest(null, subDirPath, pathOwner, access, accessType, user, groups);

      RangerAccessResult result = plugin.isAccessAllowed(request, null);

      if (result == null || !result.getIsAccessDetermined()) {
        ret = AuthzStatus.NOT_DETERMINED;
        // don't break yet; a subsequent accessType could be denied
      } else if (!result.getIsAllowed()) { // explicit deny
        ret = AuthzStatus.DENY;
        break;
      } else { // allowed
        if (!AuthzStatus.NOT_DETERMINED.equals(ret)) { // set to ALLOW only if there was no NOT_DETERMINED earlier
          ret = AuthzStatus.ALLOW;
        }
      }
    }
  }

  if (ret == null) {
    ret = AuthzStatus.NOT_DETERMINED;
  }

  if (LOG.isDebugEnabled()) {
    LOG.debug("<== RangerAccessControlEnforcer.isAccessAllowedForHierarchy(" + path + ", " + access + ", " + user + "): " + ret);
  }

  return ret;
}
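The hierarchy check avoids recursing into every child by asking one question about a wildcard sub-path: the directory path gets a trailing separator plus a wildcard token, so a single policy evaluation stands in for the whole subtree. A small hypothetical helper isolates that path construction:

final class HierarchyPathSketch {
  // Hypothetical helper mirroring the subDirPath construction above:
  // ensure a trailing '/' and append a wildcard token so one policy
  // evaluation covers the entire subtree.
  static String toHierarchyResource(String path, String wildcardToken) {
    if (path.charAt(path.length() - 1) != '/') {
      path = path + '/';
    }
    return path + wildcardToken;
  }
}

For example, toHierarchyResource("/warehouse", "*") yields "/warehouse/*"; the real code substitutes rangerPlugin.getRandomizedWildcardPathName() for the plain "*" token.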