Java Code Examples for org.apache.hadoop.fs.permission.FsAction#NONE
The following examples show how to use org.apache.hadoop.fs.permission.FsAction#NONE. Each example is taken from a real open-source project; the source file, project, and license are noted above each listing.
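For orientation, FsAction is Hadoop's enum of POSIX-style permission actions (NONE, EXECUTE, WRITE, READ, and their combinations), and FsAction.NONE is the empty permission set. Below is a minimal sketch of the two ways it shows up in the examples that follow: as one component of an FsPermission, and as the identity value for or(). The class name FsActionNoneDemo is mine, not from any of the projects below.

import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;

public class FsActionNoneDemo {
  public static void main(String[] args) {
    // Owner read/write, nothing for group and others: "rw-------" (octal 0600).
    FsPermission ownerOnly =
        new FsPermission(FsAction.READ_WRITE, FsAction.NONE, FsAction.NONE);
    System.out.println(ownerOnly);                             // rw-------

    // NONE implies no action, and is the identity element for or().
    System.out.println(FsAction.NONE.implies(FsAction.READ));  // false
    System.out.println(FsAction.NONE.or(FsAction.READ));       // READ
  }
}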
Example 1
Source File: TestHttpFSFileSystemLocalFileSystem.java From hadoop with Apache License 2.0

@Override
protected void testSetPermission() throws Exception {
  if (Path.WINDOWS) {
    FileSystem fs = FileSystem.get(getProxiedFSConf());
    Path path = new Path(getProxiedFSTestDir(), "foodir");
    fs.mkdirs(path);
    fs = getHttpFSFileSystem();
    FsPermission permission1 =
        new FsPermission(FsAction.READ_WRITE, FsAction.NONE, FsAction.NONE);
    fs.setPermission(path, permission1);
    fs.close();
    fs = FileSystem.get(getProxiedFSConf());
    FileStatus status1 = fs.getFileStatus(path);
    fs.close();
    FsPermission permission2 = status1.getPermission();
    Assert.assertEquals(permission2, permission1);
    // sticky bit not supported on Windows with local file system, so the
    // subclass skips that part of the test
  } else {
    super.testSetPermission();
  }
}
Example 2
Source File: TestBlobMetadata.java From hadoop with Apache License 2.0

@SuppressWarnings("deprecation")
@Test
public void testPermissionMetadata() throws Exception {
  FsPermission justMe =
      new FsPermission(FsAction.READ_WRITE, FsAction.NONE, FsAction.NONE);
  Path selfishFile = new Path("/noOneElse");
  fs.create(selfishFile, justMe, true, 4096,
      fs.getDefaultReplication(), fs.getDefaultBlockSize(), null).close();
  HashMap<String, String> metadata = backingStore
      .getMetadata(AzureBlobStorageTestAccount.toMockUri(selfishFile));
  assertNotNull(metadata);
  String storedPermission = metadata.get("hdi_permission");
  assertEquals(getExpectedPermissionString("rw-------"), storedPermission);
  FileStatus retrievedStatus = fs.getFileStatus(selfishFile);
  assertNotNull(retrievedStatus);
  assertEquals(justMe, retrievedStatus.getPermission());
  assertEquals(getExpectedOwner(), retrievedStatus.getOwner());
  assertEquals(NativeAzureFileSystem.AZURE_DEFAULT_GROUP_DEFAULT,
      retrievedStatus.getGroup());
}
Example 3
Source File: BaseTestHttpFSWith.java From hadoop with Apache License 2.0

private void testCreate(Path path, boolean override) throws Exception {
  FileSystem fs = getHttpFSFileSystem();
  FsPermission permission =
      new FsPermission(FsAction.READ_WRITE, FsAction.NONE, FsAction.NONE);
  OutputStream os = fs.create(new Path(path.toUri().getPath()), permission,
      override, 1024, (short) 2, 100 * 1024 * 1024, null);
  os.write(1);
  os.close();
  fs.close();
  fs = FileSystem.get(getProxiedFSConf());
  FileStatus status = fs.getFileStatus(path);
  if (!isLocalFS()) {
    Assert.assertEquals(status.getReplication(), 2);
    Assert.assertEquals(status.getBlockSize(), 100 * 1024 * 1024);
  }
  Assert.assertEquals(status.getPermission(), permission);
  InputStream is = fs.open(path);
  Assert.assertEquals(is.read(), 1);
  is.close();
  fs.close();
}
Example 4
Source File: BaseTestHttpFSWith.java From big-c with Apache License 2.0

private void testCreate(Path path, boolean override) throws Exception {
  FileSystem fs = getHttpFSFileSystem();
  FsPermission permission =
      new FsPermission(FsAction.READ_WRITE, FsAction.NONE, FsAction.NONE);
  OutputStream os = fs.create(new Path(path.toUri().getPath()), permission,
      override, 1024, (short) 2, 100 * 1024 * 1024, null);
  os.write(1);
  os.close();
  fs.close();
  fs = FileSystem.get(getProxiedFSConf());
  FileStatus status = fs.getFileStatus(path);
  if (!isLocalFS()) {
    Assert.assertEquals(status.getReplication(), 2);
    Assert.assertEquals(status.getBlockSize(), 100 * 1024 * 1024);
  }
  Assert.assertEquals(status.getPermission(), permission);
  InputStream is = fs.open(path);
  Assert.assertEquals(is.read(), 1);
  is.close();
  fs.close();
}
Example 5
Source File: TestHttpFSFileSystemLocalFileSystem.java From big-c with Apache License 2.0

@Override
protected void testSetPermission() throws Exception {
  if (Path.WINDOWS) {
    FileSystem fs = FileSystem.get(getProxiedFSConf());
    Path path = new Path(getProxiedFSTestDir(), "foodir");
    fs.mkdirs(path);
    fs = getHttpFSFileSystem();
    FsPermission permission1 =
        new FsPermission(FsAction.READ_WRITE, FsAction.NONE, FsAction.NONE);
    fs.setPermission(path, permission1);
    fs.close();
    fs = FileSystem.get(getProxiedFSConf());
    FileStatus status1 = fs.getFileStatus(path);
    fs.close();
    FsPermission permission2 = status1.getPermission();
    Assert.assertEquals(permission2, permission1);
    // sticky bit not supported on Windows with local file system, so the
    // subclass skips that part of the test
  } else {
    super.testSetPermission();
  }
}
Example 6
Source File: TestBlobMetadata.java From big-c with Apache License 2.0

@SuppressWarnings("deprecation")
@Test
public void testPermissionMetadata() throws Exception {
  FsPermission justMe =
      new FsPermission(FsAction.READ_WRITE, FsAction.NONE, FsAction.NONE);
  Path selfishFile = new Path("/noOneElse");
  fs.create(selfishFile, justMe, true, 4096,
      fs.getDefaultReplication(), fs.getDefaultBlockSize(), null).close();
  HashMap<String, String> metadata = backingStore
      .getMetadata(AzureBlobStorageTestAccount.toMockUri(selfishFile));
  assertNotNull(metadata);
  String storedPermission = metadata.get("hdi_permission");
  assertEquals(getExpectedPermissionString("rw-------"), storedPermission);
  FileStatus retrievedStatus = fs.getFileStatus(selfishFile);
  assertNotNull(retrievedStatus);
  assertEquals(justMe, retrievedStatus.getPermission());
  assertEquals(getExpectedOwner(), retrievedStatus.getOwner());
  assertEquals(NativeAzureFileSystem.AZURE_DEFAULT_GROUP_DEFAULT,
      retrievedStatus.getGroup());
}
Example 7
Source File: FTPFileSystem.java From hadoop-gpu with Apache License 2.0

private FsAction getFsAction(int accessGroup, FTPFile ftpFile) {
  FsAction action = FsAction.NONE;
  // or() returns a new FsAction (the enum is immutable); the published code
  // discarded this result, so the assignments below are a correction.
  if (ftpFile.hasPermission(accessGroup, FTPFile.READ_PERMISSION)) {
    action = action.or(FsAction.READ);
  }
  if (ftpFile.hasPermission(accessGroup, FTPFile.WRITE_PERMISSION)) {
    action = action.or(FsAction.WRITE);
  }
  if (ftpFile.hasPermission(accessGroup, FTPFile.EXECUTE_PERMISSION)) {
    action = action.or(FsAction.EXECUTE);
  }
  return action;
}
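A note on this listing and its duplicates below (Examples 10, 13, and 16): FsAction is an immutable enum, so or() returns a new value rather than mutating its receiver. As published, these getFsAction methods discarded that return value and therefore always returned FsAction.NONE; the assignments shown here are a correction, not part of the original sources. The semantics in two lines:

FsAction a = FsAction.NONE;
a.or(FsAction.READ);      // returns READ, but the result is discarded; a is still NONE
a = a.or(FsAction.READ);  // a is now READ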
Example 8
Source File: SentryPermissions.java From incubator-sentry with Apache License 2.0

private void constructAclEntry(String role, FsAction permission,
    Map<String, FsAction> groupPerms) {
  RoleInfo roleInfo = roles.get(role);
  if (roleInfo != null) {
    for (String group : roleInfo.groups) {
      FsAction fsAction = groupPerms.get(group);
      if (fsAction == null) {
        fsAction = FsAction.NONE;
      }
      groupPerms.put(group, fsAction.or(permission));
    }
  }
}
Example 9
Source File: UpdateableAuthzPermissions.java From incubator-sentry with Apache License 2.0

static FsAction getFAction(String sentryPriv) {
  String[] strPrivs = sentryPriv.trim().split(",");
  FsAction retVal = FsAction.NONE;
  for (String strPriv : strPrivs) {
    retVal = retVal.or(ACTION_MAPPING.get(strPriv.toUpperCase()));
  }
  return retVal;
}
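Here FsAction.NONE is the natural seed for the fold: starting from NONE and or()-ing in each mapped privilege accumulates exactly the granted actions. One caveat: ACTION_MAPPING.get() returns null for an unrecognized privilege string, which would make retVal.or() throw a NullPointerException. A hedged variant that skips unknown entries instead (the null guard and the method name are my additions, not Sentry's):

static FsAction getFActionSafe(String sentryPriv) {
  FsAction retVal = FsAction.NONE;
  for (String strPriv : sentryPriv.trim().split(",")) {
    FsAction mapped = ACTION_MAPPING.get(strPriv.toUpperCase());
    if (mapped != null) {  // unknown privilege contributes nothing
      retVal = retVal.or(mapped);
    }
  }
  return retVal;
}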
Example 10
Source File: FTPFileSystem.java From RDFS with Apache License 2.0

private FsAction getFsAction(int accessGroup, FTPFile ftpFile) {
  FsAction action = FsAction.NONE;
  // assignments added; the published code discarded or()'s result
  // (see the note under Example 7)
  if (ftpFile.hasPermission(accessGroup, FTPFile.READ_PERMISSION)) {
    action = action.or(FsAction.READ);
  }
  if (ftpFile.hasPermission(accessGroup, FTPFile.WRITE_PERMISSION)) {
    action = action.or(FsAction.WRITE);
  }
  if (ftpFile.hasPermission(accessGroup, FTPFile.EXECUTE_PERMISSION)) {
    action = action.or(FsAction.EXECUTE);
  }
  return action;
}
Example 11
Source File: RestorableHivePartitionDataset.java From incubator-gobblin with Apache License 2.0

public void restore() throws IOException {
  State state = new State(this.state);
  this.datasetOwnerFs = ProxyUtils.getOwnerFs(state, this.datasetOwner);
  try (HiveProxyQueryExecutor queryExecutor = ProxyUtils.getQueryExecutor(state,
      this.datasetOwner, this.datasetToRestoreOwner, this.trashOwner)) {
    if (this.state.getPropAsBoolean(ComplianceConfigurationKeys.COMPLIANCE_JOB_SIMULATE,
        ComplianceConfigurationKeys.DEFAULT_COMPLIANCE_JOB_SIMULATE)) {
      log.info("Simulating restore of " + datasetURN() + " with "
          + this.datasetToRestore.datasetURN());
      return;
    }
    Path trashPartitionLocation = getTrashPartitionLocation();
    executeTrashTableQueries(queryExecutor);
    this.datasetOwnerFs.mkdirs(trashPartitionLocation.getParent());
    this.datasetOwnerFs.rename(getLocation(), trashPartitionLocation);
    FsPermission permission =
        new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.NONE);
    HadoopUtils.setPermissions(trashPartitionLocation.getParent(), this.datasetOwner,
        this.trashOwner, this.datasetOwnerFs, permission);
    log.info("Moved dataset " + datasetURN() + " from " + getLocation()
        + " to trash location " + trashPartitionLocation);
    fsMove(this.datasetToRestore.getLocation(), getLocation());
    HadoopUtils.setPermissions(getLocation().getParent(), this.datasetOwner,
        this.trashOwner, this.datasetOwnerFs, permission);
    log.info("Moved data from backup " + this.datasetToRestore.getLocation()
        + " to location " + getLocation());
    executeDropPartitionQueries(queryExecutor);
  }
}
Example 12
Source File: YarnApplicationFileUploader.java From flink with Apache License 2.0

private Path getApplicationDir(final ApplicationId applicationId) throws IOException {
  final Path applicationDir = getApplicationDirPath(homeDir, applicationId);
  if (!fileSystem.exists(applicationDir)) {
    final FsPermission permission =
        new FsPermission(FsAction.ALL, FsAction.NONE, FsAction.NONE);
    fileSystem.mkdirs(applicationDir, permission);
  }
  return applicationDir;
}
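The permission here is owner-only (rwx------, octal 0700), keeping the per-application staging directory private to the submitting user. Assuming Hadoop's short-mode FsPermission constructor, an equivalent spelling would be (a sketch, not from the Flink source):

final FsPermission permission = new FsPermission((short) 0700);
// same as new FsPermission(FsAction.ALL, FsAction.NONE, FsAction.NONE)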
Example 13
Source File: FTPFileSystem.java From big-c with Apache License 2.0

private FsAction getFsAction(int accessGroup, FTPFile ftpFile) {
  FsAction action = FsAction.NONE;
  // assignments added; the published code discarded or()'s result
  // (see the note under Example 7)
  if (ftpFile.hasPermission(accessGroup, FTPFile.READ_PERMISSION)) {
    action = action.or(FsAction.READ);
  }
  if (ftpFile.hasPermission(accessGroup, FTPFile.WRITE_PERMISSION)) {
    action = action.or(FsAction.WRITE);
  }
  if (ftpFile.hasPermission(accessGroup, FTPFile.EXECUTE_PERMISSION)) {
    action = action.or(FsAction.EXECUTE);
  }
  return action;
}
Example 14
Source File: TestBlobMetadata.java From big-c with Apache License 2.0

/**
 * Tests that WASB understands the old-style ASV metadata and changes it when
 * it gets the chance.
 */
@Test
public void testOldPermissionMetadata() throws Exception {
  Path selfishFile = new Path("/noOneElse");
  HashMap<String, String> metadata = new HashMap<String, String>();
  metadata.put("asv_permission", getExpectedPermissionString("rw-------"));
  backingStore.setContent(
      AzureBlobStorageTestAccount.toMockUri(selfishFile),
      new byte[] { }, metadata, false, 0);
  FsPermission justMe = new FsPermission(
      FsAction.READ_WRITE, FsAction.NONE, FsAction.NONE);
  FileStatus retrievedStatus = fs.getFileStatus(selfishFile);
  assertNotNull(retrievedStatus);
  assertEquals(justMe, retrievedStatus.getPermission());
  assertEquals(getExpectedOwner(), retrievedStatus.getOwner());
  assertEquals(NativeAzureFileSystem.AZURE_DEFAULT_GROUP_DEFAULT,
      retrievedStatus.getGroup());
  FsPermission meAndYou = new FsPermission(
      FsAction.READ_WRITE, FsAction.READ_WRITE, FsAction.NONE);
  fs.setPermission(selfishFile, meAndYou);
  metadata = backingStore.getMetadata(
      AzureBlobStorageTestAccount.toMockUri(selfishFile));
  assertNotNull(metadata);
  String storedPermission = metadata.get("hdi_permission");
  assertEquals(getExpectedPermissionString("rw-rw----"), storedPermission);
  assertNull(metadata.get("asv_permission"));
}
Example 15
Source File: TestHadoopFileSystemWrapper.java From dremio-oss with Apache License 2.0

private static FsAction toFsAction(short mode) {
  FsAction result = FsAction.NONE;
  if ((mode & 0001) != 0) {
    result = result.or(FsAction.EXECUTE);
  }
  if ((mode & 0002) != 0) {
    result = result.or(FsAction.WRITE);
  }
  if ((mode & 0004) != 0) {
    result = result.or(FsAction.READ);
  }
  return result;
}
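For the opposite direction, an FsAction can be turned back into the low three mode bits with FsAction.implies(); the helper below is my sketch, not part of the Dremio source:

private static short toMode(FsAction action) {
  short mode = 0;
  if (action.implies(FsAction.READ))    { mode |= 0004; }
  if (action.implies(FsAction.WRITE))   { mode |= 0002; }
  if (action.implies(FsAction.EXECUTE)) { mode |= 0001; }
  return mode;
}

(In Hadoop's FsAction the constants are declared in bit order, so action.ordinal() yields the same three-bit value, but the explicit implies() checks do not rely on that detail.)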
Example 16
Source File: FTPFileSystem.java From hadoop with Apache License 2.0

private FsAction getFsAction(int accessGroup, FTPFile ftpFile) {
  FsAction action = FsAction.NONE;
  // assignments added; the published code discarded or()'s result
  // (see the note under Example 7)
  if (ftpFile.hasPermission(accessGroup, FTPFile.READ_PERMISSION)) {
    action = action.or(FsAction.READ);
  }
  if (ftpFile.hasPermission(accessGroup, FTPFile.WRITE_PERMISSION)) {
    action = action.or(FsAction.WRITE);
  }
  if (ftpFile.hasPermission(accessGroup, FTPFile.EXECUTE_PERMISSION)) {
    action = action.or(FsAction.EXECUTE);
  }
  return action;
}
Example 17
Source File: TestBlobMetadata.java From hadoop with Apache License 2.0

/**
 * Tests that WASB understands the old-style ASV metadata and changes it when
 * it gets the chance.
 */
@Test
public void testOldPermissionMetadata() throws Exception {
  Path selfishFile = new Path("/noOneElse");
  HashMap<String, String> metadata = new HashMap<String, String>();
  metadata.put("asv_permission", getExpectedPermissionString("rw-------"));
  backingStore.setContent(
      AzureBlobStorageTestAccount.toMockUri(selfishFile),
      new byte[] { }, metadata, false, 0);
  FsPermission justMe = new FsPermission(
      FsAction.READ_WRITE, FsAction.NONE, FsAction.NONE);
  FileStatus retrievedStatus = fs.getFileStatus(selfishFile);
  assertNotNull(retrievedStatus);
  assertEquals(justMe, retrievedStatus.getPermission());
  assertEquals(getExpectedOwner(), retrievedStatus.getOwner());
  assertEquals(NativeAzureFileSystem.AZURE_DEFAULT_GROUP_DEFAULT,
      retrievedStatus.getGroup());
  FsPermission meAndYou = new FsPermission(
      FsAction.READ_WRITE, FsAction.READ_WRITE, FsAction.NONE);
  fs.setPermission(selfishFile, meAndYou);
  metadata = backingStore.getMetadata(
      AzureBlobStorageTestAccount.toMockUri(selfishFile));
  assertNotNull(metadata);
  String storedPermission = metadata.get("hdi_permission");
  assertEquals(getExpectedPermissionString("rw-rw----"), storedPermission);
  assertNull(metadata.get("asv_permission"));
}
Example 18
Source File: RangerHdfsAuthorizer.java From ranger with Apache License 2.0

private AuthzStatus checkDefaultEnforcer(String fsOwner, String superGroup,
    UserGroupInformation ugi, INodeAttributes[] inodeAttrs, INode[] inodes,
    byte[][] pathByNameArr, int snapshotId, String path, int ancestorIndex,
    boolean doCheckOwner, FsAction ancestorAccess, FsAction parentAccess,
    FsAction access, FsAction subAccess, boolean ignoreEmptyDir,
    boolean isTraverseOnlyCheck, INode ancestor, INode parent, INode inode,
    RangerHdfsAuditHandler auditHandler) throws AccessControlException {
  if (LOG.isDebugEnabled()) {
    LOG.debug("==> RangerAccessControlEnforcer.checkDefaultEnforcer("
        + "fsOwner=" + fsOwner + "; superGroup=" + superGroup
        + ", inodesCount=" + (inodes != null ? inodes.length : 0)
        + ", snapshotId=" + snapshotId + ", path=" + path
        + ", ancestorIndex=" + ancestorIndex + ", doCheckOwner=" + doCheckOwner
        + ", ancestorAccess=" + ancestorAccess + ", parentAccess=" + parentAccess
        + ", access=" + access + ", subAccess=" + subAccess
        + ", ignoreEmptyDir=" + ignoreEmptyDir
        + ", isTraverseOnlyCheck=" + isTraverseOnlyCheck
        + ", ancestor=" + (ancestor == null ? null : ancestor.getFullPathName())
        + ", parent=" + (parent == null ? null : parent.getFullPathName())
        + ", inode=" + (inode == null ? null : inode.getFullPathName()) + ")");
  }

  AuthzStatus authzStatus = AuthzStatus.NOT_DETERMINED;

  if (rangerPlugin.isHadoopAuthEnabled() && defaultEnforcer != null) {
    RangerPerfTracer hadoopAuthPerf = null;
    if (RangerPerfTracer.isPerfTraceEnabled(PERF_HDFSAUTH_REQUEST_LOG)) {
      hadoopAuthPerf = RangerPerfTracer.getPerfTracer(PERF_HDFSAUTH_REQUEST_LOG,
          "RangerAccessControlEnforcer.checkDefaultEnforcer(path=" + path + ")");
    }
    try {
      defaultEnforcer.checkPermission(fsOwner, superGroup, ugi, inodeAttrs, inodes,
          pathByNameArr, snapshotId, path, ancestorIndex, doCheckOwner,
          ancestorAccess, parentAccess, access, subAccess, ignoreEmptyDir);
      authzStatus = AuthzStatus.ALLOW;
    } finally {
      if (auditHandler != null) {
        INode nodeChecked = inode;
        FsAction action = access;
        if (isTraverseOnlyCheck) {
          if (nodeChecked == null || nodeChecked.isFile()) {
            if (parent != null) {
              nodeChecked = parent;
            } else if (ancestor != null) {
              nodeChecked = ancestor;
            }
          }
          action = FsAction.EXECUTE;
        } else if (action == null || action == FsAction.NONE) {
          if (parentAccess != null && parentAccess != FsAction.NONE) {
            nodeChecked = parent;
            action = parentAccess;
          } else if (ancestorAccess != null && ancestorAccess != FsAction.NONE) {
            nodeChecked = ancestor;
            action = ancestorAccess;
          } else if (subAccess != null && subAccess != FsAction.NONE) {
            action = subAccess;
          }
        }
        String pathChecked = nodeChecked != null ? nodeChecked.getFullPathName() : path;
        auditHandler.logHadoopEvent(pathChecked, action, authzStatus == AuthzStatus.ALLOW);
      }
      RangerPerfTracer.log(hadoopAuthPerf);
    }
  }

  LOG.debug("<== RangerAccessControlEnforcer.checkDefaultEnforcer("
      + "fsOwner=" + fsOwner + "; superGroup=" + superGroup
      + ", inodesCount=" + (inodes != null ? inodes.length : 0)
      + ", snapshotId=" + snapshotId + ", path=" + path
      + ", ancestorIndex=" + ancestorIndex + ", doCheckOwner=" + doCheckOwner
      + ", ancestorAccess=" + ancestorAccess + ", parentAccess=" + parentAccess
      + ", access=" + access + ", subAccess=" + subAccess
      + ", ignoreEmptyDir=" + ignoreEmptyDir
      + ", isTraverseOnlyCheck=" + isTraverseOnlyCheck
      + ", ancestor=" + (ancestor == null ? null : ancestor.getFullPathName())
      + ", parent=" + (parent == null ? null : parent.getFullPathName())
      + ", inode=" + (inode == null ? null : inode.getFullPathName())
      + ") : " + authzStatus);

  return authzStatus;
}
Example 19
Source File: HivePartitionVersionRetentionReaper.java From incubator-gobblin with Apache License 2.0

/**
 * If simulate is set to true, this will simply return.
 * If a version points to a non-existing location, the partition is dropped and the JDBC connection closed.
 * If a version points to the same location as the dataset, the partition is dropped and the JDBC connection closed.
 * If a version is staging, its data is deleted and its metadata dropped.
 * If a version is backup, its data is moved to a backup dir, the current metadata is dropped, and it is
 * registered in the backup db.
 */
@Override
public void clean() throws IOException {
  Path versionLocation = ((HivePartitionRetentionVersion) this.datasetVersion).getLocation();
  Path datasetLocation = ((CleanableHivePartitionDataset) this.cleanableDataset).getLocation();
  String completeName = ((HivePartitionRetentionVersion) this.datasetVersion).datasetURN();
  State state = new State(this.state);
  this.versionOwnerFs = ProxyUtils.getOwnerFs(state, this.versionOwner);
  try (HiveProxyQueryExecutor queryExecutor = ProxyUtils.getQueryExecutor(state,
      this.versionOwner, this.backUpOwner)) {
    if (!this.versionOwnerFs.exists(versionLocation)) {
      log.info("Data versionLocation doesn't exist. Metadata will be dropped for the version "
          + completeName);
    } else if (datasetLocation.toString().equalsIgnoreCase(versionLocation.toString())) {
      log.info("Dataset location is same as version location. Won't delete the data but metadata "
          + "will be dropped for the version " + completeName);
    } else if (this.simulate) {
      log.info("Simulate is set to true. Won't move the version " + completeName);
      return;
    } else if (completeName.contains(ComplianceConfigurationKeys.STAGING)) {
      log.info("Deleting data from version " + completeName);
      this.versionOwnerFs.delete(versionLocation, true);
    } else if (completeName.contains(ComplianceConfigurationKeys.BACKUP)) {
      executeAlterQueries(queryExecutor);
      Path newVersionLocationParent = getNewVersionLocation().getParent();
      log.info("Creating new dir " + newVersionLocationParent.toString());
      this.versionOwnerFs.mkdirs(newVersionLocationParent);
      log.info("Moving data from " + versionLocation + " to " + getNewVersionLocation());
      fsMove(versionLocation, getNewVersionLocation());
      FsPermission permission =
          new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.NONE);
      HadoopUtils.setPermissions(newVersionLocationParent, this.versionOwner,
          this.backUpOwner, this.versionOwnerFs, permission);
    }
    executeDropVersionQueries(queryExecutor);
  }
}
Example 20
Source File: FileBasedJobLockFactory.java From incubator-gobblin with Apache License 2.0

protected static FsPermission getDefaultDirPermissions() {
  return new FsPermission(FsAction.ALL, FsAction.READ_EXECUTE, FsAction.NONE);
}
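This default gives lock directories rwxr-x--- (octal 0750): full access for the owner, list-and-traverse for the group, and FsAction.NONE, i.e. no access, for everyone else. Assuming Hadoop's short-mode constructor, new FsPermission((short) 0750) would construct the same permission.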