Java Code Examples for org.apache.hadoop.fs.permission.FsPermission#applyUMask()
The following examples show how to use org.apache.hadoop.fs.permission.FsPermission#applyUMask().
Each example is drawn from an open-source project; the source file, project, and license are noted above each snippet.
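Before the project examples, here is a minimal, self-contained sketch of what applyUMask() does: it returns a new FsPermission whose user, group, and other bits have the umask bits cleared, leaving the original object unchanged. This is how a requested (or default) permission becomes the effective permission before a file or directory is created. The class name ApplyUMaskDemo and the hard-coded mode values are illustrative; only FsPermission and Configuration come from hadoop-common.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.permission.FsPermission;

public class ApplyUMaskDemo {
  public static void main(String[] args) {
    // Requested permission 0666 (rw-rw-rw-) and a typical umask of 022.
    FsPermission requested = new FsPermission((short) 0666);
    FsPermission umask = new FsPermission((short) 022);

    // applyUMask() clears the umask bits and returns a new FsPermission;
    // 0666 masked with 022 yields 0644 (rw-r--r--).
    FsPermission effective = requested.applyUMask(umask);
    System.out.println("requested = " + requested);   // rw-rw-rw-
    System.out.println("effective = " + effective);   // rw-r--r--

    // Many of the examples below read the cluster-wide umask from the
    // configuration instead of hard-coding it.
    Configuration conf = new Configuration();
    FsPermission configuredUmask = FsPermission.getUMask(conf);
    System.out.println("configured umask = " + configuredUmask);
  }
}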
Example 1
Source File: DFSClient.java From hadoop with Apache License 2.0

/**
 * Same as {@link #create(String, FsPermission, EnumSet, boolean, short, long,
 * Progressable, int, ChecksumOpt)} with the addition of favoredNodes that is
 * a hint to where the namenode should place the file blocks.
 * The favored nodes hint is not persisted in HDFS. Hence it may be honored
 * at the creation time only. HDFS could move the blocks during balancing or
 * replication, to move the blocks from favored nodes. A value of null means
 * no favored nodes for this create
 */
public DFSOutputStream create(String src, FsPermission permission,
    EnumSet<CreateFlag> flag, boolean createParent, short replication,
    long blockSize, Progressable progress, int buffersize,
    ChecksumOpt checksumOpt, InetSocketAddress[] favoredNodes)
    throws IOException {
  checkOpen();
  if (permission == null) {
    permission = FsPermission.getFileDefault();
  }
  FsPermission masked = permission.applyUMask(dfsClientConf.uMask);
  if (LOG.isDebugEnabled()) {
    LOG.debug(src + ": masked=" + masked);
  }
  final DFSOutputStream result = DFSOutputStream.newStreamForCreate(this,
      src, masked, flag, createParent, replication, blockSize, progress,
      buffersize, dfsClientConf.createChecksum(checksumOpt),
      getFavoredNodesStr(favoredNodes));
  beginFileLease(result.getFileId(), result);
  return result;
}
Example 2
Source File: DFSClient.java From hadoop-gpu with Apache License 2.0

/**
 * Create a directory (or hierarchy of directories) with the given
 * name and permission.
 *
 * @param src The path of the directory being created
 * @param permission The permission of the directory being created.
 *     If permission == null, use {@link FsPermission#getDefault()}.
 * @return True if the operation succeeds.
 * @see ClientProtocol#mkdirs(String, FsPermission)
 */
public boolean mkdirs(String src, FsPermission permission) throws IOException {
  checkOpen();
  if (permission == null) {
    permission = FsPermission.getDefault();
  }
  FsPermission masked = permission.applyUMask(FsPermission.getUMask(conf));
  LOG.debug(src + ": masked=" + masked);
  try {
    return namenode.mkdirs(src, masked);
  } catch (RemoteException re) {
    throw re.unwrapRemoteException(AccessControlException.class,
                                   NSQuotaExceededException.class,
                                   DSQuotaExceededException.class);
  }
}
Example 3
Source File: DFSClient.java From hadoop-gpu with Apache License 2.0

/**
 * Create a new dfs file with the specified block replication
 * with write-progress reporting and return an output stream for writing
 * into the file.
 *
 * @param src stream name
 * @param permission The permission of the file being created.
 *     If permission == null, use {@link FsPermission#getDefault()}.
 * @param overwrite do not check for file existence if true
 * @param replication block replication
 * @return output stream
 * @throws IOException
 * @see ClientProtocol#create(String, FsPermission, String, boolean, short, long)
 */
public OutputStream create(String src,
                           FsPermission permission,
                           boolean overwrite,
                           short replication,
                           long blockSize,
                           Progressable progress,
                           int buffersize) throws IOException {
  checkOpen();
  if (permission == null) {
    permission = FsPermission.getDefault();
  }
  FsPermission masked = permission.applyUMask(FsPermission.getUMask(conf));
  LOG.debug(src + ": masked=" + masked);
  OutputStream result = new DFSOutputStream(src, masked,
      overwrite, replication, blockSize, progress, buffersize,
      conf.getInt("io.bytes.per.checksum", 512));
  leasechecker.put(src, result);
  return result;
}
Example 4
Source File: RawLocalFileSystem.java From lucene-solr with Apache License 2.0

protected boolean mkOneDirWithMode(Path p, File p2f, FsPermission permission)
    throws IOException {
  if (permission == null) {
    permission = FsPermission.getDirDefault();
  }
  permission = permission.applyUMask(FsPermission.getUMask(getConf()));
  if (Shell.WINDOWS && NativeIO.isAvailable()) {
    try {
      NativeIO.Windows.createDirectoryWithMode(p2f, permission.toShort());
      return true;
    } catch (IOException e) {
      if (LOG.isDebugEnabled()) {
        LOG.debug(String.format(
            "NativeIO.createDirectoryWithMode error, path = %s, mode = %o",
            p2f, permission.toShort()), e);
      }
      return false;
    }
  } else {
    boolean b = p2f.mkdir();
    if (b) {
      setPermission(p, permission);
    }
    return b;
  }
}
Example 5
Source File: DFSClient.java From big-c with Apache License 2.0

/**
 * Same as {@link #create(String, FsPermission, EnumSet, boolean, short, long,
 * Progressable, int, ChecksumOpt)} with the addition of favoredNodes that is
 * a hint to where the namenode should place the file blocks.
 * The favored nodes hint is not persisted in HDFS. Hence it may be honored
 * at the creation time only. HDFS could move the blocks during balancing or
 * replication, to move the blocks from favored nodes. A value of null means
 * no favored nodes for this create
 */
public DFSOutputStream create(String src, FsPermission permission,
    EnumSet<CreateFlag> flag, boolean createParent, short replication,
    long blockSize, Progressable progress, int buffersize,
    ChecksumOpt checksumOpt, InetSocketAddress[] favoredNodes)
    throws IOException {
  checkOpen();
  if (permission == null) {
    permission = FsPermission.getFileDefault();
  }
  FsPermission masked = permission.applyUMask(dfsClientConf.uMask);
  if (LOG.isDebugEnabled()) {
    LOG.debug(src + ": masked=" + masked);
  }
  final DFSOutputStream result = DFSOutputStream.newStreamForCreate(this,
      src, masked, flag, createParent, replication, blockSize, progress,
      buffersize, dfsClientConf.createChecksum(checksumOpt),
      getFavoredNodesStr(favoredNodes));
  beginFileLease(result.getFileId(), result);
  return result;
}
Example 6
Source File: TestDistCh.java From big-c with Apache License 2.0

static void checkFileStatus(ChPermissionStatus expected, FileStatus actual) {
  assertEquals(expected.getUserName(), actual.getOwner());
  assertEquals(expected.getGroupName(), actual.getGroup());
  FsPermission perm = expected.getPermission();
  if (actual.isFile() && expected.defaultPerm) {
    perm = perm.applyUMask(UMASK);
  }
  assertEquals(perm, actual.getPermission());
}
Example 7
Source File: TestDistCh.java From RDFS with Apache License 2.0

static void checkFileStatus(PermissionStatus expected, FileStatus actual) {
  assertEquals(expected.getUserName(), actual.getOwner());
  assertEquals(expected.getGroupName(), actual.getGroup());
  FsPermission perm = expected.getPermission();
  if (!actual.isDir()) {
    perm = perm.applyUMask(UMASK);
  }
  assertEquals(perm, actual.getPermission());
}
Example 8
Source File: CommonFSUtils.java From hbase with Apache License 2.0

/**
 * Get the file permissions specified in the configuration, if they are
 * enabled.
 *
 * @param fs filesystem that the file will be created on.
 * @param conf configuration to read for determining if permissions are
 *     enabled and which to use
 * @param permssionConfKey property key in the configuration to use when
 *     finding the permission
 * @return the permission to use when creating a new file on the fs. If
 *     special permissions are not specified in the configuration, then
 *     the default permissions on the fs will be returned.
 */
public static FsPermission getFilePermissions(final FileSystem fs,
    final Configuration conf, final String permssionConfKey) {
  boolean enablePermissions = conf.getBoolean(
      HConstants.ENABLE_DATA_FILE_UMASK, false);

  if (enablePermissions) {
    try {
      FsPermission perm = new FsPermission(FULL_RWX_PERMISSIONS);
      // make sure that we have a mask, if not, go default.
      String mask = conf.get(permssionConfKey);
      if (mask == null) {
        return FsPermission.getFileDefault();
      }
      // apply the umask
      FsPermission umask = new FsPermission(mask);
      return perm.applyUMask(umask);
    } catch (IllegalArgumentException e) {
      LOG.warn("Incorrect umask attempted to be created: "
          + conf.get(permssionConfKey)
          + ", using default file permissions.", e);
      return FsPermission.getFileDefault();
    }
  }
  return FsPermission.getFileDefault();
}
Example 9
Source File: RawLocalFileSystem.java From lucene-solr with Apache License 2.0

private LocalFSFileOutputStream(Path f, boolean append,
    FsPermission permission) throws IOException {
  File file = pathToFile(f);
  if (!append && permission == null) {
    permission = FsPermission.getFileDefault();
  }
  if (permission == null) {
    this.fos = new FileOutputStream(file, append);
  } else {
    permission = permission.applyUMask(FsPermission.getUMask(getConf()));
    if (Shell.WINDOWS && NativeIO.isAvailable()) {
      this.fos = NativeIO.Windows.createFileOutputStreamWithMode(file,
          append, permission.toShort());
    } else {
      this.fos = new FileOutputStream(file, append);
      boolean success = false;
      try {
        setPermission(f, permission);
        success = true;
      } finally {
        if (!success) {
          IOUtils.cleanup(LOG, this.fos);
        }
      }
    }
  }
}
Example 10
Source File: NativeAzureFileSystem.java From big-c with Apache License 2.0

/**
 * Applies the applicable UMASKs on the given permission.
 *
 * @param permission
 *          The permission to mask.
 * @param applyMode
 *          Whether to also apply the default umask.
 * @return The masked permission.
 */
private FsPermission applyUMask(final FsPermission permission,
    final UMaskApplyMode applyMode) {
  FsPermission newPermission = new FsPermission(permission);
  // Apply the default umask - this applies for new files or directories.
  if (applyMode == UMaskApplyMode.NewFile
      || applyMode == UMaskApplyMode.NewDirectory) {
    newPermission = newPermission
        .applyUMask(FsPermission.getUMask(getConf()));
  }
  return newPermission;
}
Example 11
Source File: TestDistCh.java From hadoop with Apache License 2.0

static void checkFileStatus(ChPermissionStatus expected, FileStatus actual) {
  assertEquals(expected.getUserName(), actual.getOwner());
  assertEquals(expected.getGroupName(), actual.getGroup());
  FsPermission perm = expected.getPermission();
  if (actual.isFile() && expected.defaultPerm) {
    perm = perm.applyUMask(UMASK);
  }
  assertEquals(perm, actual.getPermission());
}
Example 12
Source File: WebHdfsFileSystem.java From hadoop with Apache License 2.0

private FsPermission applyUMask(FsPermission permission) {
  if (permission == null) {
    permission = FsPermission.getDefault();
  }
  return permission.applyUMask(FsPermission.getUMask(getConf()));
}
Example 13
Source File: FileSystemTestWrapper.java From big-c with Apache License 2.0

@Override
public FSDataOutputStream create(Path f, EnumSet<CreateFlag> createFlag,
    CreateOpts... opts) throws AccessControlException,
    FileAlreadyExistsException, FileNotFoundException,
    ParentNotDirectoryException, UnsupportedFileSystemException, IOException {
  // Need to translate the FileContext-style options into FileSystem-style

  // Permissions with umask
  CreateOpts.Perms permOpt = CreateOpts.getOpt(
      CreateOpts.Perms.class, opts);
  FsPermission umask = FsPermission.getUMask(fs.getConf());
  FsPermission permission = (permOpt != null) ? permOpt.getValue()
      : FsPermission.getFileDefault().applyUMask(umask);
  permission = permission.applyUMask(umask);
  // Overwrite
  boolean overwrite = createFlag.contains(CreateFlag.OVERWRITE);
  // bufferSize
  int bufferSize = fs.getConf().getInt(
      CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_KEY,
      CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_DEFAULT);
  CreateOpts.BufferSize bufOpt = CreateOpts.getOpt(
      CreateOpts.BufferSize.class, opts);
  bufferSize = (bufOpt != null) ? bufOpt.getValue() : bufferSize;
  // replication
  short replication = fs.getDefaultReplication(f);
  CreateOpts.ReplicationFactor repOpt =
      CreateOpts.getOpt(CreateOpts.ReplicationFactor.class, opts);
  replication = (repOpt != null) ? repOpt.getValue() : replication;
  // blockSize
  long blockSize = fs.getDefaultBlockSize(f);
  CreateOpts.BlockSize blockOpt = CreateOpts.getOpt(
      CreateOpts.BlockSize.class, opts);
  blockSize = (blockOpt != null) ? blockOpt.getValue() : blockSize;
  // Progressable
  Progressable progress = null;
  CreateOpts.Progress progressOpt = CreateOpts.getOpt(
      CreateOpts.Progress.class, opts);
  progress = (progressOpt != null) ? progressOpt.getValue() : progress;
  return fs.create(f, permission, overwrite, bufferSize, replication,
      blockSize, progress);
}
Example 14
Source File: WebHdfsFileSystem.java From big-c with Apache License 2.0

private FsPermission applyUMask(FsPermission permission) {
  if (permission == null) {
    permission = FsPermission.getDefault();
  }
  return permission.applyUMask(FsPermission.getUMask(getConf()));
}
Example 15
Source File: FileSystemTestWrapper.java From hadoop with Apache License 2.0

@Override
public FSDataOutputStream create(Path f, EnumSet<CreateFlag> createFlag,
    CreateOpts... opts) throws AccessControlException,
    FileAlreadyExistsException, FileNotFoundException,
    ParentNotDirectoryException, UnsupportedFileSystemException, IOException {
  // Need to translate the FileContext-style options into FileSystem-style

  // Permissions with umask
  CreateOpts.Perms permOpt = CreateOpts.getOpt(
      CreateOpts.Perms.class, opts);
  FsPermission umask = FsPermission.getUMask(fs.getConf());
  FsPermission permission = (permOpt != null) ? permOpt.getValue()
      : FsPermission.getFileDefault().applyUMask(umask);
  permission = permission.applyUMask(umask);
  // Overwrite
  boolean overwrite = createFlag.contains(CreateFlag.OVERWRITE);
  // bufferSize
  int bufferSize = fs.getConf().getInt(
      CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_KEY,
      CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_DEFAULT);
  CreateOpts.BufferSize bufOpt = CreateOpts.getOpt(
      CreateOpts.BufferSize.class, opts);
  bufferSize = (bufOpt != null) ? bufOpt.getValue() : bufferSize;
  // replication
  short replication = fs.getDefaultReplication(f);
  CreateOpts.ReplicationFactor repOpt =
      CreateOpts.getOpt(CreateOpts.ReplicationFactor.class, opts);
  replication = (repOpt != null) ? repOpt.getValue() : replication;
  // blockSize
  long blockSize = fs.getDefaultBlockSize(f);
  CreateOpts.BlockSize blockOpt = CreateOpts.getOpt(
      CreateOpts.BlockSize.class, opts);
  blockSize = (blockOpt != null) ? blockOpt.getValue() : blockSize;
  // Progressable
  Progressable progress = null;
  CreateOpts.Progress progressOpt = CreateOpts.getOpt(
      CreateOpts.Progress.class, opts);
  progress = (progressOpt != null) ? progressOpt.getValue() : progress;
  return fs.create(f, permission, overwrite, bufferSize, replication,
      blockSize, progress);
}
Example 16
Source File: DFSClient.java From RDFS with Apache License 2.0

/**
 * Create a new dfs file with the specified block replication
 * with write-progress reporting and return an output stream for writing
 * into the file.
 *
 * @param src stream name
 * @param permission The permission of the file being created.
 *     If permission == null, use {@link FsPermission#getDefault()}.
 * @param overwrite do not check for file existence if true
 * @param replication block replication
 * @param forceSync an hdfs sync() operation invokes local filesystem sync
 *     on datanodes.
 * @param doParallelWrites write replicas in parallel
 * @param favoredNodes nodes on which to place replicas if possible
 * @return output stream
 * @throws IOException
 * @see ClientProtocol#create(String, FsPermission, String, boolean, short, long)
 */
public OutputStream create(String src,
                           FsPermission permission,
                           boolean overwrite,
                           boolean createParent,
                           short replication,
                           long blockSize,
                           Progressable progress,
                           int buffersize,
                           int bytesPerChecksum,
                           boolean forceSync,
                           boolean doParallelWrites,
                           InetSocketAddress[] favoredNodes)
    throws IOException {
  checkOpen();
  if (permission == null) {
    permission = FsPermission.getDefault();
  }
  boolean success = false;
  try {
    FsPermission masked = permission.applyUMask(FsPermission.getUMask(conf));
    LOG.debug(src + ": masked=" + masked);

    // For each of the favored nodes, mock up a DatanodeInfo with the IP
    // address and port of that node.
    DatanodeInfo[] favoredNodeInfos = null;
    if (favoredNodes != null) {
      favoredNodeInfos = new DatanodeInfo[favoredNodes.length];
      for (int i = 0; i < favoredNodes.length; i++) {
        favoredNodeInfos[i] = new DatanodeInfo(new DatanodeID(
            favoredNodes[i].getAddress().getHostAddress() + ":"
                + favoredNodes[i].getPort()));
      }
    }

    OutputStream result = new DFSOutputStream(this, src, masked,
        overwrite, createParent, replication, blockSize, progress,
        buffersize, bytesPerChecksum, forceSync, doParallelWrites,
        favoredNodeInfos);
    leasechecker.put(src, result);
    metrics.incNumCreateFileOps();
    if (stats != null) {
      stats.incrementFilesCreated();
    }
    success = true;
    return result;
  } finally {
    if (!success && namenodeProtocolProxy.isMethodSupported(
        "abandonFile", String.class, String.class)) {
      try {
        namenode.abandonFile(src, clientName);
      } catch (RemoteException e) {
        if (e.unwrapRemoteException() instanceof LeaseExpiredException) {
          LOG.debug(String.format(
              "client %s attempting to abandon file %s which it does not own",
              clientName, src), e);
        } else {
          throw e;
        }
      }
    }
  }
}
Example 17
Source File: FileContext.java From big-c with Apache License 2.0

/**
 * Create or overwrite a file on the indicated path and return an output
 * stream for writing into the file.
 *
 * @param f the file name to open
 * @param createFlag gives the semantics of create; see {@link CreateFlag}
 * @param opts file creation options; see {@link Options.CreateOpts}.
 *          <ul>
 *          <li>Progress - to report progress on the operation - default null
 *          <li>Permission - umask is applied against permission: default is
 *          FsPermissions:getDefault()
 *          <li>CreateParent - create missing parent path; default is not
 *          to create parents
 *          <li>The defaults for the following are SS defaults of the file
 *          server implementing the target path. Not all parameters make sense
 *          for all kinds of file system - eg. localFS ignores Blocksize,
 *          replication, checksum
 *          <ul>
 *          <li>BufferSize - buffersize used in FSDataOutputStream
 *          <li>Blocksize - block size for file blocks
 *          <li>ReplicationFactor - replication for blocks
 *          <li>ChecksumParam - Checksum parameters. server default is used
 *          if not specified.
 *          </ul>
 *          </ul>
 *
 * @return {@link FSDataOutputStream} for created file
 *
 * @throws AccessControlException If access is denied
 * @throws FileAlreadyExistsException If file <code>f</code> already exists
 * @throws FileNotFoundException If parent of <code>f</code> does not exist
 *           and <code>createParent</code> is false
 * @throws ParentNotDirectoryException If parent of <code>f</code> is not a
 *           directory.
 * @throws UnsupportedFileSystemException If file system for <code>f</code> is
 *           not supported
 * @throws IOException If an I/O error occurred
 *
 *           Exceptions applicable to file systems accessed over RPC:
 * @throws RpcClientException If an exception occurred in the RPC client
 * @throws RpcServerException If an exception occurred in the RPC server
 * @throws UnexpectedServerException If server implementation throws
 *           undeclared exception to RPC server
 *
 *           RuntimeExceptions:
 * @throws InvalidPathException If path <code>f</code> is not valid
 */
public FSDataOutputStream create(final Path f,
    final EnumSet<CreateFlag> createFlag, Options.CreateOpts... opts)
    throws AccessControlException, FileAlreadyExistsException,
    FileNotFoundException, ParentNotDirectoryException,
    UnsupportedFileSystemException, IOException {
  Path absF = fixRelativePart(f);

  // If one of the options is a permission, extract it & apply umask
  // If not, add a default Perms and apply umask;
  // AbstractFileSystem#create
  CreateOpts.Perms permOpt = CreateOpts.getOpt(CreateOpts.Perms.class, opts);
  FsPermission permission = (permOpt != null) ? permOpt.getValue()
      : FILE_DEFAULT_PERM;
  permission = permission.applyUMask(umask);

  final CreateOpts[] updatedOpts =
      CreateOpts.setOpt(CreateOpts.perms(permission), opts);
  return new FSLinkResolver<FSDataOutputStream>() {
    @Override
    public FSDataOutputStream next(final AbstractFileSystem fs, final Path p)
        throws IOException {
      return fs.create(p, createFlag, updatedOpts);
    }
  }.resolve(this, absF);
}
Example 18
Source File: DFSClient.java From hadoop with Apache License 2.0

/**
 * Create a directory (or hierarchy of directories) with the given
 * name and permission.
 *
 * @param src The path of the directory being created
 * @param permission The permission of the directory being created.
 *     If permission == null, use {@link FsPermission#getDefault()}.
 * @param createParent create missing parent directory if true
 *
 * @return True if the operation succeeds.
 *
 * @see ClientProtocol#mkdirs(String, FsPermission, boolean)
 */
public boolean mkdirs(String src, FsPermission permission,
    boolean createParent) throws IOException {
  if (permission == null) {
    permission = FsPermission.getDefault();
  }
  FsPermission masked = permission.applyUMask(dfsClientConf.uMask);
  return primitiveMkdir(src, masked, createParent);
}
Example 19
Source File: INodeFile.java From RDFS with Apache License 2.0

/**
 * Set the {@link FsPermission} of this {@link INodeFile}.
 * Since this is a file, the {@link FsAction#EXECUTE} action, if any,
 * is ignored.
 */
protected void setPermission(FsPermission permission) {
  super.setPermission(permission.applyUMask(UMASK));
}
Example 20
Source File: INodeFile.java From hadoop-gpu with Apache License 2.0

/**
 * Set the {@link FsPermission} of this {@link INodeFile}.
 * Since this is a file, the {@link FsAction#EXECUTE} action, if any,
 * is ignored.
 */
protected void setPermission(FsPermission permission) {
  super.setPermission(permission.applyUMask(UMASK));
}