Java Code Examples for org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite#ID_UNSPECIFIED
The following examples show how to use org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite#ID_UNSPECIFIED.
You can go to the original project or source file by following the links above each example.
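As the examples below show, BlockStoragePolicySuite.ID_UNSPECIFIED is the sentinel value HDFS uses for an inode whose storage policy has not been set explicitly; callers either skip such files (as the Mover does) or fall back to the parent directory's policy. The snippet below is a minimal sketch of that fallback pattern, mirroring the getStoragePolicyID helper in Examples 16 and 17. It assumes a Hadoop 2.x hadoop-hdfs jar on the classpath; the class name StoragePolicyFallbackDemo and the concrete policy ids are made up for illustration, and later Hadoop releases expose the constant as HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED instead.

import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;

public class StoragePolicyFallbackDemo {

  /** Return the inode's own policy if one is set, otherwise inherit the parent's. */
  static byte resolvePolicy(byte inodePolicy, byte parentPolicy) {
    return inodePolicy != BlockStoragePolicySuite.ID_UNSPECIFIED
        ? inodePolicy
        : parentPolicy;
  }

  public static void main(String[] args) {
    // Hypothetical policy id of the parent directory (not taken from the examples above).
    byte parentPolicy = (byte) 7;
    byte filePolicy = BlockStoragePolicySuite.ID_UNSPECIFIED;

    // The file has no policy of its own, so the parent's id wins.
    System.out.println(resolvePolicy(filePolicy, parentPolicy));  // prints 7

    // A file with an explicit policy keeps it.
    System.out.println(resolvePolicy((byte) 12, parentPolicy));   // prints 12
  }
}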
Example 1
Source File: FSDirStatAndListingOp.java From big-c with Apache License 2.0
static HdfsFileStatus getFileInfo(
    FSDirectory fsd, String src, boolean resolveLink, boolean isRawPath,
    boolean includeStoragePolicy)
    throws IOException {
  String srcs = FSDirectory.normalizePath(src);
  if (srcs.endsWith(HdfsConstants.SEPARATOR_DOT_SNAPSHOT_DIR)) {
    if (fsd.getINode4DotSnapshot(srcs) != null) {
      return new HdfsFileStatus(0, true, 0, 0, 0, 0, null, null, null, null,
          HdfsFileStatus.EMPTY_NAME, -1L, 0, null,
          BlockStoragePolicySuite.ID_UNSPECIFIED);
    }
    return null;
  }
  fsd.readLock();
  try {
    final INodesInPath iip = fsd.getINodesInPath(srcs, resolveLink);
    return getFileInfo(fsd, src, iip, isRawPath, includeStoragePolicy);
  } finally {
    fsd.readUnlock();
  }
}
Example 2
Source File: FSDirStatAndListingOp.java From hadoop with Apache License 2.0
/**
 * Get the file info for a specific file.
 * @param fsd FSDirectory
 * @param src The string representation of the path to the file
 * @param isRawPath true if a /.reserved/raw pathname was passed by the user
 * @param includeStoragePolicy whether to include storage policy
 * @return object containing information regarding the file
 *         or null if file not found
 */
static HdfsFileStatus getFileInfo(
    FSDirectory fsd, String path, INodesInPath src, boolean isRawPath,
    boolean includeStoragePolicy)
    throws IOException {
  fsd.readLock();
  try {
    final INode i = src.getLastINode();
    byte policyId = includeStoragePolicy && i != null && !i.isSymlink() ?
        i.getStoragePolicyID() : BlockStoragePolicySuite.ID_UNSPECIFIED;
    return i == null ? null : createFileStatus(
        fsd, path, HdfsFileStatus.EMPTY_NAME, i, policyId,
        src.getPathSnapshotId(), isRawPath, src);
  } finally {
    fsd.readUnlock();
  }
}
Example 3
Source File: FSDirStatAndListingOp.java From hadoop with Apache License 2.0
static HdfsFileStatus getFileInfo(
    FSDirectory fsd, String src, boolean resolveLink, boolean isRawPath,
    boolean includeStoragePolicy)
    throws IOException {
  String srcs = FSDirectory.normalizePath(src);
  if (srcs.endsWith(HdfsConstants.SEPARATOR_DOT_SNAPSHOT_DIR)) {
    if (fsd.getINode4DotSnapshot(srcs) != null) {
      return new HdfsFileStatus(0, true, 0, 0, 0, 0, null, null, null, null,
          HdfsFileStatus.EMPTY_NAME, -1L, 0, null,
          BlockStoragePolicySuite.ID_UNSPECIFIED);
    }
    return null;
  }
  fsd.readLock();
  try {
    final INodesInPath iip = fsd.getINodesInPath(srcs, resolveLink);
    return getFileInfo(fsd, src, iip, isRawPath, includeStoragePolicy);
  } finally {
    fsd.readUnlock();
  }
}
Example 4
Source File: PBHelper.java From big-c with Apache License 2.0
public static HdfsFileStatus convert(HdfsFileStatusProto fs) {
  if (fs == null)
    return null;
  return new HdfsLocatedFileStatus(
      fs.getLength(), fs.getFileType().equals(FileType.IS_DIR),
      fs.getBlockReplication(), fs.getBlocksize(),
      fs.getModificationTime(), fs.getAccessTime(),
      PBHelper.convert(fs.getPermission()), fs.getOwner(), fs.getGroup(),
      fs.getFileType().equals(FileType.IS_SYMLINK) ?
          fs.getSymlink().toByteArray() : null,
      fs.getPath().toByteArray(),
      fs.hasFileId() ? fs.getFileId() : INodeId.GRANDFATHER_INODE_ID,
      fs.hasLocations() ? PBHelper.convert(fs.getLocations()) : null,
      fs.hasChildrenNum() ? fs.getChildrenNum() : -1,
      fs.hasFileEncryptionInfo() ? convert(fs.getFileEncryptionInfo()) : null,
      fs.hasStoragePolicy() ? (byte) fs.getStoragePolicy()
          : BlockStoragePolicySuite.ID_UNSPECIFIED);
}
Example 5
Source File: INode.java From big-c with Apache License 2.0
/**
 * Count subtree {@link Quota#NAMESPACE} and {@link Quota#STORAGESPACE} usages.
 * Entry point for FSDirectory where blockStoragePolicyId is given its initial
 * value.
 */
public final QuotaCounts computeQuotaUsage(BlockStoragePolicySuite bsps) {
  final byte storagePolicyId = isSymlink() ?
      BlockStoragePolicySuite.ID_UNSPECIFIED : getStoragePolicyID();
  return computeQuotaUsage(bsps, storagePolicyId,
      new QuotaCounts.Builder().build(), true, Snapshot.CURRENT_STATE_ID);
}
Example 6
Source File: Mover.java From big-c with Apache License 2.0
/** @return true if it is necessary to run another round of migration */
private boolean processFile(String fullPath, HdfsLocatedFileStatus status) {
  final byte policyId = status.getStoragePolicy();
  // currently we ignore files with unspecified storage policy
  if (policyId == BlockStoragePolicySuite.ID_UNSPECIFIED) {
    return false;
  }
  final BlockStoragePolicy policy = blockStoragePolicies[policyId];
  if (policy == null) {
    LOG.warn("Failed to get the storage policy of file " + fullPath);
    return false;
  }
  final List<StorageType> types = policy.chooseStorageTypes(
      status.getReplication());

  final LocatedBlocks locatedBlocks = status.getBlockLocations();
  boolean hasRemaining = false;
  final boolean lastBlkComplete = locatedBlocks.isLastBlockComplete();
  List<LocatedBlock> lbs = locatedBlocks.getLocatedBlocks();
  for (int i = 0; i < lbs.size(); i++) {
    if (i == lbs.size() - 1 && !lastBlkComplete) {
      // last block is incomplete, skip it
      continue;
    }
    LocatedBlock lb = lbs.get(i);
    final StorageTypeDiff diff = new StorageTypeDiff(types,
        lb.getStorageTypes());
    if (!diff.removeOverlap(true)) {
      if (scheduleMoves4Block(diff, lb)) {
        hasRemaining |= (diff.existing.size() > 1 &&
            diff.expected.size() > 1);
      }
    }
  }
  return hasRemaining;
}
Example 7
Source File: FSDirStatAndListingOp.java From big-c with Apache License 2.0
/**
 * Currently we only support "ls /xxx/.snapshot" which will return all the
 * snapshots of a directory. The FSCommand Ls will first call getFileInfo to
 * make sure the file/directory exists (before the real getListing call).
 * Since we do not have a real INode for ".snapshot", we return an empty
 * non-null HdfsFileStatus here.
 */
private static HdfsFileStatus getFileInfo4DotSnapshot(
    FSDirectory fsd, String src) throws UnresolvedLinkException {
  if (fsd.getINode4DotSnapshot(src) != null) {
    return new HdfsFileStatus(0, true, 0, 0, 0, 0, null, null, null, null,
        HdfsFileStatus.EMPTY_NAME, -1L, 0, null,
        BlockStoragePolicySuite.ID_UNSPECIFIED);
  }
  return null;
}
Example 8
Source File: JsonUtil.java From big-c with Apache License 2.0
/** Convert a Json map to a HdfsFileStatus object. */
public static HdfsFileStatus toFileStatus(final Map<?, ?> json,
    boolean includesType) {
  if (json == null) {
    return null;
  }
  final Map<?, ?> m = includesType ?
      (Map<?, ?>) json.get(FileStatus.class.getSimpleName()) : json;
  final String localName = (String) m.get("pathSuffix");
  final PathType type = PathType.valueOf((String) m.get("type"));
  final byte[] symlink = type != PathType.SYMLINK ? null
      : DFSUtil.string2Bytes((String) m.get("symlink"));

  final long len = ((Number) m.get("length")).longValue();
  final String owner = (String) m.get("owner");
  final String group = (String) m.get("group");
  final FsPermission permission = toFsPermission((String) m.get("permission"),
      (Boolean) m.get("aclBit"), (Boolean) m.get("encBit"));
  final long aTime = ((Number) m.get("accessTime")).longValue();
  final long mTime = ((Number) m.get("modificationTime")).longValue();
  final long blockSize = ((Number) m.get("blockSize")).longValue();
  final short replication = ((Number) m.get("replication")).shortValue();
  final long fileId = m.containsKey("fileId") ?
      ((Number) m.get("fileId")).longValue() : INodeId.GRANDFATHER_INODE_ID;
  final int childrenNum = getInt(m, "childrenNum", -1);
  final byte storagePolicy = m.containsKey("storagePolicy") ?
      (byte) ((Number) m.get("storagePolicy")).longValue() :
      BlockStoragePolicySuite.ID_UNSPECIFIED;
  return new HdfsFileStatus(len, type == PathType.DIRECTORY, replication,
      blockSize, mTime, aTime, permission, owner, group, symlink,
      DFSUtil.string2Bytes(localName), fileId, childrenNum, null,
      storagePolicy);
}
Example 9
Source File: INode.java From hadoop with Apache License 2.0
/**
 * Count subtree {@link Quota#NAMESPACE} and {@link Quota#STORAGESPACE} usages.
 * Entry point for FSDirectory where blockStoragePolicyId is given its initial
 * value.
 */
public final QuotaCounts computeQuotaUsage(BlockStoragePolicySuite bsps) {
  final byte storagePolicyId = isSymlink() ?
      BlockStoragePolicySuite.ID_UNSPECIFIED : getStoragePolicyID();
  return computeQuotaUsage(bsps, storagePolicyId,
      new QuotaCounts.Builder().build(), true, Snapshot.CURRENT_STATE_ID);
}
Example 10
Source File: FSDirStatAndListingOp.java From hadoop with Apache License 2.0
/**
 * Currently we only support "ls /xxx/.snapshot" which will return all the
 * snapshots of a directory. The FSCommand Ls will first call getFileInfo to
 * make sure the file/directory exists (before the real getListing call).
 * Since we do not have a real INode for ".snapshot", we return an empty
 * non-null HdfsFileStatus here.
 */
private static HdfsFileStatus getFileInfo4DotSnapshot(
    FSDirectory fsd, String src) throws UnresolvedLinkException {
  if (fsd.getINode4DotSnapshot(src) != null) {
    return new HdfsFileStatus(0, true, 0, 0, 0, 0, null, null, null, null,
        HdfsFileStatus.EMPTY_NAME, -1L, 0, null,
        BlockStoragePolicySuite.ID_UNSPECIFIED);
  }
  return null;
}
Example 11
Source File: Mover.java From hadoop with Apache License 2.0
/** @return true if it is necessary to run another round of migration */
private boolean processFile(String fullPath, HdfsLocatedFileStatus status) {
  final byte policyId = status.getStoragePolicy();
  // currently we ignore files with unspecified storage policy
  if (policyId == BlockStoragePolicySuite.ID_UNSPECIFIED) {
    return false;
  }
  final BlockStoragePolicy policy = blockStoragePolicies[policyId];
  if (policy == null) {
    LOG.warn("Failed to get the storage policy of file " + fullPath);
    return false;
  }
  final List<StorageType> types = policy.chooseStorageTypes(
      status.getReplication());

  final LocatedBlocks locatedBlocks = status.getBlockLocations();
  boolean hasRemaining = false;
  final boolean lastBlkComplete = locatedBlocks.isLastBlockComplete();
  List<LocatedBlock> lbs = locatedBlocks.getLocatedBlocks();
  for (int i = 0; i < lbs.size(); i++) {
    if (i == lbs.size() - 1 && !lastBlkComplete) {
      // last block is incomplete, skip it
      continue;
    }
    LocatedBlock lb = lbs.get(i);
    final StorageTypeDiff diff = new StorageTypeDiff(types,
        lb.getStorageTypes());
    if (!diff.removeOverlap(true)) {
      if (scheduleMoves4Block(diff, lb)) {
        hasRemaining |= (diff.existing.size() > 1 &&
            diff.expected.size() > 1);
      }
    }
  }
  return hasRemaining;
}
Example 12
Source File: StoragePolicyAdmin.java From big-c with Apache License 2.0
@Override
public int run(Configuration conf, List<String> args) throws IOException {
  final String path = StringUtils.popOptionWithArgument("-path", args);
  if (path == null) {
    System.err.println("Please specify the path with -path.\nUsage:" +
        getLongUsage());
    return 1;
  }

  final DistributedFileSystem dfs = AdminHelper.getDFS(conf);
  try {
    HdfsFileStatus status = dfs.getClient().getFileInfo(path);
    if (status == null) {
      System.err.println("File/Directory does not exist: " + path);
      return 2;
    }
    byte storagePolicyId = status.getStoragePolicy();
    if (storagePolicyId == BlockStoragePolicySuite.ID_UNSPECIFIED) {
      System.out.println("The storage policy of " + path + " is unspecified");
      return 0;
    }
    BlockStoragePolicy[] policies = dfs.getStoragePolicies();
    for (BlockStoragePolicy p : policies) {
      if (p.getId() == storagePolicyId) {
        System.out.println("The storage policy of " + path + ":\n" + p);
        return 0;
      }
    }
  } catch (Exception e) {
    System.err.println(AdminHelper.prettifyException(e));
    return 2;
  }
  System.err.println("Cannot identify the storage policy for " + path);
  return 2;
}
Example 13
Source File: FSEditLogOp.java From hadoop with Apache License 2.0
private AddCloseOp(FSEditLogOpCodes opCode) {
  super(opCode);
  storagePolicyId = BlockStoragePolicySuite.ID_UNSPECIFIED;
  assert(opCode == OP_ADD || opCode == OP_CLOSE || opCode == OP_APPEND);
}
Example 14
Source File: FSDirStatAndListingOp.java From hadoop with Apache License 2.0
/**
 * Get a partial listing of the indicated directory
 *
 * We will stop when any of the following conditions is met:
 * 1) this.lsLimit files have been added
 * 2) needLocation is true AND enough files have been added such
 *    that at least this.lsLimit block locations are in the response
 *
 * @param fsd FSDirectory
 * @param iip the INodesInPath instance containing all the INodes along the
 *            path
 * @param src the directory name
 * @param startAfter the name to start listing after
 * @param needLocation if block locations are returned
 * @return a partial listing starting after startAfter
 */
private static DirectoryListing getListing(FSDirectory fsd, INodesInPath iip,
    String src, byte[] startAfter, boolean needLocation, boolean isSuperUser)
    throws IOException {
  String srcs = FSDirectory.normalizePath(src);
  final boolean isRawPath = FSDirectory.isReservedRawName(src);

  fsd.readLock();
  try {
    if (srcs.endsWith(HdfsConstants.SEPARATOR_DOT_SNAPSHOT_DIR)) {
      return getSnapshotsListing(fsd, srcs, startAfter);
    }
    final int snapshot = iip.getPathSnapshotId();
    final INode targetNode = iip.getLastINode();
    if (targetNode == null)
      return null;
    byte parentStoragePolicy = isSuperUser ?
        targetNode.getStoragePolicyID() :
        BlockStoragePolicySuite.ID_UNSPECIFIED;

    if (!targetNode.isDirectory()) {
      return new DirectoryListing(
          new HdfsFileStatus[]{createFileStatus(fsd, src,
              HdfsFileStatus.EMPTY_NAME, targetNode, needLocation,
              parentStoragePolicy, snapshot, isRawPath, iip)}, 0);
    }

    final INodeDirectory dirInode = targetNode.asDirectory();
    final ReadOnlyList<INode> contents = dirInode.getChildrenList(snapshot);
    int startChild = INodeDirectory.nextChild(contents, startAfter);
    int totalNumChildren = contents.size();
    int numOfListing = Math.min(totalNumChildren - startChild,
        fsd.getLsLimit());
    int locationBudget = fsd.getLsLimit();
    int listingCnt = 0;
    HdfsFileStatus listing[] = new HdfsFileStatus[numOfListing];
    for (int i = 0; i < numOfListing && locationBudget > 0; i++) {
      INode cur = contents.get(startChild + i);
      byte curPolicy = isSuperUser && !cur.isSymlink() ?
          cur.getLocalStoragePolicyID() :
          BlockStoragePolicySuite.ID_UNSPECIFIED;
      listing[i] = createFileStatus(fsd, src, cur.getLocalNameBytes(), cur,
          needLocation, getStoragePolicyID(curPolicy, parentStoragePolicy),
          snapshot, isRawPath, iip);
      listingCnt++;
      if (needLocation) {
        // Once we hit lsLimit locations, stop.
        // This helps to prevent excessively large response payloads.
        // Approximate #locations with locatedBlockCount() * repl_factor
        LocatedBlocks blks =
            ((HdfsLocatedFileStatus) listing[i]).getBlockLocations();
        locationBudget -= (blks == null) ? 0 :
            blks.locatedBlockCount() * listing[i].getReplication();
      }
    }
    // truncate return array if necessary
    if (listingCnt < numOfListing) {
      listing = Arrays.copyOf(listing, listingCnt);
    }
    return new DirectoryListing(
        listing, totalNumChildren - startChild - listingCnt);
  } finally {
    fsd.readUnlock();
  }
}
Example 15
Source File: FSEditLogOp.java From big-c with Apache License 2.0
@Override
void readFields(DataInputStream in, int logVersion) throws IOException {
  if (!NameNodeLayoutVersion.supports(
      LayoutVersion.Feature.EDITLOG_OP_OPTIMIZATION, logVersion)) {
    this.length = in.readInt();
  }
  if (NameNodeLayoutVersion.supports(
      LayoutVersion.Feature.ADD_INODE_ID, logVersion)) {
    this.inodeId = in.readLong();
  } else {
    // The inodeId should be updated when this editLogOp is applied
    this.inodeId = INodeId.GRANDFATHER_INODE_ID;
  }
  if ((-17 < logVersion && length != 4) ||
      (logVersion <= -17 && length != 5 && !NameNodeLayoutVersion.supports(
          LayoutVersion.Feature.EDITLOG_OP_OPTIMIZATION, logVersion))) {
    throw new IOException("Incorrect data format." +
        " logVersion is " + logVersion +
        " but writables.length is " + length + ". ");
  }
  this.path = FSImageSerialization.readString(in);
  if (NameNodeLayoutVersion.supports(
      LayoutVersion.Feature.EDITLOG_OP_OPTIMIZATION, logVersion)) {
    this.replication = FSImageSerialization.readShort(in);
    this.mtime = FSImageSerialization.readLong(in);
  } else {
    this.replication = readShort(in);
    this.mtime = readLong(in);
  }

  if (NameNodeLayoutVersion.supports(
      LayoutVersion.Feature.FILE_ACCESS_TIME, logVersion)) {
    if (NameNodeLayoutVersion.supports(
        LayoutVersion.Feature.EDITLOG_OP_OPTIMIZATION, logVersion)) {
      this.atime = FSImageSerialization.readLong(in);
    } else {
      this.atime = readLong(in);
    }
  } else {
    this.atime = 0;
  }

  if (NameNodeLayoutVersion.supports(
      LayoutVersion.Feature.EDITLOG_OP_OPTIMIZATION, logVersion)) {
    this.blockSize = FSImageSerialization.readLong(in);
  } else {
    this.blockSize = readLong(in);
  }

  this.blocks = readBlocks(in, logVersion);
  this.permissions = PermissionStatus.read(in);

  if (this.opCode == OP_ADD) {
    aclEntries = AclEditLogUtil.read(in, logVersion);
    this.xAttrs = readXAttrsFromEditLog(in, logVersion);
    this.clientName = FSImageSerialization.readString(in);
    this.clientMachine = FSImageSerialization.readString(in);
    if (NameNodeLayoutVersion.supports(
        NameNodeLayoutVersion.Feature.CREATE_OVERWRITE, logVersion)) {
      this.overwrite = FSImageSerialization.readBoolean(in);
    } else {
      this.overwrite = false;
    }
    if (NameNodeLayoutVersion.supports(
        NameNodeLayoutVersion.Feature.BLOCK_STORAGE_POLICY, logVersion)) {
      this.storagePolicyId = FSImageSerialization.readByte(in);
    } else {
      this.storagePolicyId = BlockStoragePolicySuite.ID_UNSPECIFIED;
    }
    // read clientId and callId
    readRpcIds(in, logVersion);
  } else {
    this.clientName = "";
    this.clientMachine = "";
  }
}
Example 16
Source File: FSDirStatAndListingOp.java From hadoop with Apache License 2.0
private static byte getStoragePolicyID(byte inodePolicy, byte parentPolicy) {
  return inodePolicy != BlockStoragePolicySuite.ID_UNSPECIFIED ? inodePolicy :
      parentPolicy;
}
Example 17
Source File: FSDirStatAndListingOp.java From big-c with Apache License 2.0
private static byte getStoragePolicyID(byte inodePolicy, byte parentPolicy) {
  return inodePolicy != BlockStoragePolicySuite.ID_UNSPECIFIED ? inodePolicy :
      parentPolicy;
}
Example 18
Source File: FSDirStatAndListingOp.java From big-c with Apache License 2.0
/**
 * Get a partial listing of the indicated directory
 *
 * We will stop when any of the following conditions is met:
 * 1) this.lsLimit files have been added
 * 2) needLocation is true AND enough files have been added such
 *    that at least this.lsLimit block locations are in the response
 *
 * @param fsd FSDirectory
 * @param iip the INodesInPath instance containing all the INodes along the
 *            path
 * @param src the directory name
 * @param startAfter the name to start listing after
 * @param needLocation if block locations are returned
 * @return a partial listing starting after startAfter
 */
private static DirectoryListing getListing(FSDirectory fsd, INodesInPath iip,
    String src, byte[] startAfter, boolean needLocation, boolean isSuperUser)
    throws IOException {
  String srcs = FSDirectory.normalizePath(src);
  final boolean isRawPath = FSDirectory.isReservedRawName(src);

  fsd.readLock();
  try {
    if (srcs.endsWith(HdfsConstants.SEPARATOR_DOT_SNAPSHOT_DIR)) {
      return getSnapshotsListing(fsd, srcs, startAfter);
    }
    final int snapshot = iip.getPathSnapshotId();
    final INode targetNode = iip.getLastINode();
    if (targetNode == null)
      return null;
    byte parentStoragePolicy = isSuperUser ?
        targetNode.getStoragePolicyID() :
        BlockStoragePolicySuite.ID_UNSPECIFIED;

    if (!targetNode.isDirectory()) {
      return new DirectoryListing(
          new HdfsFileStatus[]{createFileStatus(fsd, src,
              HdfsFileStatus.EMPTY_NAME, targetNode, needLocation,
              parentStoragePolicy, snapshot, isRawPath, iip)}, 0);
    }

    final INodeDirectory dirInode = targetNode.asDirectory();
    final ReadOnlyList<INode> contents = dirInode.getChildrenList(snapshot);
    int startChild = INodeDirectory.nextChild(contents, startAfter);
    int totalNumChildren = contents.size();
    int numOfListing = Math.min(totalNumChildren - startChild,
        fsd.getLsLimit());
    int locationBudget = fsd.getLsLimit();
    int listingCnt = 0;
    HdfsFileStatus listing[] = new HdfsFileStatus[numOfListing];
    for (int i = 0; i < numOfListing && locationBudget > 0; i++) {
      INode cur = contents.get(startChild + i);
      byte curPolicy = isSuperUser && !cur.isSymlink() ?
          cur.getLocalStoragePolicyID() :
          BlockStoragePolicySuite.ID_UNSPECIFIED;
      listing[i] = createFileStatus(fsd, src, cur.getLocalNameBytes(), cur,
          needLocation, getStoragePolicyID(curPolicy, parentStoragePolicy),
          snapshot, isRawPath, iip);
      listingCnt++;
      if (needLocation) {
        // Once we hit lsLimit locations, stop.
        // This helps to prevent excessively large response payloads.
        // Approximate #locations with locatedBlockCount() * repl_factor
        LocatedBlocks blks =
            ((HdfsLocatedFileStatus) listing[i]).getBlockLocations();
        locationBudget -= (blks == null) ? 0 :
            blks.locatedBlockCount() * listing[i].getReplication();
      }
    }
    // truncate return array if necessary
    if (listingCnt < numOfListing) {
      listing = Arrays.copyOf(listing, listingCnt);
    }
    return new DirectoryListing(
        listing, totalNumChildren - startChild - listingCnt);
  } finally {
    fsd.readUnlock();
  }
}
Example 19
Source File: INode.java From hadoop with Apache License 2.0
/**
 * Get the storage policy ID while computing quota usage
 * @param parentStoragePolicyId the storage policy ID of the parent directory
 * @return the storage policy ID of this INode. Note that for an
 * {@link INodeSymlink} we return {@link BlockStoragePolicySuite#ID_UNSPECIFIED}
 * instead of throwing Exception
 */
public byte getStoragePolicyIDForQuota(byte parentStoragePolicyId) {
  byte localId = isSymlink() ?
      BlockStoragePolicySuite.ID_UNSPECIFIED : getLocalStoragePolicyID();
  return localId != BlockStoragePolicySuite.ID_UNSPECIFIED ?
      localId : parentStoragePolicyId;
}
Example 20
Source File: INode.java From big-c with Apache License 2.0
/**
 * Get the storage policy ID while computing quota usage
 * @param parentStoragePolicyId the storage policy ID of the parent directory
 * @return the storage policy ID of this INode. Note that for an
 * {@link INodeSymlink} we return {@link BlockStoragePolicySuite#ID_UNSPECIFIED}
 * instead of throwing Exception
 */
public byte getStoragePolicyIDForQuota(byte parentStoragePolicyId) {
  byte localId = isSymlink() ?
      BlockStoragePolicySuite.ID_UNSPECIFIED : getLocalStoragePolicyID();
  return localId != BlockStoragePolicySuite.ID_UNSPECIFIED ?
      localId : parentStoragePolicyId;
}