Java Code Examples for org.apache.hadoop.hdfs.util.ReadOnlyList#size()
The following examples show how to use
org.apache.hadoop.hdfs.util.ReadOnlyList#size().
Each example is drawn from an open source project; the source file, originating project, and license are noted above each listing.
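Before the project examples, here is a minimal sketch of the pattern they all share: a ReadOnlyList is an immutable, index-addressable view, so size() typically bounds a plain indexed loop over get(int). The helper name printChildNames is hypothetical; the Hadoop-internal types are the same ones the examples below use.

import org.apache.hadoop.hdfs.server.namenode.INode;
import org.apache.hadoop.hdfs.server.namenode.INodeDirectory;
import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
import org.apache.hadoop.hdfs.util.ReadOnlyList;

// Hypothetical helper: walk a directory's current children by index.
static void printChildNames(INodeDirectory dir) {
  // getChildrenList returns a read-only view of the children.
  ReadOnlyList<INode> children =
      dir.getChildrenList(Snapshot.CURRENT_STATE_ID);
  for (int i = 0; i < children.size(); i++) {
    // get(i) is the companion accessor to size().
    System.out.println(children.get(i).getLocalName());
  }
}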
Example 1
Source File: TestSnapshotRename.java, from the hadoop project (Apache License 2.0). An identical example appears in the big-c project.
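This JUnit helper asserts that a snapshottable directory's snapshot list is consistent: size() pins the expected length before the indexed loop compares each snapshot's root name, first in name order and then in snapshot-creation order via the directory diff list.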
/**
 * Check the correctness of snapshot list within snapshottable dir
 */
private void checkSnapshotList(INodeDirectory srcRoot,
    String[] sortedNames, String[] names) {
  assertTrue(srcRoot.isSnapshottable());
  ReadOnlyList<Snapshot> listByName = srcRoot
      .getDirectorySnapshottableFeature().getSnapshotList();
  assertEquals(sortedNames.length, listByName.size());
  for (int i = 0; i < listByName.size(); i++) {
    assertEquals(sortedNames[i], listByName.get(i).getRoot().getLocalName());
  }
  List<DirectoryDiff> listByTime = srcRoot.getDiffs().asList();
  assertEquals(names.length, listByTime.size());
  for (int i = 0; i < listByTime.size(); i++) {
    Snapshot s = srcRoot.getDirectorySnapshottableFeature().getSnapshotById(
        listByTime.get(i).getSnapshotId());
    assertEquals(names[i], s.getRoot().getLocalName());
  }
}
Example 2
Source File: FSDirStatAndListingOp.java, from the hadoop project (Apache License 2.0). An identical example appears in the big-c project.
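Here size() bounds the page of results when listing a directory's .snapshot pseudo-directory. ReadOnlyList.Util.binarySearch follows the usual java.util convention of returning -(insertionPoint) - 1 when the key is absent, which is why a negative result is normalized with -skipSize - 1; the number of entries remaining after the skip is then capped by the server-side ls limit.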
/**
 * Get a listing of all the snapshots of a snapshottable directory
 */
private static DirectoryListing getSnapshotsListing(
    FSDirectory fsd, String src, byte[] startAfter)
    throws IOException {
  Preconditions.checkState(fsd.hasReadLock());
  Preconditions.checkArgument(
      src.endsWith(HdfsConstants.SEPARATOR_DOT_SNAPSHOT_DIR),
      "%s does not end with %s", src, HdfsConstants.SEPARATOR_DOT_SNAPSHOT_DIR);

  final String dirPath = FSDirectory.normalizePath(src.substring(0,
      src.length() - HdfsConstants.DOT_SNAPSHOT_DIR.length()));

  final INode node = fsd.getINode(dirPath);
  final INodeDirectory dirNode = INodeDirectory.valueOf(node, dirPath);
  final DirectorySnapshottableFeature sf =
      dirNode.getDirectorySnapshottableFeature();
  if (sf == null) {
    throw new SnapshotException(
        "Directory is not a snapshottable directory: " + dirPath);
  }
  final ReadOnlyList<Snapshot> snapshots = sf.getSnapshotList();
  int skipSize = ReadOnlyList.Util.binarySearch(snapshots, startAfter);
  skipSize = skipSize < 0 ? -skipSize - 1 : skipSize + 1;
  int numOfListing = Math.min(snapshots.size() - skipSize, fsd.getLsLimit());
  final HdfsFileStatus listing[] = new HdfsFileStatus[numOfListing];
  for (int i = 0; i < numOfListing; i++) {
    Snapshot.Root sRoot = snapshots.get(i + skipSize).getRoot();
    listing[i] = createFileStatus(fsd, src, sRoot.getLocalNameBytes(),
        sRoot, BlockStoragePolicySuite.ID_UNSPECIFIED,
        Snapshot.CURRENT_STATE_ID, false, INodesInPath.fromINode(sRoot));
  }
  return new DirectoryListing(
      listing, snapshots.size() - skipSize - numOfListing);
}
Example 3
Source File: FSImageFormatPBINode.java, from the hadoop project (Apache License 2.0). An identical example appears in the big-c project.
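During fsimage saving, size() acts as a guard: a protobuf DirEntry is emitted only for directories whose children list is non-empty. Note also that refList.size() - 1 doubles as the index of the INodeReference just appended to the saver's reference list.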
void serializeINodeDirectorySection(OutputStream out) throws IOException {
  Iterator<INodeWithAdditionalFields> iter = fsn.getFSDirectory()
      .getINodeMap().getMapIterator();
  final ArrayList<INodeReference> refList = parent.getSaverContext()
      .getRefList();
  int i = 0;
  while (iter.hasNext()) {
    INodeWithAdditionalFields n = iter.next();
    if (!n.isDirectory()) {
      continue;
    }

    ReadOnlyList<INode> children = n.asDirectory().getChildrenList(
        Snapshot.CURRENT_STATE_ID);
    if (children.size() > 0) {
      INodeDirectorySection.DirEntry.Builder b = INodeDirectorySection.
          DirEntry.newBuilder().setParent(n.getId());
      for (INode inode : children) {
        if (!inode.isReference()) {
          b.addChildren(inode.getId());
        } else {
          refList.add(inode.asReference());
          b.addRefChildren(refList.size() - 1);
        }
      }
      INodeDirectorySection.DirEntry e = b.build();
      e.writeDelimitedTo(out);
    }

    ++i;
    if (i % FSImageFormatProtobuf.Saver.CHECK_CANCEL_INTERVAL == 0) {
      context.checkCancelled();
    }
  }
  parent.commitSection(summary, FSImageFormatProtobuf.SectionName.INODE_DIR);
}
Example 4
Source File: CacheManager.java, from the hadoop project (Apache License 2.0). An identical example appears in the big-c project.
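For a cache directive over a directory, size() of the children list directly supplies the requested file count, and the byte total sums the sizes of the direct children that are files; the computation is one level deep and does not descend into subdirectories.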
/**
 * Computes the needed number of bytes and files for a path.
 * @return CacheDirectiveStats describing the needed stats for this path
 */
private CacheDirectiveStats computeNeeded(String path, short replication) {
  FSDirectory fsDir = namesystem.getFSDirectory();
  INode node;
  long requestedBytes = 0;
  long requestedFiles = 0;
  CacheDirectiveStats.Builder builder = new CacheDirectiveStats.Builder();
  try {
    node = fsDir.getINode(path);
  } catch (UnresolvedLinkException e) {
    // We don't cache through symlinks
    return builder.build();
  }
  if (node == null) {
    return builder.build();
  }
  if (node.isFile()) {
    requestedFiles = 1;
    INodeFile file = node.asFile();
    requestedBytes = file.computeFileSize();
  } else if (node.isDirectory()) {
    INodeDirectory dir = node.asDirectory();
    ReadOnlyList<INode> children = dir
        .getChildrenList(Snapshot.CURRENT_STATE_ID);
    requestedFiles = children.size();
    for (INode child : children) {
      if (child.isFile()) {
        requestedBytes += child.asFile().computeFileSize();
      }
    }
  }
  return new CacheDirectiveStats.Builder()
      .setBytesNeeded(requestedBytes)
      .setFilesCached(requestedFiles)
      .build();
}
Example 5
Source File: INodeDirectory.java, from the hadoop project (Apache License 2.0). An identical example appears in the big-c project.
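This traversal calls size() on every loop iteration instead of caching it: computing a subtree's summary may yield (release and reacquire) the lock, after which the children list can have changed, so the loop re-fetches the list and repositions itself by the last visited child's name.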
protected ContentSummaryComputationContext computeDirectoryContentSummary(
    ContentSummaryComputationContext summary, int snapshotId) {
  ReadOnlyList<INode> childrenList = getChildrenList(snapshotId);
  // Explicit traversing is done to enable repositioning after relinquishing
  // and reacquiring locks.
  for (int i = 0; i < childrenList.size(); i++) {
    INode child = childrenList.get(i);
    byte[] childName = child.getLocalNameBytes();
    long lastYieldCount = summary.getYieldCount();
    child.computeContentSummary(summary);
    // Check whether the computation was paused in the subtree.
    // The counts may be off, but traversing the rest of children
    // should be made safe.
    if (lastYieldCount == summary.getYieldCount()) {
      continue;
    }
    // The locks were released and reacquired. Check parent first.
    if (getParent() == null) {
      // Stop further counting and return whatever we have so far.
      break;
    }
    // Obtain the children list again since it may have been modified.
    childrenList = getChildrenList(snapshotId);
    // Reposition in case the children list is changed. Decrement by 1
    // since it will be incremented when loops.
    i = nextChild(childrenList, childName) - 1;
  }
  // Increment the directory count for this directory.
  summary.getCounts().addContent(Content.DIRECTORY, 1);
  // Relinquish and reacquire locks if necessary.
  summary.yield();
  return summary;
}
Example 6
Source File: FSDirStatAndListingOp.java, from the hadoop project (Apache License 2.0). An identical example appears in the big-c project.
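In the general partial-listing path, size() provides the total child count. The page starts after startAfter, is capped by the ls limit, and may be cut short when the block-location budget is exhausted; the returned DirectoryListing also reports how many children remain beyond the page.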
/**
 * Get a partial listing of the indicated directory
 *
 * We will stop when any of the following conditions is met:
 * 1) this.lsLimit files have been added
 * 2) needLocation is true AND enough files have been added such
 * that at least this.lsLimit block locations are in the response
 *
 * @param fsd FSDirectory
 * @param iip the INodesInPath instance containing all the INodes along the
 *            path
 * @param src the directory name
 * @param startAfter the name to start listing after
 * @param needLocation if block locations are returned
 * @return a partial listing starting after startAfter
 */
private static DirectoryListing getListing(FSDirectory fsd, INodesInPath iip,
    String src, byte[] startAfter, boolean needLocation, boolean isSuperUser)
    throws IOException {
  String srcs = FSDirectory.normalizePath(src);
  final boolean isRawPath = FSDirectory.isReservedRawName(src);

  fsd.readLock();
  try {
    if (srcs.endsWith(HdfsConstants.SEPARATOR_DOT_SNAPSHOT_DIR)) {
      return getSnapshotsListing(fsd, srcs, startAfter);
    }
    final int snapshot = iip.getPathSnapshotId();
    final INode targetNode = iip.getLastINode();
    if (targetNode == null)
      return null;
    byte parentStoragePolicy = isSuperUser ?
        targetNode.getStoragePolicyID() : BlockStoragePolicySuite
        .ID_UNSPECIFIED;

    if (!targetNode.isDirectory()) {
      return new DirectoryListing(
          new HdfsFileStatus[]{createFileStatus(fsd, src,
              HdfsFileStatus.EMPTY_NAME, targetNode, needLocation,
              parentStoragePolicy, snapshot, isRawPath, iip)}, 0);
    }

    final INodeDirectory dirInode = targetNode.asDirectory();
    final ReadOnlyList<INode> contents = dirInode.getChildrenList(snapshot);
    int startChild = INodeDirectory.nextChild(contents, startAfter);
    int totalNumChildren = contents.size();
    int numOfListing = Math.min(totalNumChildren - startChild,
        fsd.getLsLimit());
    int locationBudget = fsd.getLsLimit();
    int listingCnt = 0;
    HdfsFileStatus listing[] = new HdfsFileStatus[numOfListing];
    for (int i = 0; i < numOfListing && locationBudget > 0; i++) {
      INode cur = contents.get(startChild + i);
      byte curPolicy = isSuperUser && !cur.isSymlink() ?
          cur.getLocalStoragePolicyID() :
          BlockStoragePolicySuite.ID_UNSPECIFIED;
      listing[i] = createFileStatus(fsd, src, cur.getLocalNameBytes(), cur,
          needLocation, getStoragePolicyID(curPolicy, parentStoragePolicy),
          snapshot, isRawPath, iip);
      listingCnt++;
      if (needLocation) {
        // Once we hit lsLimit locations, stop.
        // This helps to prevent excessively large response payloads.
        // Approximate #locations with locatedBlockCount() * repl_factor
        LocatedBlocks blks =
            ((HdfsLocatedFileStatus) listing[i]).getBlockLocations();
        locationBudget -= (blks == null) ? 0 :
            blks.locatedBlockCount() * listing[i].getReplication();
      }
    }
    // truncate return array if necessary
    if (listingCnt < numOfListing) {
      listing = Arrays.copyOf(listing, listingCnt);
    }
    return new DirectoryListing(
        listing, totalNumChildren - startChild - listingCnt);
  } finally {
    fsd.readUnlock();
  }
}