Java Code Examples for org.apache.hadoop.hdfs.server.namenode.INode#asDirectory()
The following examples show how to use org.apache.hadoop.hdfs.server.namenode.INode#asDirectory().
Each example notes its source file and originating project.
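All of these examples follow the same idiom: establish that an INode is a directory (usually via INode#isDirectory()) before calling asDirectory(), which narrows the generic INode to an INodeDirectory and throws an IllegalStateException for any other inode type. Below is a minimal sketch of that idiom; the class and helper names are hypothetical, and it assumes code running inside the NameNode with access to these internal classes.

import org.apache.hadoop.hdfs.server.namenode.INode;
import org.apache.hadoop.hdfs.server.namenode.INodeDirectory;
import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
import org.apache.hadoop.hdfs.util.ReadOnlyList;

public final class AsDirectoryExample {
  /** Hypothetical helper: count an inode's children in the current state. */
  static int countCurrentChildren(INode node) {
    // Guard first: asDirectory() is only valid on directory inodes.
    if (node == null || !node.isDirectory()) {
      return 0;
    }
    // asDirectory() returns the same object, typed as INodeDirectory.
    INodeDirectory dir = node.asDirectory();
    ReadOnlyList<INode> children =
        dir.getChildrenList(Snapshot.CURRENT_STATE_ID);
    return children.size();
  }
}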
Example 1
Source File: Snapshot.java From hadoop with Apache License 2.0
/**
 * Find the latest snapshot that 1) covers the given inode (which means the
 * snapshot was either taken on the inode or taken on an ancestor of the
 * inode), and 2) was taken before the given snapshot (if the given snapshot
 * is not null).
 *
 * @param inode the given inode that the returned snapshot needs to cover
 * @param anchor the returned snapshot should be taken before this given id.
 * @return id of the latest snapshot that covers the given inode and was
 *         taken before the given snapshot (if it is not null).
 */
public static int findLatestSnapshot(INode inode, final int anchor) {
  int latest = NO_SNAPSHOT_ID;
  for (; inode != null; inode = inode.getParent()) {
    if (inode.isDirectory()) {
      final INodeDirectory dir = inode.asDirectory();
      if (dir.isWithSnapshot()) {
        latest = dir.getDiffs().updatePrior(anchor, latest);
      }
    }
  }
  return latest;
}
Example 2
Source File: Snapshot.java From hadoop with Apache License 2.0
static Snapshot read(DataInput in, FSImageFormat.Loader loader)
    throws IOException {
  final int snapshotId = in.readInt();
  final INode root = loader.loadINodeWithLocalName(false, in, false);
  return new Snapshot(snapshotId, root.asDirectory(), null);
}
Example 3
Source File: DirectoryWithSnapshotFeature.java From hadoop with Apache License 2.0
/**
 * Destroy a subtree under a DstReference node.
 */
public static void destroyDstSubtree(
    final BlockStoragePolicySuite bsps, INode inode, final int snapshot,
    final int prior, final BlocksMapUpdateInfo collectedBlocks,
    final List<INode> removedINodes) throws QuotaExceededException {
  Preconditions.checkArgument(prior != Snapshot.NO_SNAPSHOT_ID);
  if (inode.isReference()) {
    if (inode instanceof INodeReference.WithName
        && snapshot != Snapshot.CURRENT_STATE_ID) {
      // this inode has been renamed before the deletion of the DstReference
      // subtree
      inode.cleanSubtree(bsps, snapshot, prior, collectedBlocks,
          removedINodes);
    } else {
      // for a DstReference node, continue this process to its subtree
      destroyDstSubtree(bsps, inode.asReference().getReferredINode(),
          snapshot, prior, collectedBlocks, removedINodes);
    }
  } else if (inode.isFile()) {
    inode.cleanSubtree(bsps, snapshot, prior, collectedBlocks, removedINodes);
  } else if (inode.isDirectory()) {
    Map<INode, INode> excludedNodes = null;
    INodeDirectory dir = inode.asDirectory();
    DirectoryWithSnapshotFeature sf = dir.getDirectoryWithSnapshotFeature();
    if (sf != null) {
      DirectoryDiffList diffList = sf.getDiffs();
      DirectoryDiff priorDiff = diffList.getDiffById(prior);
      if (priorDiff != null && priorDiff.getSnapshotId() == prior) {
        List<INode> dList = priorDiff.diff.getList(ListType.DELETED);
        excludedNodes = cloneDiffList(dList);
      }
      if (snapshot != Snapshot.CURRENT_STATE_ID) {
        diffList.deleteSnapshotDiff(bsps, snapshot, prior, dir,
            collectedBlocks, removedINodes);
      }
      priorDiff = diffList.getDiffById(prior);
      if (priorDiff != null && priorDiff.getSnapshotId() == prior) {
        priorDiff.diff.destroyCreatedList(bsps, dir, collectedBlocks,
            removedINodes);
      }
    }
    for (INode child : inode.asDirectory().getChildrenList(prior)) {
      if (excludedNodes != null && excludedNodes.containsKey(child)) {
        continue;
      }
      destroyDstSubtree(bsps, child, snapshot, prior, collectedBlocks,
          removedINodes);
    }
  }
}
Example 4
Source File: DirectoryWithSnapshotFeature.java From hadoop with Apache License 2.0
/**
 * Clean an inode while we move it from the deleted list of post to the
 * deleted list of prior.
 * @param bsps The block storage policy suite.
 * @param inode The inode to clean.
 * @param post The id of the post snapshot.
 * @param prior The id of the prior snapshot.
 * @param collectedBlocks Used to collect blocks for later deletion.
 * @param removedINodes Used to collect inodes for later removal.
 * @return Quota usage update.
 */
private static QuotaCounts cleanDeletedINode(
    final BlockStoragePolicySuite bsps, INode inode, final int post,
    final int prior, final BlocksMapUpdateInfo collectedBlocks,
    final List<INode> removedINodes) {
  QuotaCounts counts = new QuotaCounts.Builder().build();
  Deque<INode> queue = new ArrayDeque<INode>();
  queue.addLast(inode);
  while (!queue.isEmpty()) {
    INode topNode = queue.pollFirst();
    if (topNode instanceof INodeReference.WithName) {
      INodeReference.WithName wn = (INodeReference.WithName) topNode;
      if (wn.getLastSnapshotId() >= post) {
        INodeReference.WithCount wc =
            (INodeReference.WithCount) wn.getReferredINode();
        if (wc.getLastWithName() == wn && wc.getParentReference() == null) {
          // this wn is the last WithName inside the wc, and the DstReference
          // node has been deleted; in this case, treat the referred file/dir
          // as a normal case
          queue.add(wc.getReferredINode());
        } else {
          wn.cleanSubtree(bsps, post, prior, collectedBlocks, removedINodes);
        }
      }
      // For a DstReference node, since the node is not in the created list
      // of prior, we should treat it as a regular file/dir
    } else if (topNode.isFile() && topNode.asFile().isWithSnapshot()) {
      INodeFile file = topNode.asFile();
      counts.add(file.getDiffs().deleteSnapshotDiff(bsps, post, prior, file,
          collectedBlocks, removedINodes));
    } else if (topNode.isDirectory()) {
      INodeDirectory dir = topNode.asDirectory();
      ChildrenDiff priorChildrenDiff = null;
      DirectoryWithSnapshotFeature sf = dir.getDirectoryWithSnapshotFeature();
      if (sf != null) {
        // delete files/dirs created after prior. Note that these
        // files/dirs, along with inode, were deleted right after post.
        DirectoryDiff priorDiff = sf.getDiffs().getDiffById(prior);
        if (priorDiff != null && priorDiff.getSnapshotId() == prior) {
          priorChildrenDiff = priorDiff.getChildrenDiff();
          counts.add(priorChildrenDiff.destroyCreatedList(bsps, dir,
              collectedBlocks, removedINodes));
        }
      }
      for (INode child : dir.getChildrenList(prior)) {
        if (priorChildrenDiff != null && priorChildrenDiff.search(
            ListType.DELETED, child.getLocalNameBytes()) != null) {
          continue;
        }
        queue.addLast(child);
      }
    }
  }
  return counts;
}
Example 5
Source File: CacheReplicationMonitor.java From hadoop with Apache License 2.0
/**
 * Scan all CacheDirectives. Use the information to figure out
 * what cache replication factor each block should have.
 */
private void rescanCacheDirectives() {
  FSDirectory fsDir = namesystem.getFSDirectory();
  final long now = new Date().getTime();
  for (CacheDirective directive : cacheManager.getCacheDirectives()) {
    scannedDirectives++;
    // Skip processing this entry if it has expired
    if (directive.getExpiryTime() > 0 && directive.getExpiryTime() <= now) {
      LOG.debug("Directive {}: the directive expired at {} (now = {})",
          directive.getId(), directive.getExpiryTime(), now);
      continue;
    }
    String path = directive.getPath();
    INode node;
    try {
      node = fsDir.getINode(path);
    } catch (UnresolvedLinkException e) {
      // We don't cache through symlinks
      LOG.debug("Directive {}: got UnresolvedLinkException while resolving "
          + "path {}", directive.getId(), path);
      continue;
    }
    if (node == null) {
      LOG.debug("Directive {}: No inode found at {}", directive.getId(),
          path);
    } else if (node.isDirectory()) {
      INodeDirectory dir = node.asDirectory();
      ReadOnlyList<INode> children = dir
          .getChildrenList(Snapshot.CURRENT_STATE_ID);
      for (INode child : children) {
        if (child.isFile()) {
          rescanFile(directive, child.asFile());
        }
      }
    } else if (node.isFile()) {
      rescanFile(directive, node.asFile());
    } else {
      LOG.debug("Directive {}: ignoring non-directory, non-file inode {}",
          directive.getId(), node);
    }
  }
}
Example 6
Source File: TestSnapshotDeletion.java From hadoop with Apache License 2.0
@Test
public void testRenameAndDelete() throws IOException {
  final Path foo = new Path("/foo");
  final Path x = new Path(foo, "x");
  final Path y = new Path(foo, "y");
  final Path trash = new Path("/trash");
  hdfs.mkdirs(x);
  hdfs.mkdirs(y);
  final long parentId = fsdir.getINode4Write(y.toString()).getId();
  hdfs.mkdirs(trash);

  hdfs.allowSnapshot(foo);
  // 1. create snapshot s0
  hdfs.createSnapshot(foo, "s0");
  // 2. create file /foo/x/bar
  final Path file = new Path(x, "bar");
  DFSTestUtil.createFile(hdfs, file, BLOCKSIZE, (short) 1, 0L);
  final long fileId = fsdir.getINode4Write(file.toString()).getId();
  // 3. move file into /foo/y
  final Path newFile = new Path(y, "bar");
  hdfs.rename(file, newFile);
  // 4. create snapshot s1
  hdfs.createSnapshot(foo, "s1");
  // 5. move /foo/y to /trash
  final Path deletedY = new Path(trash, "y");
  hdfs.rename(y, deletedY);
  // 6. create snapshot s2
  hdfs.createSnapshot(foo, "s2");
  // 7. delete /trash/y
  hdfs.delete(deletedY, true);
  // 8. delete snapshot s1
  hdfs.deleteSnapshot(foo, "s1");

  // make sure bar has been removed from its parent
  INode p = fsdir.getInode(parentId);
  Assert.assertNotNull(p);
  INodeDirectory pd = p.asDirectory();
  Assert.assertNotNull(pd);
  Assert.assertNull(pd.getChild("bar".getBytes(), Snapshot.CURRENT_STATE_ID));

  // make sure bar has been cleaned from inodeMap
  Assert.assertNull(fsdir.getInode(fileId));
}
Example 7
Source File: TestRenameWithSnapshots.java From hadoop with Apache License 2.0
/**
 * Rename and snapshot deletion under the same snapshottable directory.
 */
@Test
public void testRenameDirAndDeleteSnapshot_6() throws Exception {
  final Path test = new Path("/test");
  final Path dir1 = new Path(test, "dir1");
  final Path dir2 = new Path(test, "dir2");
  hdfs.mkdirs(dir1);
  hdfs.mkdirs(dir2);

  final Path foo = new Path(dir2, "foo");
  final Path bar = new Path(foo, "bar");
  final Path file = new Path(bar, "file");
  DFSTestUtil.createFile(hdfs, file, BLOCKSIZE, REPL, SEED);

  // take a snapshot on /test
  SnapshotTestHelper.createSnapshot(hdfs, test, "s0");

  // delete /test/dir2/foo/bar/file after snapshot s0, so that there is a
  // snapshot copy recorded in bar
  hdfs.delete(file, true);

  // rename foo from dir2 to dir1
  final Path newfoo = new Path(dir1, foo.getName());
  hdfs.rename(foo, newfoo);

  final Path foo_s0 = SnapshotTestHelper.getSnapshotPath(test, "s0",
      "dir2/foo");
  assertTrue("the snapshot path " + foo_s0 + " should exist",
      hdfs.exists(foo_s0));

  // delete snapshot s0. The deletion will first go down through dir1, and
  // find foo in the created list of dir1. Then it will use null as the prior
  // snapshot and continue the snapshot deletion process in the subtree of
  // foo. We need to make sure the snapshot s0 can be deleted cleanly in the
  // foo subtree.
  hdfs.deleteSnapshot(test, "s0");
  // check the internal state
  assertFalse("after deleting s0, " + foo_s0 + " should not exist",
      hdfs.exists(foo_s0));
  INodeDirectory dir2Node = fsdir.getINode4Write(dir2.toString())
      .asDirectory();
  assertTrue("the diff list of " + dir2
      + " should be empty after deleting s0", dir2Node.getDiffs().asList()
      .isEmpty());

  assertTrue(hdfs.exists(newfoo));
  INode fooRefNode = fsdir.getINode4Write(newfoo.toString());
  assertTrue(fooRefNode instanceof INodeReference.DstReference);
  INodeDirectory fooNode = fooRefNode.asDirectory();
  // fooNode should still be an INodeDirectory (With Snapshot) since we call
  // recordModification before the rename
  assertTrue(fooNode.isWithSnapshot());
  assertTrue(fooNode.getDiffs().asList().isEmpty());
  INodeDirectory barNode = fooNode.getChildrenList(Snapshot.CURRENT_STATE_ID)
      .get(0).asDirectory();
  // bar should also be an INodeDirectory (With Snapshot), and both of its
  // diff list and children list are empty
  assertTrue(barNode.getDiffs().asList().isEmpty());
  assertTrue(barNode.getChildrenList(Snapshot.CURRENT_STATE_ID).isEmpty());

  restartClusterAndCheckImage(true);
}
Example 8
Source File: DirectoryWithSnapshotFeature.java From big-c with Apache License 2.0
/**
 * Clean an inode while we move it from the deleted list of post to the
 * deleted list of prior.
 * @param bsps The block storage policy suite.
 * @param inode The inode to clean.
 * @param post The id of the post snapshot.
 * @param prior The id of the prior snapshot.
 * @param collectedBlocks Used to collect blocks for later deletion.
 * @param removedINodes Used to collect inodes for later removal.
 * @return Quota usage update.
 */
private static QuotaCounts cleanDeletedINode(
    final BlockStoragePolicySuite bsps, INode inode, final int post,
    final int prior, final BlocksMapUpdateInfo collectedBlocks,
    final List<INode> removedINodes) {
  QuotaCounts counts = new QuotaCounts.Builder().build();
  Deque<INode> queue = new ArrayDeque<INode>();
  queue.addLast(inode);
  while (!queue.isEmpty()) {
    INode topNode = queue.pollFirst();
    if (topNode instanceof INodeReference.WithName) {
      INodeReference.WithName wn = (INodeReference.WithName) topNode;
      if (wn.getLastSnapshotId() >= post) {
        wn.cleanSubtree(bsps, post, prior, collectedBlocks, removedINodes);
      }
      // For a DstReference node, since the node is not in the created list
      // of prior, we should treat it as a regular file/dir
    } else if (topNode.isFile() && topNode.asFile().isWithSnapshot()) {
      INodeFile file = topNode.asFile();
      counts.add(file.getDiffs().deleteSnapshotDiff(bsps, post, prior, file,
          collectedBlocks, removedINodes));
    } else if (topNode.isDirectory()) {
      INodeDirectory dir = topNode.asDirectory();
      ChildrenDiff priorChildrenDiff = null;
      DirectoryWithSnapshotFeature sf = dir.getDirectoryWithSnapshotFeature();
      if (sf != null) {
        // delete files/dirs created after prior. Note that these
        // files/dirs, along with inode, were deleted right after post.
        DirectoryDiff priorDiff = sf.getDiffs().getDiffById(prior);
        if (priorDiff != null && priorDiff.getSnapshotId() == prior) {
          priorChildrenDiff = priorDiff.getChildrenDiff();
          counts.add(priorChildrenDiff.destroyCreatedList(bsps, dir,
              collectedBlocks, removedINodes));
        }
      }
      for (INode child : dir.getChildrenList(prior)) {
        if (priorChildrenDiff != null && priorChildrenDiff.search(
            ListType.DELETED, child.getLocalNameBytes()) != null) {
          continue;
        }
        queue.addLast(child);
      }
    }
  }
  return counts;
}
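Note that this big-c variant differs from the hadoop version in Example 4: when a WithName reference's last snapshot id is at least post, it cleans the referenced subtree directly, without the extra check that re-queues the referred inode when the WithName node is the last remaining reference and the DstReference has already been deleted.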