org.apache.hadoop.hdfs.server.namenode.INode Java Examples
The following examples show how to use
org.apache.hadoop.hdfs.server.namenode.INode.
Each example is drawn from an open-source project; the source file and license are noted above each snippet.
Example #1
Source File: TestHadoopNNAOperations.java From NNAnalytics with Apache License 2.0
@BeforeClass
public static void beforeClass() throws Exception {
  GSetGenerator gSetGenerator = new GSetGenerator();
  gSetGenerator.clear();
  GSet<INode, INodeWithAdditionalFields> gset = gSetGenerator.getGSet((short) 3, 10, 500);
  nna = new HadoopWebServerMain();
  ApplicationConfiguration conf = new ApplicationConfiguration();
  conf.set("ldap.enable", "false");
  conf.set("authorization.enable", "false");
  conf.set("nna.historical", "false");
  conf.set("nna.base.dir", MiniDFSCluster.getBaseDirectory());
  conf.set("nna.web.base.dir", "src/main/resources/webapps/nna");
  conf.set("nna.query.engine.impl", JavaStreamQueryEngine.class.getCanonicalName());
  nna.init(conf, gset);
  hostPort = new HttpHost("localhost", 4567);
}
Example #2
Source File: TestDiff.java From hadoop with Apache License 2.0
static boolean hasIdenticalElements(final List<INode> expected,
    final List<INode> computed) {
  if (expected == null) {
    return computed == null;
  }
  if (expected.size() != computed.size()) {
    return false;
  }
  for (int i = 0; i < expected.size(); i++) {
    // must be the same object (equals is not enough)
    if (expected.get(i) != computed.get(i)) {
      return false;
    }
  }
  return true;
}
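The comment in the loop is the key detail: the helper compares with != so that two INodes that merely compare equal do not pass. A self-contained sketch of identity versus equality, using plain Strings as stand-ins for INodes:

import java.util.Arrays;
import java.util.List;

public class IdentityVsEquals {
  public static void main(String[] args) {
    // Two distinct String objects with equal contents.
    String a = new String("inode");
    String b = new String("inode");
    List<String> expected = Arrays.asList(a);
    List<String> computed = Arrays.asList(b);

    // equals-based comparison says the lists match...
    System.out.println(expected.equals(computed)); // true
    // ...but an identity check detects they hold different objects.
    System.out.println(expected.get(0) == computed.get(0)); // false
  }
}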
Example #3
Source File: FileWithSnapshotFeature.java From hadoop with Apache License 2.0
public QuotaCounts cleanFile(final BlockStoragePolicySuite bsps,
    final INodeFile file, final int snapshotId, int priorSnapshotId,
    final BlocksMapUpdateInfo collectedBlocks,
    final List<INode> removedINodes) {
  if (snapshotId == Snapshot.CURRENT_STATE_ID) {
    // delete the current file while the file has snapshot feature
    if (!isCurrentFileDeleted()) {
      file.recordModification(priorSnapshotId);
      deleteCurrentFile();
    }
    collectBlocksAndClear(bsps, file, collectedBlocks, removedINodes);
    return new QuotaCounts.Builder().build();
  } else { // delete the snapshot
    priorSnapshotId = getDiffs().updatePrior(snapshotId, priorSnapshotId);
    return diffs.deleteSnapshotDiff(bsps, snapshotId, priorSnapshotId, file,
        collectedBlocks, removedINodes);
  }
}
Example #4
Source File: FileWithSnapshotFeature.java From hadoop with Apache License 2.0
/**
 * If some blocks at the end of the block list no longer belong to
 * any inode, collect them and update the block list.
 */
public void collectBlocksAndClear(final BlockStoragePolicySuite bsps,
    final INodeFile file, final BlocksMapUpdateInfo info,
    final List<INode> removedINodes) {
  // check if everything is deleted.
  if (isCurrentFileDeleted() && getDiffs().asList().isEmpty()) {
    file.destroyAndCollectBlocks(bsps, info, removedINodes);
    return;
  }
  // find max file size.
  final long max;
  FileDiff diff = getDiffs().getLast();
  if (isCurrentFileDeleted()) {
    max = diff == null ? 0 : diff.getFileSize();
  } else {
    max = file.computeFileSize();
  }

  // Collect blocks that should be deleted
  FileDiff last = diffs.getLast();
  BlockInfoContiguous[] snapshotBlocks = last == null ? null : last.getBlocks();
  if (snapshotBlocks == null) {
    file.collectBlocksBeyondMax(max, info);
  } else {
    file.collectBlocksBeyondSnapshot(snapshotBlocks, info);
  }
}
Example #5
Source File: SnapshotDiffInfo.java From big-c with Apache License 2.0
/**
 * Generate a {@link SnapshotDiffReport} based on detailed diff information.
 * @return A {@link SnapshotDiffReport} describing the difference
 */
public SnapshotDiffReport generateReport() {
  List<DiffReportEntry> diffReportList = new ArrayList<DiffReportEntry>();
  for (Map.Entry<INode, byte[][]> drEntry : diffMap.entrySet()) {
    INode node = drEntry.getKey();
    byte[][] path = drEntry.getValue();
    diffReportList.add(new DiffReportEntry(DiffType.MODIFY, path, null));
    if (node.isDirectory()) {
      List<DiffReportEntry> subList = generateReport(dirDiffMap.get(node),
          path, isFromEarlier(), renameMap);
      diffReportList.addAll(subList);
    }
  }
  return new SnapshotDiffReport(snapshotRoot.getFullPathName(),
      Snapshot.getSnapshotName(from), Snapshot.getSnapshotName(to),
      diffReportList);
}
Example #6
Source File: FSImageFormatPBSnapshot.java From hadoop with Apache License 2.0
/**
 * Load the snapshot diff section from fsimage.
 */
public void loadSnapshotDiffSection(InputStream in) throws IOException {
  final List<INodeReference> refList = parent.getLoaderContext()
      .getRefList();
  while (true) {
    SnapshotDiffSection.DiffEntry entry = SnapshotDiffSection.DiffEntry
        .parseDelimitedFrom(in);
    if (entry == null) {
      break;
    }
    long inodeId = entry.getInodeId();
    INode inode = fsDir.getInode(inodeId);
    SnapshotDiffSection.DiffEntry.Type type = entry.getType();
    switch (type) {
      case FILEDIFF:
        loadFileDiffList(in, inode.asFile(), entry.getNumOfDiff());
        break;
      case DIRECTORYDIFF:
        loadDirectoryDiffList(in, inode.asDirectory(), entry.getNumOfDiff(),
            refList);
        break;
    }
  }
}
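parseDelimitedFrom reads one length-prefixed protobuf message per call and returns null at end of stream, which is what terminates the loop above. A minimal sketch of the same read-until-null pattern, assuming protobuf-java 3 on the classpath and using the built-in Any message as a stand-in for DiffEntry:

import com.google.protobuf.Any;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;

public class DelimitedReadSketch {
  public static void main(String[] args) throws IOException {
    // Write two length-delimited messages back to back.
    ByteArrayOutputStream out = new ByteArrayOutputStream();
    Any.getDefaultInstance().writeDelimitedTo(out);
    Any.getDefaultInstance().writeDelimitedTo(out);

    // Read until parseDelimitedFrom signals end of stream with null.
    ByteArrayInputStream in = new ByteArrayInputStream(out.toByteArray());
    int count = 0;
    while (true) {
      Any entry = Any.parseDelimitedFrom(in);
      if (entry == null) {
        break; // no more delimited messages
      }
      count++;
    }
    System.out.println("read " + count + " messages"); // read 2 messages
  }
}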
Example #7
Source File: Mover.java From big-c with Apache License 2.0
/**
 * @return true if the given path is a snapshot path and the corresponding
 *         INode is still in the current fsdirectory.
 */
private boolean isSnapshotPathInCurrent(String path) throws IOException {
  // if the parent path contains "/.snapshot/", this is a snapshot path
  if (path.contains(HdfsConstants.SEPARATOR_DOT_SNAPSHOT_DIR_SEPARATOR)) {
    String[] pathComponents = INode.getPathNames(path);
    if (HdfsConstants.DOT_SNAPSHOT_DIR
        .equals(pathComponents[pathComponents.length - 2])) {
      // this is a path for a specific snapshot (e.g., /foo/.snapshot/s1)
      return false;
    }
    String nonSnapshotPath = convertSnapshotPath(pathComponents);
    return dfs.getFileInfo(nonSnapshotPath) != null;
  } else {
    return false;
  }
}
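The convertSnapshotPath helper is not shown here. A plausible stand-alone sketch of what it implies (dropping the .snapshot component and the snapshot name that follows it) is below; the method body is an assumption for illustration, not the actual Hadoop implementation:

import java.util.StringJoiner;

public class SnapshotPathSketch {
  // Hypothetical helper: rewrite /foo/.snapshot/s1/bar as /foo/bar by
  // skipping the ".snapshot" component and the snapshot name after it.
  static String convertSnapshotPath(String[] components) {
    StringJoiner joiner = new StringJoiner("/");
    for (int i = 0; i < components.length; i++) {
      if (".snapshot".equals(components[i])) {
        i++; // also skip the snapshot name that follows
        continue;
      }
      joiner.add(components[i]);
    }
    return "/" + joiner.toString();
  }

  public static void main(String[] args) {
    String[] parts = {"foo", ".snapshot", "s1", "bar"};
    System.out.println(convertSnapshotPath(parts)); // /foo/bar
  }
}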
Example #8
Source File: TestDiff.java From big-c with Apache License 2.0
static boolean hasIdenticalElements(final List<INode> expected,
    final List<INode> computed) {
  if (expected == null) {
    return computed == null;
  }
  if (expected.size() != computed.size()) {
    return false;
  }
  for (int i = 0; i < expected.size(); i++) {
    // must be the same object (equals is not enough)
    if (expected.get(i) != computed.get(i)) {
      return false;
    }
  }
  return true;
}
Example #9
Source File: FSImageFormatPBSnapshot.java From big-c with Apache License 2.0
/**
 * Load the snapshot diff section from fsimage.
 */
public void loadSnapshotDiffSection(InputStream in) throws IOException {
  final List<INodeReference> refList = parent.getLoaderContext()
      .getRefList();
  while (true) {
    SnapshotDiffSection.DiffEntry entry = SnapshotDiffSection.DiffEntry
        .parseDelimitedFrom(in);
    if (entry == null) {
      break;
    }
    long inodeId = entry.getInodeId();
    INode inode = fsDir.getInode(inodeId);
    SnapshotDiffSection.DiffEntry.Type type = entry.getType();
    switch (type) {
      case FILEDIFF:
        loadFileDiffList(in, inode.asFile(), entry.getNumOfDiff());
        break;
      case DIRECTORYDIFF:
        loadDirectoryDiffList(in, inode.asDirectory(), entry.getNumOfDiff(),
            refList);
        break;
    }
  }
}
Example #10
Source File: FSImageFormatPBSnapshot.java From big-c with Apache License 2.0
private INodeReference loadINodeReference(
    INodeReferenceSection.INodeReference r) throws IOException {
  long referredId = r.getReferredId();
  INode referred = fsDir.getInode(referredId);
  WithCount withCount = (WithCount) referred.getParentReference();
  if (withCount == null) {
    withCount = new INodeReference.WithCount(null, referred);
  }
  final INodeReference ref;
  if (r.hasDstSnapshotId()) { // DstReference
    ref = new INodeReference.DstReference(null, withCount,
        r.getDstSnapshotId());
  } else {
    ref = new INodeReference.WithName(null, withCount,
        r.getName().toByteArray(), r.getLastSnapshotId());
  }
  return ref;
}
Example #11
Source File: SnapshotFSImageFormat.java From big-c with Apache License 2.0
/**
 * Load {@link DirectoryDiff} from fsimage.
 * @param parent The directory that the SnapshotDiff belongs to.
 * @param in The {@link DataInput} instance to read.
 * @param loader The {@link Loader} instance that this loading procedure is
 *               using.
 * @return A {@link DirectoryDiff}.
 */
private static DirectoryDiff loadDirectoryDiff(INodeDirectory parent,
    DataInput in, FSImageFormat.Loader loader) throws IOException {
  // 1. Read the full path of the Snapshot root to identify the Snapshot
  final Snapshot snapshot = loader.getSnapshot(in);

  // 2. Load DirectoryDiff#childrenSize
  int childrenSize = in.readInt();

  // 3. Load DirectoryDiff#snapshotINode
  INodeDirectoryAttributes snapshotINode = loadSnapshotINodeInDirectoryDiff(
      snapshot, in, loader);

  // 4. Load the created list in SnapshotDiff#Diff
  List<INode> createdList = loadCreatedList(parent, in);

  // 5. Load the deleted list in SnapshotDiff#Diff
  List<INode> deletedList = loadDeletedList(parent, createdList, in, loader);

  // 6. Compose the SnapshotDiff
  List<DirectoryDiff> diffs = parent.getDiffs().asList();
  DirectoryDiff sdiff = new DirectoryDiff(snapshot.getId(), snapshotINode,
      diffs.isEmpty() ? null : diffs.get(0), childrenSize, createdList,
      deletedList, snapshotINode == snapshot.getRoot());
  return sdiff;
}
Example #12
Source File: TestNestedSnapshots.java From hadoop with Apache License 2.0
/**
 * When we have nested snapshottable directories and we try to reset a
 * snapshottable descendant back to a regular directory, we need to replace
 * the snapshottable descendant with an INodeDirectoryWithSnapshot
 */
@Test
public void testDisallowNestedSnapshottableDir() throws Exception {
  cluster.getNamesystem().getSnapshotManager().setAllowNestedSnapshots(true);

  final Path dir = new Path("/dir");
  final Path sub = new Path(dir, "sub");
  hdfs.mkdirs(sub);
  SnapshotTestHelper.createSnapshot(hdfs, dir, "s1");
  final Path file = new Path(sub, "file");
  DFSTestUtil.createFile(hdfs, file, BLOCKSIZE, REPLICATION, SEED);

  FSDirectory fsdir = cluster.getNamesystem().getFSDirectory();
  INode subNode = fsdir.getINode(sub.toString());
  assertTrue(subNode.asDirectory().isWithSnapshot());

  hdfs.allowSnapshot(sub);
  subNode = fsdir.getINode(sub.toString());
  assertTrue(subNode.isDirectory() && subNode.asDirectory().isSnapshottable());

  hdfs.disallowSnapshot(sub);
  subNode = fsdir.getINode(sub.toString());
  assertTrue(subNode.asDirectory().isWithSnapshot());
}
Example #13
Source File: SnapshotFSImageFormat.java From big-c with Apache License 2.0
/**
 * Save SnapshotDiff list for an INodeDirectoryWithSnapshot.
 * @param sNode The directory that the SnapshotDiff list belongs to.
 * @param out The {@link DataOutput} to write.
 */
private static <N extends INode, A extends INodeAttributes,
    D extends AbstractINodeDiff<N, A, D>> void saveINodeDiffs(
    final AbstractINodeDiffList<N, A, D> diffs, final DataOutput out,
    ReferenceMap referenceMap) throws IOException {
  // Record the diffs in reversed order, so that we can find the correct
  // reference for INodes in the created list when loading the FSImage
  if (diffs == null) {
    out.writeInt(-1); // no diffs
  } else {
    final List<D> list = diffs.asList();
    final int size = list.size();
    out.writeInt(size);
    for (int i = size - 1; i >= 0; i--) {
      list.get(i).write(out, referenceMap);
    }
  }
}
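Writing a size prefix (with -1 as the null marker) followed by the elements in reverse order is a compact framing idiom. A self-contained sketch of the same layout over plain ints; the stream format here is illustrative, not the real FSImage encoding:

import java.io.*;
import java.util.Arrays;
import java.util.List;

public class ReverseListFraming {
  static void save(List<Integer> diffs, DataOutput out) throws IOException {
    if (diffs == null) {
      out.writeInt(-1); // null marker, mirroring the "no diffs" case
      return;
    }
    out.writeInt(diffs.size());
    for (int i = diffs.size() - 1; i >= 0; i--) {
      out.writeInt(diffs.get(i)); // newest-to-oldest on the wire
    }
  }

  public static void main(String[] args) throws IOException {
    ByteArrayOutputStream buf = new ByteArrayOutputStream();
    save(Arrays.asList(1, 2, 3), new DataOutputStream(buf));

    DataInput in = new DataInputStream(new ByteArrayInputStream(buf.toByteArray()));
    int size = in.readInt(); // 3; a reader would treat -1 as "no list"
    for (int i = 0; i < size; i++) {
      System.out.print(in.readInt() + " "); // prints: 3 2 1
    }
  }
}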
Example #14
Source File: TestNNAnalyticsBase.java From NNAnalytics with Apache License 2.0
@Test
public void testTransformDiskspaceConsumedByBeingWritten() {
  Map<String, Function<INode, Long>> transformMap =
      Transforms.getAttributeTransforms(
          "isUnderConstruction:eq:true", "fileReplica", "1", nna.getLoader());
  assertThat(transformMap.size(), is(CoreMatchers.not(0)));
  Function<INode, Long> fileReplicaTransform = transformMap.get("diskspaceConsumed");
  assertThat(fileReplicaTransform, is(notNullValue()));
  Collection<INode> files = nna.getLoader().getINodeSet("files");
  long diskspaceConsumed =
      files.stream()
          .mapToLong(
              node -> node.asFile().getFileReplication() * node.asFile().computeFileSize())
          .sum();
  long transformedDiskspaceConsumed =
      files.stream().mapToLong(fileReplicaTransform::apply).sum();
  assertThat(transformedDiskspaceConsumed == diskspaceConsumed, is(true));
}
Example #15
Source File: TestNNAnalyticsBase.java From NNAnalytics with Apache License 2.0
@Test
public void testTransformDiskspaceConsumedByUser() {
  Map<String, Function<INode, Long>> transformMap =
      Transforms.getAttributeTransforms("user:eq:hdfs", "fileReplica", "1", nna.getLoader());
  assertThat(transformMap.size(), is(CoreMatchers.not(0)));
  Function<INode, Long> fileReplicaTransform = transformMap.get("diskspaceConsumed");
  assertThat(fileReplicaTransform, is(notNullValue()));
  Collection<INode> files = nna.getLoader().getINodeSet("files");
  long diskspaceConsumed =
      files.stream()
          .mapToLong(
              node -> node.asFile().getFileReplication() * node.asFile().computeFileSize())
          .sum();
  long transformedDiskspaceConsumed =
      files.stream().mapToLong(fileReplicaTransform::apply).sum();
  assertThat(transformedDiskspaceConsumed < diskspaceConsumed, is(true));
}
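Both tests compute the expected value as replication times file size, summed over a long stream. The same arithmetic in isolation, with made-up file sizes (uses Java 16+ records; the FileInfo type is a stand-in, not an HDFS class):

import java.util.List;

public class DiskspaceSum {
  record FileInfo(short replication, long sizeBytes) {}

  public static void main(String[] args) {
    List<FileInfo> files = List.of(
        new FileInfo((short) 3, 128L),  // 3 replicas of 128 bytes -> 384
        new FileInfo((short) 1, 512L)); // 1 replica of 512 bytes  -> 512
    long diskspaceConsumed = files.stream()
        .mapToLong(f -> f.replication() * f.sizeBytes())
        .sum();
    System.out.println(diskspaceConsumed); // 896
  }
}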
Example #16
Source File: TestNestedSnapshots.java From big-c with Apache License 2.0
/**
 * When we have nested snapshottable directories and we try to reset a
 * snapshottable descendant back to a regular directory, we need to replace
 * the snapshottable descendant with an INodeDirectoryWithSnapshot
 */
@Test
public void testDisallowNestedSnapshottableDir() throws Exception {
  cluster.getNamesystem().getSnapshotManager().setAllowNestedSnapshots(true);

  final Path dir = new Path("/dir");
  final Path sub = new Path(dir, "sub");
  hdfs.mkdirs(sub);
  SnapshotTestHelper.createSnapshot(hdfs, dir, "s1");
  final Path file = new Path(sub, "file");
  DFSTestUtil.createFile(hdfs, file, BLOCKSIZE, REPLICATION, SEED);

  FSDirectory fsdir = cluster.getNamesystem().getFSDirectory();
  INode subNode = fsdir.getINode(sub.toString());
  assertTrue(subNode.asDirectory().isWithSnapshot());

  hdfs.allowSnapshot(sub);
  subNode = fsdir.getINode(sub.toString());
  assertTrue(subNode.isDirectory() && subNode.asDirectory().isSnapshottable());

  hdfs.disallowSnapshot(sub);
  subNode = fsdir.getINode(sub.toString());
  assertTrue(subNode.asDirectory().isWithSnapshot());
}
Example #17
Source File: SnapshotFSImageFormat.java From hadoop with Apache License 2.0
public INodeReference.WithCount loadINodeReferenceWithCount(
    boolean isSnapshotINode, DataInput in, FSImageFormat.Loader loader)
    throws IOException {
  final boolean firstReferred = in.readBoolean();

  final INodeReference.WithCount withCount;
  if (firstReferred) {
    final INode referred = loader.loadINodeWithLocalName(isSnapshotINode,
        in, true);
    withCount = new INodeReference.WithCount(null, referred);
    referenceMap.put(withCount.getId(), withCount);
  } else {
    final long id = in.readLong();
    withCount = referenceMap.get(id);
  }
  return withCount;
}
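The boolean flag lets the writer serialize each shared object once and emit only an id for later occurrences; the reader interns the object on first sight. A minimal sketch of that intern-on-first-read idea with a plain map, using stand-in types rather than HDFS classes:

import java.util.HashMap;
import java.util.Map;

public class ReferenceInterning {
  static final Map<Long, String> referenceMap = new HashMap<>();

  // First occurrence carries the payload; later occurrences carry only the id.
  static String load(boolean firstReferred, long id, String payload) {
    if (firstReferred) {
      referenceMap.put(id, payload); // remember it for later back-references
      return payload;
    }
    return referenceMap.get(id); // resolve the back-reference
  }

  public static void main(String[] args) {
    String first = load(true, 42L, "shared-inode");
    String second = load(false, 42L, null);
    System.out.println(first == second); // true: both resolve to one object
  }
}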
Example #18
Source File: DirectoryWithSnapshotFeature.java From big-c with Apache License 2.0
/** clear the created list */
private QuotaCounts destroyCreatedList(
    final BlockStoragePolicySuite bsps,
    final INodeDirectory currentINode,
    final BlocksMapUpdateInfo collectedBlocks,
    final List<INode> removedINodes) {
  QuotaCounts counts = new QuotaCounts.Builder().build();
  final List<INode> createdList = getList(ListType.CREATED);
  for (INode c : createdList) {
    c.computeQuotaUsage(bsps, counts, true);
    c.destroyAndCollectBlocks(bsps, collectedBlocks, removedINodes);
    // c should be contained in the children list, remove it
    currentINode.removeChild(c);
  }
  createdList.clear();
  return counts;
}
Example #19
Source File: TestHadoopNNAWithStreamEngine.java From NNAnalytics with Apache License 2.0
@BeforeClass
public static void beforeClass() throws Exception {
  GSetGenerator gSetGenerator = new GSetGenerator();
  gSetGenerator.clear();
  GSet<INode, INodeWithAdditionalFields> gset = gSetGenerator.getGSet((short) 3, 10, 500);
  nna = new HadoopWebServerMain();
  ApplicationConfiguration conf = new ApplicationConfiguration();
  conf.set("ldap.enable", "false");
  conf.set("authorization.enable", "false");
  conf.set("nna.historical", "false");
  conf.set("nna.base.dir", MiniDFSCluster.getBaseDirectory());
  conf.set("nna.web.base.dir", "src/main/resources/webapps/nna");
  conf.set("nna.query.engine.impl", JavaStreamQueryEngine.class.getCanonicalName());
  nna.init(conf, gset);
  hostPort = new HttpHost("localhost", 4567);
}
Example #20
Source File: TestRenameWithSnapshots.java From big-c with Apache License 2.0
/**
 * Test rename from a non-snapshottable dir to a snapshottable dir
 */
@Test(timeout = 60000)
public void testRenameFromNonSDir2SDir() throws Exception {
  final Path sdir1 = new Path("/dir1");
  final Path sdir2 = new Path("/dir2");
  hdfs.mkdirs(sdir1);
  hdfs.mkdirs(sdir2);
  final Path foo = new Path(sdir1, "foo");
  final Path bar = new Path(foo, "bar");
  DFSTestUtil.createFile(hdfs, bar, BLOCKSIZE, REPL, SEED);

  SnapshotTestHelper.createSnapshot(hdfs, sdir2, snap1);

  final Path newfoo = new Path(sdir2, "foo");
  hdfs.rename(foo, newfoo);
  INode fooNode = fsdir.getINode4Write(newfoo.toString());
  assertTrue(fooNode instanceof INodeDirectory);
}
Example #21
Source File: DirectoryWithSnapshotFeature.java From hadoop with Apache License 2.0
@Override
QuotaCounts combinePosteriorAndCollectBlocks(
    final BlockStoragePolicySuite bsps, final INodeDirectory currentDir,
    final DirectoryDiff posterior, final BlocksMapUpdateInfo collectedBlocks,
    final List<INode> removedINodes) {
  final QuotaCounts counts = new QuotaCounts.Builder().build();
  diff.combinePosterior(posterior.diff, new Diff.Processor<INode>() {
    /** Collect blocks for deleted files. */
    @Override
    public void process(INode inode) {
      if (inode != null) {
        inode.computeQuotaUsage(bsps, counts, false);
        inode.destroyAndCollectBlocks(bsps, collectedBlocks, removedINodes);
      }
    }
  });
  return counts;
}
Example #22
Source File: TestDiff.java From big-c with Apache License 2.0
static void delete(INode inode, final List<INode> current,
    Diff<byte[], INode> diff) {
  final int i = Diff.search(current, inode.getKey());
  current.remove(i);
  if (diff != null) {
    // test undo with 1/UNDO_TEST_P probability
    final boolean testUndo = RANDOM.nextInt(UNDO_TEST_P) == 0;
    String before = null;
    if (testUndo) {
      before = diff.toString();
    }

    final UndoInfo<INode> undoInfo = diff.delete(inode);

    if (testUndo) {
      final String after = diff.toString();
      // undo
      diff.undoDelete(inode, undoInfo);
      assertDiff(before, diff);
      // re-do
      diff.delete(inode);
      assertDiff(after, diff);
    }
  }
}
Example #23
Source File: DirectoryWithSnapshotFeature.java From hadoop with Apache License 2.0
public void computeContentSummary4Snapshot(final BlockStoragePolicySuite bsps,
    final ContentCounts counts) {
  // Create a new blank summary context for blocking processing of subtree.
  ContentSummaryComputationContext summary =
      new ContentSummaryComputationContext(bsps);
  for (DirectoryDiff d : diffs) {
    for (INode deleted : d.getChildrenDiff().getList(ListType.DELETED)) {
      deleted.computeContentSummary(summary);
    }
  }
  // Add the counts from deleted trees.
  counts.addContents(summary.getCounts());
  // Add the deleted directory count.
  counts.addContent(Content.DIRECTORY, diffs.asList().size());
}
Example #24
Source File: TestNoHistorical.java From NNAnalytics with Apache License 2.0
@BeforeClass
public static void beforeClass() throws Exception {
  GSetGenerator gSetGenerator = new GSetGenerator();
  gSetGenerator.clear();
  GSet<INode, INodeWithAdditionalFields> gset = gSetGenerator.getGSet((short) 3, 10, 500);
  nna = new WebServerMain();
  ApplicationConfiguration conf = new ApplicationConfiguration();
  conf.set("ldap.enable", "false");
  conf.set("authorization.enable", "false");
  conf.set("nna.historical", "false");
  conf.set("nna.base.dir", MiniDFSCluster.getBaseDirectory());
  conf.set("nna.web.base.dir", "src/main/resources/webapps/nna");
  nna.init(conf, gset);
  hostPort = new HttpHost("localhost", 4567);
}
Example #25
Source File: BaseOperation.java From NNAnalytics with Apache License 2.0
BaseOperation(
    Collection<INode> toPerform,
    String owner,
    String query,
    String logbaseDir,
    FileSystem fs) {
  this.pathsOperated = new ArrayList<>();
  this.iterator = toPerform.iterator();
  this.nextToOperate = iterator.hasNext() ? iterator.next() : null;
  this.toOperate = toPerform;
  this.owner = owner;
  this.identity = UUID.randomUUID().toString();
  this.totalToOperate = toPerform.size();
  this.query = query;
  final boolean gzipLog = toPerform.size() >= (100 * 1000);
  this.log = new OperationLog(identity, logbaseDir, query, owner, gzipLog);
  this.fs = fs;
}
Example #26
Source File: TestINodeTransfer.java From NNAnalytics with Apache License 2.0
@Test
public void filterFiles() {
  List<INodeWithAdditionalFields> allFiles =
      StreamSupport.stream(gset.spliterator(), true)
          .filter(INode::isFile)
          .collect(Collectors.toList());
  assertThat(allFiles.size(), is(GSetGenerator.FILES_MADE));
}
Example #27
Source File: DirectoryWithSnapshotFeature.java From big-c with Apache License 2.0
/** Get the list of INodeDirectory contained in the deleted list */
private void getDirsInDeleted(List<INodeDirectory> dirList) {
  for (INode node : getList(ListType.DELETED)) {
    if (node.isDirectory()) {
      dirList.add(node.asDirectory());
    }
  }
}
Example #28
Source File: GSetSeperatorWrapper.java From NNAnalytics with Apache License 2.0
@Override
public INodeWithAdditionalFields get(INode key) {
  INodeWithAdditionalFields val;
  val = fileSet.get(key);
  if (val != null) {
    return val;
  }
  return dirSet.get(key);
}
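The wrapper simply tries the file map first and falls back to the directory map. The same two-map lookup in a self-contained form, with hypothetical names in place of the NNAnalytics types:

import java.util.HashMap;
import java.util.Map;

public class TwoMapLookup {
  private final Map<String, String> fileSet = new HashMap<>();
  private final Map<String, String> dirSet = new HashMap<>();

  // Try the file map first, then fall back to the directory map.
  public String get(String key) {
    String val = fileSet.get(key);
    if (val != null) {
      return val;
    }
    return dirSet.get(key);
  }

  public static void main(String[] args) {
    TwoMapLookup wrapper = new TwoMapLookup();
    wrapper.fileSet.put("/a/file", "file-inode");
    wrapper.dirSet.put("/a", "dir-inode");
    System.out.println(wrapper.get("/a/file"));  // file-inode
    System.out.println(wrapper.get("/a"));       // dir-inode
    System.out.println(wrapper.get("/missing")); // null
  }
}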
Example #29
Source File: DirectoryWithSnapshotFeature.java From hadoop with Apache License 2.0
/**
 * @return If there is no corresponding directory diff for the given
 *         snapshot, this means that the current children list should be
 *         returned for the snapshot. Otherwise we calculate the children
 *         list for the snapshot and return it.
 */
public ReadOnlyList<INode> getChildrenList(INodeDirectory currentINode,
    final int snapshotId) {
  final DirectoryDiff diff = diffs.getDiffById(snapshotId);
  return diff != null ? diff.getChildrenList(currentINode)
      : currentINode.getChildrenList(Snapshot.CURRENT_STATE_ID);
}
Example #30
Source File: TestDiff.java From hadoop with Apache License 2.0
static void modify(INode inode, final List<INode> current,
    Diff<byte[], INode> diff) {
  final int i = Diff.search(current, inode.getKey());
  Assert.assertTrue(i >= 0);
  final INodeDirectory oldinode = (INodeDirectory) current.get(i);
  final INodeDirectory newinode = new INodeDirectory(oldinode, false,
      oldinode.getFeatures());
  newinode.setModificationTime(oldinode.getModificationTime() + 1);

  current.set(i, newinode);
  if (diff != null) {
    // test undo with 1/UNDO_TEST_P probability
    final boolean testUndo = RANDOM.nextInt(UNDO_TEST_P) == 0;
    String before = null;
    if (testUndo) {
      before = diff.toString();
    }

    final UndoInfo<INode> undoInfo = diff.modify(oldinode, newinode);

    if (testUndo) {
      final String after = diff.toString();
      // undo
      diff.undoModify(oldinode, newinode, undoInfo);
      assertDiff(before, diff);
      // re-do
      diff.modify(oldinode, newinode);
      assertDiff(after, diff);
    }
  }
}
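Both TestDiff helpers follow the same discipline: capture the state as a string, apply the operation, undo it, and compare against the capture, then re-do and compare again. A tiny sketch of that round-trip check against a plain list, with hypothetical names:

import java.util.ArrayList;
import java.util.List;

public class UndoRedoCheck {
  static void check(boolean ok, String msg) {
    if (!ok) throw new AssertionError(msg);
  }

  public static void main(String[] args) {
    List<String> state = new ArrayList<>(List.of("a", "b"));
    String before = state.toString();

    // apply the operation and capture the resulting state
    state.add("c");
    String after = state.toString();

    // undo, then verify we are back at the captured "before" state
    state.remove("c");
    check(state.toString().equals(before), "undo did not restore state");

    // re-do, then verify we reach the captured "after" state again
    state.add("c");
    check(state.toString().equals(after), "re-do did not reproduce state");
    System.out.println("undo/redo round-trip verified");
  }
}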