org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous Java Examples
The following examples show how to use
org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous.
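Two constructor forms of BlockInfoContiguous recur throughout these examples: one that allocates an empty block record sized for a given replication factor, and one that copies an existing Block. The fragment below is a minimal sketch with made-up values, written in the same fragment style as the examples; note that BlockInfoContiguous lives inside the NameNode and is not public HDFS API.

// A minimal sketch (illustrative values only).
short replication = 3;

// Form 1: an empty block record with storage slots for 'replication'
// replicas, as in the TestINodeFile examples below.
BlockInfoContiguous emptyBlock = new BlockInfoContiguous(replication);

// Form 2: copy an existing Block (id, length, generation stamp), as in
// the TestCommitBlockSynchronization examples below.
Block block = new Block(1L, 0L, 1001L);
BlockInfoContiguous fromBlock = new BlockInfoContiguous(block, replication);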
Example #1
Source File: TestINodeFile.java From hadoop with Apache License 2.0

/**
 * Creates the required number of files with one block each
 * @param nCount Number of INodes to create
 * @return Array of INode files
 */
private INodeFile[] createINodeFiles(int nCount, String fileNamePrefix) {
  if (nCount <= 0) {
    return new INodeFile[1];
  }

  replication = 3;
  preferredBlockSize = 128 * 1024 * 1024;
  INodeFile[] iNodes = new INodeFile[nCount];
  for (int i = 0; i < nCount; i++) {
    iNodes[i] = new INodeFile(i, null, perm, 0L, 0L, null, replication,
        preferredBlockSize, (byte) 0);
    iNodes[i].setLocalName(DFSUtil.string2Bytes(fileNamePrefix + i));
    BlockInfoContiguous newblock = new BlockInfoContiguous(replication);
    iNodes[i].addBlock(newblock);
  }
  return iNodes;
}
Example #2
Source File: INodeFile.java From hadoop with Apache License 2.0

/**
 * Remove a block from the block list. This block should be
 * the last one on the list.
 */
boolean removeLastBlock(Block oldblock) {
  Preconditions.checkState(isUnderConstruction(),
      "file is no longer under construction");
  if (blocks == null || blocks.length == 0) {
    return false;
  }
  int size_1 = blocks.length - 1;
  if (!blocks[size_1].equals(oldblock)) {
    return false;
  }

  // copy to a new list
  BlockInfoContiguous[] newlist = new BlockInfoContiguous[size_1];
  System.arraycopy(blocks, 0, newlist, 0, size_1);
  setBlocks(newlist);
  return true;
}
Example #3
Source File: INodeFile.java From hadoop with Apache License 2.0

/** append array of blocks to this.blocks */
void concatBlocks(INodeFile[] inodes) {
  int size = this.blocks.length;
  int totalAddedBlocks = 0;
  for (INodeFile f : inodes) {
    totalAddedBlocks += f.blocks.length;
  }

  BlockInfoContiguous[] newlist =
      new BlockInfoContiguous[size + totalAddedBlocks];
  System.arraycopy(this.blocks, 0, newlist, 0, size);

  for (INodeFile in : inodes) {
    System.arraycopy(in.blocks, 0, newlist, size, in.blocks.length);
    size += in.blocks.length;
  }

  setBlocks(newlist);
  updateBlockCollection();
}
Example #4
Source File: INodeFile.java From big-c with Apache License 2.0

@Override
public void destroyAndCollectBlocks(BlockStoragePolicySuite bsps,
    BlocksMapUpdateInfo collectedBlocks, final List<INode> removedINodes) {
  if (blocks != null && collectedBlocks != null) {
    for (BlockInfoContiguous blk : blocks) {
      collectedBlocks.addDeleteBlock(blk);
      blk.setBlockCollection(null);
    }
  }
  setBlocks(null);
  if (getAclFeature() != null) {
    AclStorage.removeAclFeature(getAclFeature());
  }
  clear();
  removedINodes.add(this);

  FileWithSnapshotFeature sf = getFileWithSnapshotFeature();
  if (sf != null) {
    sf.getDiffs().destroyAndCollectSnapshotBlocks(collectedBlocks);
    sf.clearDiffs();
  }
}
Example #5
Source File: INodeFile.java From big-c with Apache License 2.0

public void collectBlocksBeyondSnapshot(BlockInfoContiguous[] snapshotBlocks,
    BlocksMapUpdateInfo collectedBlocks) {
  BlockInfoContiguous[] oldBlocks = getBlocks();
  if (snapshotBlocks == null || oldBlocks == null) {
    return;
  }
  // Skip blocks in common between the file and the snapshot
  int n = 0;
  while (n < oldBlocks.length && n < snapshotBlocks.length &&
      oldBlocks[n] == snapshotBlocks[n]) {
    n++;
  }
  truncateBlocksTo(n);
  // Collect the remaining blocks of the file
  while (n < oldBlocks.length) {
    collectedBlocks.addDeleteBlock(oldBlocks[n++]);
  }
}
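A hedged trace of the prefix-skipping logic above, with hypothetical block references:

// Illustration (hypothetical): file blocks [b0, b1, b2], snapshot blocks
// [b0, b1], where b0 and b1 are the very same objects in both arrays.
// The while loop compares by identity (==) and advances n to 2;
// truncateBlocksTo(2) keeps the shared prefix [b0, b1] in the file, and
// only b2 is queued for deletion. Blocks still referenced by the
// snapshot are never collected here.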
Example #6
Source File: TestCommitBlockSynchronization.java From big-c with Apache License 2.0

@Test
public void testCommitBlockSynchronizationWithClose() throws IOException {
  INodeFile file = mockFileUnderConstruction();
  Block block = new Block(blockId, length, genStamp);
  FSNamesystem namesystemSpy = makeNameSystemSpy(block, file);
  DatanodeID[] newTargets = new DatanodeID[0];

  ExtendedBlock lastBlock = new ExtendedBlock();
  namesystemSpy.commitBlockSynchronization(
      lastBlock, genStamp, length, true, false, newTargets, null);

  // Repeat the call to make sure it returns true
  namesystemSpy.commitBlockSynchronization(
      lastBlock, genStamp, length, true, false, newTargets, null);

  BlockInfoContiguous completedBlockInfo = new BlockInfoContiguous(
      block, (short) 1);
  completedBlockInfo.setBlockCollection(file);
  completedBlockInfo.setGenerationStamp(genStamp);
  doReturn(completedBlockInfo).when(namesystemSpy)
      .getStoredBlock(any(Block.class));
  doReturn(completedBlockInfo).when(file).getLastBlock();

  namesystemSpy.commitBlockSynchronization(
      lastBlock, genStamp, length, true, false, newTargets, null);
}
Example #7
Source File: INodeFile.java From big-c with Apache License 2.0

/** append array of blocks to this.blocks */
void concatBlocks(INodeFile[] inodes) {
  int size = this.blocks.length;
  int totalAddedBlocks = 0;
  for (INodeFile f : inodes) {
    totalAddedBlocks += f.blocks.length;
  }

  BlockInfoContiguous[] newlist =
      new BlockInfoContiguous[size + totalAddedBlocks];
  System.arraycopy(this.blocks, 0, newlist, 0, size);

  for (INodeFile in : inodes) {
    System.arraycopy(in.blocks, 0, newlist, size, in.blocks.length);
    size += in.blocks.length;
  }

  setBlocks(newlist);
  updateBlockCollection();
}
Example #8
Source File: INodeFile.java From hadoop with Apache License 2.0

@Override
public void destroyAndCollectBlocks(BlockStoragePolicySuite bsps,
    BlocksMapUpdateInfo collectedBlocks, final List<INode> removedINodes) {
  if (blocks != null && collectedBlocks != null) {
    for (BlockInfoContiguous blk : blocks) {
      collectedBlocks.addDeleteBlock(blk);
      blk.setBlockCollection(null);
    }
  }
  setBlocks(BlockInfoContiguous.EMPTY_ARRAY);
  if (getAclFeature() != null) {
    AclStorage.removeAclFeature(getAclFeature());
  }
  clear();
  removedINodes.add(this);

  FileWithSnapshotFeature sf = getFileWithSnapshotFeature();
  if (sf != null) {
    sf.getDiffs().destroyAndCollectSnapshotBlocks(collectedBlocks);
    sf.clearDiffs();
  }
}
Example #9
Source File: INodeFile.java From big-c with Apache License 2.0

/**
 * Remove a block from the block list. This block should be
 * the last one on the list.
 */
boolean removeLastBlock(Block oldblock) {
  Preconditions.checkState(isUnderConstruction(),
      "file is no longer under construction");
  if (blocks == null || blocks.length == 0) {
    return false;
  }
  int size_1 = blocks.length - 1;
  if (!blocks[size_1].equals(oldblock)) {
    return false;
  }

  // copy to a new list
  BlockInfoContiguous[] newlist = new BlockInfoContiguous[size_1];
  System.arraycopy(blocks, 0, newlist, 0, size_1);
  setBlocks(newlist);
  return true;
}
Example #10
Source File: INodeFile.java From hadoop with Apache License 2.0

public void collectBlocksBeyondSnapshot(BlockInfoContiguous[] snapshotBlocks,
    BlocksMapUpdateInfo collectedBlocks) {
  BlockInfoContiguous[] oldBlocks = getBlocks();
  if (snapshotBlocks == null || oldBlocks == null) {
    return;
  }
  // Skip blocks in common between the file and the snapshot
  int n = 0;
  while (n < oldBlocks.length && n < snapshotBlocks.length &&
      oldBlocks[n] == snapshotBlocks[n]) {
    n++;
  }
  truncateBlocksTo(n);
  // Collect the remaining blocks of the file
  while (n < oldBlocks.length) {
    collectedBlocks.addDeleteBlock(oldBlocks[n++]);
  }
}
Example #11
Source File: INodeFile.java From hadoop with Apache License 2.0

/** Exclude blocks collected for deletion that belong to a snapshot. */
void excludeSnapshotBlocks(int snapshotId,
    BlocksMapUpdateInfo collectedBlocks) {
  if (collectedBlocks == null || collectedBlocks.getToDeleteList().isEmpty()) {
    return;
  }
  FileWithSnapshotFeature sf = getFileWithSnapshotFeature();
  if (sf == null) {
    return;
  }
  BlockInfoContiguous[] snapshotBlocks =
      getDiffs().findEarlierSnapshotBlocks(snapshotId);
  if (snapshotBlocks == null) {
    return;
  }
  List<Block> toDelete = collectedBlocks.getToDeleteList();
  for (Block blk : snapshotBlocks) {
    if (toDelete.contains(blk)) {
      collectedBlocks.removeDeleteBlock(blk);
    }
  }
}
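To see what this buys, consider a hedged, hypothetical trace:

// Illustration (hypothetical): deleting a snapshot queued [b1, b2] in
// collectedBlocks, while an earlier snapshot of the file had recorded
// blocks [b0, b1]. b1 appears in the earlier snapshot's list, so it is
// removed from the delete queue; only b2, which no earlier snapshot
// references, remains scheduled for deletion.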
Example #12
Source File: TestEditLog.java From hadoop with Apache License 2.0

@Override
public void run() {
  PermissionStatus p = namesystem.createFsOwnerPermissions(
      new FsPermission((short) 0777));
  FSEditLog editLog = namesystem.getEditLog();

  for (int i = 0; i < numTransactions; i++) {
    INodeFile inode = new INodeFile(namesystem.dir.allocateNewInodeId(),
        null, p, 0L, 0L, BlockInfoContiguous.EMPTY_ARRAY, replication,
        blockSize);
    inode.toUnderConstruction("", "");

    editLog.logOpenFile("/filename" + (startIndex + i), inode, false, false);
    editLog.logCloseFile("/filename" + (startIndex + i), inode);
    editLog.logSync();
  }
}
Example #13
Source File: TestINodeFile.java From big-c with Apache License 2.0

/**
 * Creates the required number of files with one block each
 * @param nCount Number of INodes to create
 * @return Array of INode files
 */
private INodeFile[] createINodeFiles(int nCount, String fileNamePrefix) {
  if (nCount <= 0) {
    return new INodeFile[1];
  }

  replication = 3;
  preferredBlockSize = 128 * 1024 * 1024;
  INodeFile[] iNodes = new INodeFile[nCount];
  for (int i = 0; i < nCount; i++) {
    iNodes[i] = new INodeFile(i, null, perm, 0L, 0L, null, replication,
        preferredBlockSize, (byte) 0);
    iNodes[i].setLocalName(DFSUtil.string2Bytes(fileNamePrefix + i));
    BlockInfoContiguous newblock = new BlockInfoContiguous(replication);
    iNodes[i].addBlock(newblock);
  }
  return iNodes;
}
Example #14
Source File: TestGetBlockLocations.java From big-c with Apache License 2.0

private static FSNamesystem setupFileSystem() throws IOException {
  Configuration conf = new Configuration();
  conf.setLong(DFS_NAMENODE_ACCESSTIME_PRECISION_KEY, 1L);
  FSEditLog editlog = mock(FSEditLog.class);
  FSImage image = mock(FSImage.class);
  when(image.getEditLog()).thenReturn(editlog);
  final FSNamesystem fsn = new FSNamesystem(conf, image, true);

  final FSDirectory fsd = fsn.getFSDirectory();
  INodesInPath iip = fsd.getINodesInPath("/", true);
  PermissionStatus perm = new PermissionStatus(
      "hdfs", "supergroup",
      FsPermission.createImmutable((short) 0x1ff));
  final INodeFile file = new INodeFile(
      MOCK_INODE_ID, FILE_NAME.getBytes(Charsets.UTF_8),
      perm, 1, 1, new BlockInfoContiguous[] {}, (short) 1,
      DFS_BLOCK_SIZE_DEFAULT);
  fsn.getFSDirectory().addINode(iip, file);
  return fsn;
}
Example #15
Source File: FileWithSnapshotFeature.java From hadoop with Apache License 2.0

/**
 * If some blocks at the end of the block list no longer belong to
 * any inode, collect them and update the block list.
 */
public void collectBlocksAndClear(final BlockStoragePolicySuite bsps,
    final INodeFile file, final BlocksMapUpdateInfo info,
    final List<INode> removedINodes) {
  // check if everything is deleted.
  if (isCurrentFileDeleted() && getDiffs().asList().isEmpty()) {
    file.destroyAndCollectBlocks(bsps, info, removedINodes);
    return;
  }
  // find max file size.
  final long max;
  FileDiff diff = getDiffs().getLast();
  if (isCurrentFileDeleted()) {
    max = diff == null ? 0 : diff.getFileSize();
  } else {
    max = file.computeFileSize();
  }

  // Collect blocks that should be deleted
  FileDiff last = diffs.getLast();
  BlockInfoContiguous[] snapshotBlocks =
      last == null ? null : last.getBlocks();
  if (snapshotBlocks == null) {
    file.collectBlocksBeyondMax(max, info);
  } else {
    file.collectBlocksBeyondSnapshot(snapshotBlocks, info);
  }
}
Example #16
Source File: DFSTestUtil.java From hadoop with Apache License 2.0

/**
 * @return the node which is expected to run the recovery of the
 * given block, which is known to be under construction inside the
 * given NameNode.
 */
public static DatanodeDescriptor getExpectedPrimaryNode(NameNode nn,
    ExtendedBlock blk) {
  BlockManager bm0 = nn.getNamesystem().getBlockManager();
  BlockInfoContiguous storedBlock = bm0.getStoredBlock(blk.getLocalBlock());
  assertTrue("Block " + blk + " should be under construction, " +
      "got: " + storedBlock,
      storedBlock instanceof BlockInfoContiguousUnderConstruction);
  BlockInfoContiguousUnderConstruction ucBlock =
      (BlockInfoContiguousUnderConstruction) storedBlock;
  // We expect that the replica with the most recent heart beat will be
  // the one to be in charge of the synchronization / recovery protocol.
  final DatanodeStorageInfo[] storages = ucBlock.getExpectedStorageLocations();
  DatanodeStorageInfo expectedPrimary = storages[0];
  long mostRecentLastUpdate = expectedPrimary.getDatanodeDescriptor()
      .getLastUpdateMonotonic();
  for (int i = 1; i < storages.length; i++) {
    final long lastUpdate = storages[i].getDatanodeDescriptor()
        .getLastUpdateMonotonic();
    if (lastUpdate > mostRecentLastUpdate) {
      expectedPrimary = storages[i];
      mostRecentLastUpdate = lastUpdate;
    }
  }
  return expectedPrimary.getDatanodeDescriptor();
}
Example #17
Source File: TestGetBlockLocations.java From hadoop with Apache License 2.0

private static FSNamesystem setupFileSystem() throws IOException {
  Configuration conf = new Configuration();
  conf.setLong(DFS_NAMENODE_ACCESSTIME_PRECISION_KEY, 1L);
  FSEditLog editlog = mock(FSEditLog.class);
  FSImage image = mock(FSImage.class);
  when(image.getEditLog()).thenReturn(editlog);
  final FSNamesystem fsn = new FSNamesystem(conf, image, true);

  final FSDirectory fsd = fsn.getFSDirectory();
  INodesInPath iip = fsd.getINodesInPath("/", true);
  PermissionStatus perm = new PermissionStatus(
      "hdfs", "supergroup",
      FsPermission.createImmutable((short) 0x1ff));
  final INodeFile file = new INodeFile(
      MOCK_INODE_ID, FILE_NAME.getBytes(Charsets.UTF_8),
      perm, 1, 1, new BlockInfoContiguous[] {}, (short) 1,
      DFS_BLOCK_SIZE_DEFAULT);
  fsn.getFSDirectory().addINode(iip, file);
  return fsn;
}
Example #18
Source File: TestEditLog.java From big-c with Apache License 2.0

@Override
public void run() {
  PermissionStatus p = namesystem.createFsOwnerPermissions(
      new FsPermission((short) 0777));
  FSEditLog editLog = namesystem.getEditLog();

  for (int i = 0; i < numTransactions; i++) {
    INodeFile inode = new INodeFile(namesystem.dir.allocateNewInodeId(),
        null, p, 0L, 0L, BlockInfoContiguous.EMPTY_ARRAY, replication,
        blockSize);
    inode.toUnderConstruction("", "");

    editLog.logOpenFile("/filename" + (startIndex + i), inode, false, false);
    editLog.logCloseFile("/filename" + (startIndex + i), inode);
    editLog.logSync();
  }
}
Example #19
Source File: INodeFile.java From big-c with Apache License 2.0

/** @return blocks of the file corresponding to the snapshot. */
public BlockInfoContiguous[] getBlocks(int snapshot) {
  if (snapshot == CURRENT_STATE_ID || getDiffs() == null) {
    return getBlocks();
  }
  FileDiff diff = getDiffs().getDiffById(snapshot);
  BlockInfoContiguous[] snapshotBlocks =
      diff == null ? getBlocks() : diff.getBlocks();
  if (snapshotBlocks != null) {
    return snapshotBlocks;
  }
  // Blocks are not in the current snapshot
  // Find next snapshot with blocks present or return current file blocks
  snapshotBlocks = getDiffs().findLaterSnapshotBlocks(snapshot);
  return (snapshotBlocks == null) ? getBlocks() : snapshotBlocks;
}
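The fall-through to findLaterSnapshotBlocks is the subtle part: a FileDiff records a block list only when the blocks change after its snapshot, so the state of an older snapshot may be preserved in a later diff. A hedged trace with hypothetical snapshot ids:

// Illustration (hypothetical snapshot ids 1 < 2 < 3):
//   - the diff for snapshot 2 exists but recorded no block list
//     (the blocks were not modified right after snapshot 2),
//   - the diff for snapshot 3 recorded [b0, b1].
// getBlocks(2) finds the snapshot-2 diff, sees null blocks, and falls
// through to findLaterSnapshotBlocks(2), which yields [b0, b1] -- the
// block list the file had at snapshot 2, preserved by the first later
// diff that saved one.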
Example #20
Source File: INodeFile.java From big-c with Apache License 2.0

/**
 * compute the quota usage change for a truncate op
 * @param newLength the length for truncation
 * @return the quota usage delta (not considering replication factor)
 */
long computeQuotaDeltaForTruncate(final long newLength) {
  final BlockInfoContiguous[] blocks = getBlocks();
  if (blocks == null || blocks.length == 0) {
    return 0;
  }

  int n = 0;
  long size = 0;
  for (; n < blocks.length && newLength > size; n++) {
    size += blocks[n].getNumBytes();
  }
  final boolean onBoundary = size == newLength;

  long truncateSize = 0;
  for (int i = (onBoundary ? n : n - 1); i < blocks.length; i++) {
    truncateSize += blocks[i].getNumBytes();
  }

  FileWithSnapshotFeature sf = getFileWithSnapshotFeature();
  if (sf != null) {
    FileDiff diff = sf.getDiffs().getLast();
    BlockInfoContiguous[] sblocks = diff != null ? diff.getBlocks() : null;
    if (sblocks != null) {
      for (int i = (onBoundary ? n : n - 1); i < blocks.length
          && i < sblocks.length && blocks[i].equals(sblocks[i]); i++) {
        truncateSize -= blocks[i].getNumBytes();
      }
    }
  }
  return onBoundary ? -truncateSize : (getPreferredBlockSize() - truncateSize);
}
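A worked example may help; the numbers below are illustrative, not taken from the source:

// Worked example (illustrative numbers):
// blocks = [128 MB, 64 MB], getPreferredBlockSize() = 128 MB,
// newLength = 160 MB.
//   - The first loop stops with n = 2 and size = 192 MB.
//   - onBoundary = (192 MB == 160 MB) = false, so truncateSize sums
//     blocks from index n - 1 = 1: truncateSize = 64 MB.
//   - With no snapshot diff, the result is
//     getPreferredBlockSize() - truncateSize = 128 - 64 = +64 MB: a
//     mid-block truncate can increase the charged quota, because the
//     remaining partial last block is charged at the full preferred
//     block size.
// Truncating exactly on a block boundary (newLength = 128 MB) instead
// ends with n = 1, onBoundary = true, and returns -truncateSize = -64 MB,
// releasing the dropped block's bytes.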
Example #21
Source File: TestCommitBlockSynchronization.java From big-c with Apache License 2.0

@Test
public void testCommitBlockSynchronization() throws IOException {
  INodeFile file = mockFileUnderConstruction();
  Block block = new Block(blockId, length, genStamp);
  FSNamesystem namesystemSpy = makeNameSystemSpy(block, file);
  DatanodeID[] newTargets = new DatanodeID[0];

  ExtendedBlock lastBlock = new ExtendedBlock();
  namesystemSpy.commitBlockSynchronization(
      lastBlock, genStamp, length, false, false, newTargets, null);

  // Repeat the call to make sure it does not throw
  namesystemSpy.commitBlockSynchronization(
      lastBlock, genStamp, length, false, false, newTargets, null);

  // Simulate 'completing' the block.
  BlockInfoContiguous completedBlockInfo = new BlockInfoContiguous(
      block, (short) 1);
  completedBlockInfo.setBlockCollection(file);
  completedBlockInfo.setGenerationStamp(genStamp);
  doReturn(completedBlockInfo).when(namesystemSpy)
      .getStoredBlock(any(Block.class));
  doReturn(completedBlockInfo).when(file).getLastBlock();

  // Repeat the call to make sure it does not throw
  namesystemSpy.commitBlockSynchronization(
      lastBlock, genStamp, length, false, false, newTargets, null);
}
Example #22
Source File: FSDirectory.java From big-c with Apache License 2.0

/**
 * Add a block to the file. Returns a reference to the added block.
 */
BlockInfoContiguous addBlock(String path, INodesInPath inodesInPath,
    Block block, DatanodeStorageInfo[] targets) throws IOException {
  writeLock();
  try {
    final INodeFile fileINode = inodesInPath.getLastINode().asFile();
    Preconditions.checkState(fileINode.isUnderConstruction());

    // check quota limits and updated space consumed
    updateCount(inodesInPath, 0, fileINode.getPreferredBlockSize(),
        fileINode.getBlockReplication(), true);

    // associate new last block for the file
    BlockInfoContiguousUnderConstruction blockInfo =
        new BlockInfoContiguousUnderConstruction(
            block, fileINode.getFileReplication(),
            BlockUCState.UNDER_CONSTRUCTION, targets);
    getBlockManager().addBlockCollection(blockInfo, fileINode);
    fileINode.addBlock(blockInfo);

    if (NameNode.stateChangeLog.isDebugEnabled()) {
      NameNode.stateChangeLog.debug("DIR* FSDirectory.addBlock: " + path
          + " with " + block + " block is added to the in-memory "
          + "file system");
    }
    return blockInfo;
  } finally {
    writeUnlock();
  }
}
Example #23
Source File: TestSnapshotBlocksMap.java From hadoop with Apache License 2.0

/**
 * 1. rename under-construction file with 0-sized blocks after snapshot.
 * 2. delete the renamed directory.
 * make sure we delete the 0-sized block.
 * see HDFS-5476.
 */
@Test
public void testDeletionWithZeroSizeBlock3() throws Exception {
  final Path foo = new Path("/foo");
  final Path subDir = new Path(foo, "sub");
  final Path bar = new Path(subDir, "bar");
  DFSTestUtil.createFile(hdfs, bar, BLOCKSIZE, REPLICATION, 0L);

  hdfs.append(bar);

  INodeFile barNode = fsdir.getINode4Write(bar.toString()).asFile();
  BlockInfoContiguous[] blks = barNode.getBlocks();
  assertEquals(1, blks.length);
  ExtendedBlock previous = new ExtendedBlock(fsn.getBlockPoolId(), blks[0]);
  cluster.getNameNodeRpc()
      .addBlock(bar.toString(), hdfs.getClient().getClientName(), previous,
          null, barNode.getId(), null);

  SnapshotTestHelper.createSnapshot(hdfs, foo, "s1");

  // rename bar
  final Path bar2 = new Path(subDir, "bar2");
  hdfs.rename(bar, bar2);

  INodeFile bar2Node = fsdir.getINode4Write(bar2.toString()).asFile();
  blks = bar2Node.getBlocks();
  assertEquals(2, blks.length);
  assertEquals(BLOCKSIZE, blks[0].getNumBytes());
  assertEquals(0, blks[1].getNumBytes());

  // delete subDir
  hdfs.delete(subDir, true);

  final Path sbar = SnapshotTestHelper.getSnapshotPath(foo, "s1", "sub/bar");
  barNode = fsdir.getINode(sbar.toString()).asFile();
  blks = barNode.getBlocks();
  assertEquals(1, blks.length);
  assertEquals(BLOCKSIZE, blks[0].getNumBytes());
}
Example #24
Source File: TestSnapshotBlocksMap.java From hadoop with Apache License 2.0

/**
 * Make sure we delete 0-sized block when deleting an under-construction file
 */
@Test
public void testDeletionWithZeroSizeBlock2() throws Exception {
  final Path foo = new Path("/foo");
  final Path subDir = new Path(foo, "sub");
  final Path bar = new Path(subDir, "bar");
  DFSTestUtil.createFile(hdfs, bar, BLOCKSIZE, REPLICATION, 0L);

  hdfs.append(bar);

  INodeFile barNode = fsdir.getINode4Write(bar.toString()).asFile();
  BlockInfoContiguous[] blks = barNode.getBlocks();
  assertEquals(1, blks.length);
  ExtendedBlock previous = new ExtendedBlock(fsn.getBlockPoolId(), blks[0]);
  cluster.getNameNodeRpc()
      .addBlock(bar.toString(), hdfs.getClient().getClientName(), previous,
          null, barNode.getId(), null);

  SnapshotTestHelper.createSnapshot(hdfs, foo, "s1");

  barNode = fsdir.getINode4Write(bar.toString()).asFile();
  blks = barNode.getBlocks();
  assertEquals(2, blks.length);
  assertEquals(BLOCKSIZE, blks[0].getNumBytes());
  assertEquals(0, blks[1].getNumBytes());

  hdfs.delete(subDir, true);

  final Path sbar = SnapshotTestHelper.getSnapshotPath(foo, "s1", "sub/bar");
  barNode = fsdir.getINode(sbar.toString()).asFile();
  blks = barNode.getBlocks();
  assertEquals(1, blks.length);
  assertEquals(BLOCKSIZE, blks[0].getNumBytes());
}
Example #25
Source File: TestSnapshotBlocksMap.java From hadoop with Apache License 2.0

/**
 * Make sure we delete 0-sized block when deleting an INodeFileUCWithSnapshot
 */
@Test
public void testDeletionWithZeroSizeBlock() throws Exception {
  final Path foo = new Path("/foo");
  final Path bar = new Path(foo, "bar");
  DFSTestUtil.createFile(hdfs, bar, BLOCKSIZE, REPLICATION, 0L);

  SnapshotTestHelper.createSnapshot(hdfs, foo, "s0");
  hdfs.append(bar);

  INodeFile barNode = fsdir.getINode4Write(bar.toString()).asFile();
  BlockInfoContiguous[] blks = barNode.getBlocks();
  assertEquals(1, blks.length);
  assertEquals(BLOCKSIZE, blks[0].getNumBytes());
  ExtendedBlock previous = new ExtendedBlock(fsn.getBlockPoolId(), blks[0]);
  cluster.getNameNodeRpc()
      .addBlock(bar.toString(), hdfs.getClient().getClientName(), previous,
          null, barNode.getId(), null);

  SnapshotTestHelper.createSnapshot(hdfs, foo, "s1");

  barNode = fsdir.getINode4Write(bar.toString()).asFile();
  blks = barNode.getBlocks();
  assertEquals(2, blks.length);
  assertEquals(BLOCKSIZE, blks[0].getNumBytes());
  assertEquals(0, blks[1].getNumBytes());

  hdfs.delete(bar, true);
  final Path sbar = SnapshotTestHelper.getSnapshotPath(foo, "s1",
      bar.getName());
  barNode = fsdir.getINode(sbar.toString()).asFile();
  blks = barNode.getBlocks();
  assertEquals(1, blks.length);
  assertEquals(BLOCKSIZE, blks[0].getNumBytes());
}
Example #26
Source File: TestSnapshotBlocksMap.java From hadoop with Apache License 2.0

static INodeFile assertBlockCollection(String path, int numBlocks,
    final FSDirectory dir, final BlockManager blkManager) throws Exception {
  final INodeFile file = INodeFile.valueOf(dir.getINode(path), path);
  assertEquals(numBlocks, file.getBlocks().length);
  for (BlockInfoContiguous b : file.getBlocks()) {
    assertBlockCollection(blkManager, file, b);
  }
  return file;
}
Example #27
Source File: TestSnapshotBlocksMap.java From big-c with Apache License 2.0

/**
 * 1. rename under-construction file with 0-sized blocks after snapshot.
 * 2. delete the renamed directory.
 * make sure we delete the 0-sized block.
 * see HDFS-5476.
 */
@Test
public void testDeletionWithZeroSizeBlock3() throws Exception {
  final Path foo = new Path("/foo");
  final Path subDir = new Path(foo, "sub");
  final Path bar = new Path(subDir, "bar");
  DFSTestUtil.createFile(hdfs, bar, BLOCKSIZE, REPLICATION, 0L);

  hdfs.append(bar);

  INodeFile barNode = fsdir.getINode4Write(bar.toString()).asFile();
  BlockInfoContiguous[] blks = barNode.getBlocks();
  assertEquals(1, blks.length);
  ExtendedBlock previous = new ExtendedBlock(fsn.getBlockPoolId(), blks[0]);
  cluster.getNameNodeRpc()
      .addBlock(bar.toString(), hdfs.getClient().getClientName(), previous,
          null, barNode.getId(), null);

  SnapshotTestHelper.createSnapshot(hdfs, foo, "s1");

  // rename bar
  final Path bar2 = new Path(subDir, "bar2");
  hdfs.rename(bar, bar2);

  INodeFile bar2Node = fsdir.getINode4Write(bar2.toString()).asFile();
  blks = bar2Node.getBlocks();
  assertEquals(2, blks.length);
  assertEquals(BLOCKSIZE, blks[0].getNumBytes());
  assertEquals(0, blks[1].getNumBytes());

  // delete subDir
  hdfs.delete(subDir, true);

  final Path sbar = SnapshotTestHelper.getSnapshotPath(foo, "s1", "sub/bar");
  barNode = fsdir.getINode(sbar.toString()).asFile();
  blks = barNode.getBlocks();
  assertEquals(1, blks.length);
  assertEquals(BLOCKSIZE, blks[0].getNumBytes());
}
Example #28
Source File: TestSnapshotBlocksMap.java From big-c with Apache License 2.0

/**
 * Make sure we delete 0-sized block when deleting an under-construction file
 */
@Test
public void testDeletionWithZeroSizeBlock2() throws Exception {
  final Path foo = new Path("/foo");
  final Path subDir = new Path(foo, "sub");
  final Path bar = new Path(subDir, "bar");
  DFSTestUtil.createFile(hdfs, bar, BLOCKSIZE, REPLICATION, 0L);

  hdfs.append(bar);

  INodeFile barNode = fsdir.getINode4Write(bar.toString()).asFile();
  BlockInfoContiguous[] blks = barNode.getBlocks();
  assertEquals(1, blks.length);
  ExtendedBlock previous = new ExtendedBlock(fsn.getBlockPoolId(), blks[0]);
  cluster.getNameNodeRpc()
      .addBlock(bar.toString(), hdfs.getClient().getClientName(), previous,
          null, barNode.getId(), null);

  SnapshotTestHelper.createSnapshot(hdfs, foo, "s1");

  barNode = fsdir.getINode4Write(bar.toString()).asFile();
  blks = barNode.getBlocks();
  assertEquals(2, blks.length);
  assertEquals(BLOCKSIZE, blks[0].getNumBytes());
  assertEquals(0, blks[1].getNumBytes());

  hdfs.delete(subDir, true);

  final Path sbar = SnapshotTestHelper.getSnapshotPath(foo, "s1", "sub/bar");
  barNode = fsdir.getINode(sbar.toString()).asFile();
  blks = barNode.getBlocks();
  assertEquals(1, blks.length);
  assertEquals(BLOCKSIZE, blks[0].getNumBytes());
}
Example #29
Source File: FSEditLogLoader.java From hadoop with Apache License 2.0

/**
 * Add a new block into the given INodeFile
 */
private void addNewBlock(FSDirectory fsDir, AddBlockOp op, INodeFile file)
    throws IOException {
  BlockInfoContiguous[] oldBlocks = file.getBlocks();
  Block pBlock = op.getPenultimateBlock();
  Block newBlock = op.getLastBlock();

  if (pBlock != null) { // the penultimate block is not null
    Preconditions.checkState(oldBlocks != null && oldBlocks.length > 0);
    // compare pBlock with the last block of oldBlocks
    Block oldLastBlock = oldBlocks[oldBlocks.length - 1];
    if (oldLastBlock.getBlockId() != pBlock.getBlockId()
        || oldLastBlock.getGenerationStamp() != pBlock.getGenerationStamp()) {
      throw new IOException(
          "Mismatched block IDs or generation stamps for the old last block of file "
              + op.getPath() + ", the old last block is " + oldLastBlock
              + ", and the block read from editlog is " + pBlock);
    }

    oldLastBlock.setNumBytes(pBlock.getNumBytes());
    if (oldLastBlock instanceof BlockInfoContiguousUnderConstruction) {
      fsNamesys.getBlockManager().forceCompleteBlock(file,
          (BlockInfoContiguousUnderConstruction) oldLastBlock);
      fsNamesys.getBlockManager().processQueuedMessagesForBlock(pBlock);
    }
  } else { // the penultimate block is null
    Preconditions.checkState(oldBlocks == null || oldBlocks.length == 0);
  }
  // add the new block
  BlockInfoContiguous newBI = new BlockInfoContiguousUnderConstruction(
      newBlock, file.getBlockReplication());
  fsNamesys.getBlockManager().addBlockCollection(newBI, file);
  file.addBlock(newBI);
  fsNamesys.getBlockManager().processQueuedMessagesForBlock(newBlock);
}
Example #30
Source File: INodeFile.java From big-c with Apache License 2.0

/**
 * Return the penultimate allocated block for this file.
 */
BlockInfoContiguous getPenultimateBlock() {
  if (blocks == null || blocks.length <= 1) {
    return null;
  }
  return blocks[blocks.length - 2];
}