Java Code Examples for org.apache.hadoop.hdfs.protocol.Block#setGenerationStamp()
The following examples show how to use org.apache.hadoop.hdfs.protocol.Block#setGenerationStamp().
You can go to the original project or source file by following the link above each example.
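Before the project examples, a minimal sketch of the API itself may help. The snippet below is not taken from any of the projects listed; the class name and all numeric values are illustrative. It constructs a Block, bumps its generation stamp as an append or block recovery would, and reads the fields back.

import org.apache.hadoop.hdfs.protocol.Block;

public class GenerationStampExample {
  public static void main(String[] args) {
    // Construct a block with an id, a length in bytes, and an initial
    // generation stamp (all three values here are made up).
    Block block = new Block(1000L, 4096L, 1L);

    // Assign a newer generation stamp, as happens when an append or a
    // block recovery bumps the stamp issued by the NameNode.
    block.setGenerationStamp(2L);

    // HDFS identifies a block by (blockId, generationStamp); replicas
    // whose stamp is older than the NameNode's are considered stale.
    System.out.println("blockId=" + block.getBlockId()
        + " genStamp=" + block.getGenerationStamp());
  }
}

Examples 2 through 5 below rely on exactly this identity rule: a lookup or remove with a mismatched generation stamp fails even when the block ID matches.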
Example 1
Source File: FSDataset.java From RDFS with Apache License 2.0
/** {@inheritDoc} */
public Block getStoredBlock(int namespaceId, long blkid,
    boolean useOnDiskLength) throws IOException {
  lock.readLock().lock();
  try {
    File blockfile = findBlockFile(namespaceId, blkid);
    if (blockfile == null) {
      return null;
    }
    File metafile = findMetaFile(blockfile, true);
    if (metafile == null) {
      return null;
    }
    Block block = new Block(blkid);
    if (useOnDiskLength) {
      block.setNumBytes(getOnDiskLength(namespaceId, block));
    } else {
      block.setNumBytes(getVisibleLength(namespaceId, block));
    }
    block.setGenerationStamp(parseGenerationStamp(blockfile, metafile));
    return block;
  } finally {
    lock.readLock().unlock();
  }
}
Example 2
Source File: TestReplicaMap.java From hadoop with Apache License 2.0
/**
 * Tests for ReplicasMap.get(Block) and ReplicasMap.get(long).
 */
@Test
public void testGet() {
  // Test 1: null argument throws invalid argument exception
  try {
    map.get(bpid, null);
    fail("Expected exception not thrown");
  } catch (IllegalArgumentException expected) {
  }

  // Test 2: successful lookup based on block
  assertNotNull(map.get(bpid, block));

  // Test 3: lookup failure - generation stamp mismatch
  Block b = new Block(block);
  b.setGenerationStamp(0);
  assertNull(map.get(bpid, b));

  // Test 4: lookup failure - blockID mismatch
  b.setGenerationStamp(block.getGenerationStamp());
  b.setBlockId(0);
  assertNull(map.get(bpid, b));

  // Test 5: successful lookup based on block ID
  assertNotNull(map.get(bpid, block.getBlockId()));

  // Test 6: failed lookup for invalid block ID
  assertNull(map.get(bpid, 0));
}
Example 3
Source File: TestReplicaMap.java From hadoop with Apache License 2.0
@Test
public void testRemove() {
  // Test 1: null argument throws invalid argument exception
  try {
    map.remove(bpid, null);
    fail("Expected exception not thrown");
  } catch (IllegalArgumentException expected) {
  }

  // Test 2: remove failure - generation stamp mismatch
  Block b = new Block(block);
  b.setGenerationStamp(0);
  assertNull(map.remove(bpid, b));

  // Test 3: remove failure - blockID mismatch
  b.setGenerationStamp(block.getGenerationStamp());
  b.setBlockId(0);
  assertNull(map.remove(bpid, b));

  // Test 4: remove success
  assertNotNull(map.remove(bpid, block));

  // Test 5: remove failure - invalid blockID
  assertNull(map.remove(bpid, 0));

  // Test 6: remove success
  map.add(bpid, new FinalizedReplica(block, null, null));
  assertNotNull(map.remove(bpid, block.getBlockId()));
}
Example 4
Source File: TestReplicaMap.java From big-c with Apache License 2.0
/**
 * Tests for ReplicasMap.get(Block) and ReplicasMap.get(long).
 */
@Test
public void testGet() {
  // Test 1: null argument throws invalid argument exception
  try {
    map.get(bpid, null);
    fail("Expected exception not thrown");
  } catch (IllegalArgumentException expected) {
  }

  // Test 2: successful lookup based on block
  assertNotNull(map.get(bpid, block));

  // Test 3: lookup failure - generation stamp mismatch
  Block b = new Block(block);
  b.setGenerationStamp(0);
  assertNull(map.get(bpid, b));

  // Test 4: lookup failure - blockID mismatch
  b.setGenerationStamp(block.getGenerationStamp());
  b.setBlockId(0);
  assertNull(map.get(bpid, b));

  // Test 5: successful lookup based on block ID
  assertNotNull(map.get(bpid, block.getBlockId()));

  // Test 6: failed lookup for invalid block ID
  assertNull(map.get(bpid, 0));
}
Example 5
Source File: TestReplicaMap.java From big-c with Apache License 2.0
@Test
public void testRemove() {
  // Test 1: null argument throws invalid argument exception
  try {
    map.remove(bpid, null);
    fail("Expected exception not thrown");
  } catch (IllegalArgumentException expected) {
  }

  // Test 2: remove failure - generation stamp mismatch
  Block b = new Block(block);
  b.setGenerationStamp(0);
  assertNull(map.remove(bpid, b));

  // Test 3: remove failure - blockID mismatch
  b.setGenerationStamp(block.getGenerationStamp());
  b.setBlockId(0);
  assertNull(map.remove(bpid, b));

  // Test 4: remove success
  assertNotNull(map.remove(bpid, block));

  // Test 5: remove failure - invalid blockID
  assertNull(map.remove(bpid, 0));

  // Test 6: remove success
  map.add(bpid, new FinalizedReplica(block, null, null));
  assertNotNull(map.remove(bpid, block.getBlockId()));
}
Example 6
Source File: SimulatedFSDataset.java From RDFS with Apache License 2.0
/** {@inheritDoc} */
public Block getStoredBlock(int namespaceId, long blkid) throws IOException {
  Block b = new Block(blkid);
  BInfo binfo = getBlockMap(namespaceId).get(b);
  if (binfo == null) {
    return null;
  }
  b.setGenerationStamp(binfo.getGenerationStamp());
  b.setNumBytes(binfo.getlength());
  return b;
}
Example 7
Source File: SimulatedFSDataset.java From hadoop-gpu with Apache License 2.0
/** {@inheritDoc} */
public Block getStoredBlock(long blkid) throws IOException {
  Block b = new Block(blkid);
  BInfo binfo = blockMap.get(b);
  if (binfo == null) {
    return null;
  }
  b.setGenerationStamp(binfo.getGenerationStamp());
  b.setNumBytes(binfo.getlength());
  return b;
}
Example 8
Source File: TestLeaseRecovery.java From RDFS with Apache License 2.0
/**
 * The following test first creates a file with a few blocks.
 * It randomly truncates the replica of the last block stored in each datanode.
 * Finally, it triggers block synchronization to synchronize all stored blocks.
 * @param forceOneBlockToZero if true, will truncate one block to 0 length
 */
public void runTestBlockSynchronization(boolean forceOneBlockToZero)
    throws Exception {
  final int ORG_FILE_SIZE = 3000;

  // create a file
  DistributedFileSystem dfs = (DistributedFileSystem)cluster.getFileSystem();
  String filestr = "/foo";
  Path filepath = new Path(filestr);
  DFSTestUtil.createFile(dfs, filepath, ORG_FILE_SIZE, REPLICATION_NUM, 0L);
  assertTrue(dfs.dfs.exists(filestr));
  DFSTestUtil.waitReplication(dfs, filepath, REPLICATION_NUM);

  // get block info for the last block
  LocatedBlockWithMetaInfo locatedblock =
      TestInterDatanodeProtocol.getLastLocatedBlock(dfs.dfs.namenode, filestr);
  int namespaceId = locatedblock.getNamespaceID();
  DatanodeInfo[] datanodeinfos = locatedblock.getLocations();
  assertEquals(REPLICATION_NUM, datanodeinfos.length);

  // connect to data nodes
  InterDatanodeProtocol[] idps = new InterDatanodeProtocol[REPLICATION_NUM];
  DataNode[] datanodes = new DataNode[REPLICATION_NUM];
  for (int i = 0; i < REPLICATION_NUM; i++) {
    idps[i] = DataNode.createInterDataNodeProtocolProxy(
        datanodeinfos[i], conf, 0);
    datanodes[i] = cluster.getDataNode(datanodeinfos[i].getIpcPort());
    assertTrue(datanodes[i] != null);
  }

  // verify BlockMetaDataInfo
  Block lastblock = locatedblock.getBlock();
  DataNode.LOG.info("newblocks=" + lastblock);
  for (int i = 0; i < REPLICATION_NUM; i++) {
    checkMetaInfo(namespaceId, lastblock, idps[i]);
  }

  // setup random block sizes
  int lastblocksize = ORG_FILE_SIZE % BLOCK_SIZE;
  Integer[] newblocksizes = new Integer[REPLICATION_NUM];
  for (int i = 0; i < REPLICATION_NUM; i++) {
    newblocksizes[i] = AppendTestUtil.nextInt(lastblocksize);
  }
  if (forceOneBlockToZero) {
    newblocksizes[0] = 0;
  }
  DataNode.LOG.info("newblocksizes = " + Arrays.asList(newblocksizes));

  // update blocks with random block sizes
  Block[] newblocks = new Block[REPLICATION_NUM];
  for (int i = 0; i < REPLICATION_NUM; i++) {
    DataNode dn = datanodes[i];
    FSDatasetTestUtil.truncateBlock(dn, lastblock, newblocksizes[i],
        namespaceId);
    newblocks[i] = new Block(lastblock.getBlockId(), newblocksizes[i],
        lastblock.getGenerationStamp());
    checkMetaInfo(namespaceId, newblocks[i], idps[i]);
  }

  DataNode.LOG.info("dfs.dfs.clientName=" + dfs.dfs.clientName);
  cluster.getNameNode().append(filestr, dfs.dfs.clientName);

  // block synchronization
  final int primarydatanodeindex = AppendTestUtil.nextInt(datanodes.length);
  DataNode.LOG.info("primarydatanodeindex =" + primarydatanodeindex);
  DataNode primary = datanodes[primarydatanodeindex];
  DataNode.LOG.info("primary.dnRegistration=" + primary.getDNRegistrationForNS(
      cluster.getNameNode().getNamespaceID()));
  primary.recoverBlocks(namespaceId, new Block[]{lastblock},
      new DatanodeInfo[][]{datanodeinfos}).join();

  BlockMetaDataInfo[] updatedmetainfo = new BlockMetaDataInfo[REPLICATION_NUM];
  int minsize = min(newblocksizes);
  long currentGS = cluster.getNameNode().namesystem.getGenerationStamp();
  lastblock.setGenerationStamp(currentGS);
  for (int i = 0; i < REPLICATION_NUM; i++) {
    updatedmetainfo[i] = idps[i].getBlockMetaDataInfo(namespaceId, lastblock);
    RPC.stopProxy(idps[i]);
    assertEquals(lastblock.getBlockId(), updatedmetainfo[i].getBlockId());
    assertEquals(minsize, updatedmetainfo[i].getNumBytes());
    assertEquals(currentGS, updatedmetainfo[i].getGenerationStamp());
  }
}
Example 9
Source File: TestLeaseRecovery.java From hadoop-gpu with Apache License 2.0
/**
 * The following test first creates a file with a few blocks.
 * It randomly truncates the replica of the last block stored in each datanode.
 * Finally, it triggers block synchronization to synchronize all stored blocks.
 */
public void testBlockSynchronization() throws Exception {
  final int ORG_FILE_SIZE = 3000;
  Configuration conf = new Configuration();
  conf.setLong("dfs.block.size", BLOCK_SIZE);
  conf.setBoolean("dfs.support.append", true);
  MiniDFSCluster cluster = null;

  try {
    cluster = new MiniDFSCluster(conf, 5, true, null);
    cluster.waitActive();

    // create a file
    DistributedFileSystem dfs = (DistributedFileSystem)cluster.getFileSystem();
    String filestr = "/foo";
    Path filepath = new Path(filestr);
    DFSTestUtil.createFile(dfs, filepath, ORG_FILE_SIZE, REPLICATION_NUM, 0L);
    assertTrue(dfs.dfs.exists(filestr));
    DFSTestUtil.waitReplication(dfs, filepath, REPLICATION_NUM);

    // get block info for the last block
    LocatedBlock locatedblock = TestInterDatanodeProtocol.getLastLocatedBlock(
        dfs.dfs.namenode, filestr);
    DatanodeInfo[] datanodeinfos = locatedblock.getLocations();
    assertEquals(REPLICATION_NUM, datanodeinfos.length);

    // connect to data nodes
    InterDatanodeProtocol[] idps = new InterDatanodeProtocol[REPLICATION_NUM];
    DataNode[] datanodes = new DataNode[REPLICATION_NUM];
    for (int i = 0; i < REPLICATION_NUM; i++) {
      idps[i] = DataNode.createInterDataNodeProtocolProxy(
          datanodeinfos[i], conf);
      datanodes[i] = cluster.getDataNode(datanodeinfos[i].getIpcPort());
      assertTrue(datanodes[i] != null);
    }

    // verify BlockMetaDataInfo
    Block lastblock = locatedblock.getBlock();
    DataNode.LOG.info("newblocks=" + lastblock);
    for (int i = 0; i < REPLICATION_NUM; i++) {
      checkMetaInfo(lastblock, idps[i]);
    }

    // setup random block sizes
    int lastblocksize = ORG_FILE_SIZE % BLOCK_SIZE;
    Integer[] newblocksizes = new Integer[REPLICATION_NUM];
    for (int i = 0; i < REPLICATION_NUM; i++) {
      newblocksizes[i] = AppendTestUtil.nextInt(lastblocksize);
    }
    DataNode.LOG.info("newblocksizes = " + Arrays.asList(newblocksizes));

    // update blocks with random block sizes
    Block[] newblocks = new Block[REPLICATION_NUM];
    for (int i = 0; i < REPLICATION_NUM; i++) {
      newblocks[i] = new Block(lastblock.getBlockId(), newblocksizes[i],
          lastblock.getGenerationStamp());
      idps[i].updateBlock(lastblock, newblocks[i], false);
      checkMetaInfo(newblocks[i], idps[i]);
    }

    DataNode.LOG.info("dfs.dfs.clientName=" + dfs.dfs.clientName);
    cluster.getNameNode().append(filestr, dfs.dfs.clientName);

    // block synchronization
    final int primarydatanodeindex = AppendTestUtil.nextInt(datanodes.length);
    DataNode.LOG.info("primarydatanodeindex =" + primarydatanodeindex);
    DataNode primary = datanodes[primarydatanodeindex];
    DataNode.LOG.info("primary.dnRegistration=" + primary.dnRegistration);
    primary.recoverBlocks(new Block[]{lastblock},
        new DatanodeInfo[][]{datanodeinfos}).join();

    BlockMetaDataInfo[] updatedmetainfo = new BlockMetaDataInfo[REPLICATION_NUM];
    int minsize = min(newblocksizes);
    long currentGS = cluster.getNameNode().namesystem.getGenerationStamp();
    lastblock.setGenerationStamp(currentGS);
    for (int i = 0; i < REPLICATION_NUM; i++) {
      updatedmetainfo[i] = idps[i].getBlockMetaDataInfo(lastblock);
      assertEquals(lastblock.getBlockId(), updatedmetainfo[i].getBlockId());
      assertEquals(minsize, updatedmetainfo[i].getNumBytes());
      assertEquals(currentGS, updatedmetainfo[i].getGenerationStamp());
    }
  } finally {
    if (cluster != null) { cluster.shutdown(); }
  }
}