Java Code Examples for org.apache.hadoop.hdfs.protocol.BlockListAsLongs#Builder
The following examples show how to use org.apache.hadoop.hdfs.protocol.BlockListAsLongs#Builder.
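All of the examples below follow the same basic builder pattern: obtain a Builder via BlockListAsLongs.builder(), add replicas to it, and call build() to produce the encoded block report. The snippet below is a minimal, self-contained sketch of that pattern rather than code from any of the projects above; the block ID, length, and generation stamp are arbitrary placeholder values, and the null volume/directory arguments to FinalizedReplica simply mirror the test code in the examples that follow.

import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
import org.apache.hadoop.hdfs.protocol.BlockListAsLongs.BlockReportReplica;
import org.apache.hadoop.hdfs.server.datanode.FinalizedReplica;

public class BlockListAsLongsBuilderSketch {
  public static void main(String[] args) {
    BlockListAsLongs.Builder builder = BlockListAsLongs.builder();

    // Register one finalized replica. Block id 1, length 1024 and generation
    // stamp 1001 are arbitrary; the null volume and directory mirror the
    // test code in the examples below.
    Block block = new Block(1L, 1024L, 1001L);
    builder.add(new FinalizedReplica(block, null, null));

    // build() encodes the added replicas into an immutable BlockListAsLongs.
    BlockListAsLongs report = builder.build();
    System.out.println("blocks in report: " + report.getNumberOfBlocks());

    // The built report can be iterated as BlockReportReplica entries
    // (Example 4 below iterates an existing report the same way).
    for (BlockReportReplica replica : report) {
      System.out.println(replica.getBlockId() + " " + replica.getState());
    }
  }
}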
Example 1
Source File: SimulatedFSDataset.java (from hadoop, Apache License 2.0)
synchronized BlockListAsLongs getBlockReport(String bpid) {
  BlockListAsLongs.Builder report = BlockListAsLongs.builder();
  final Map<Block, BInfo> map = blockMap.get(bpid);
  if (map != null) {
    for (BInfo b : map.values()) {
      if (b.isFinalized()) {
        report.add(b);
      }
    }
  }
  return report.build();
}
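As a follow-up usage note (a hedged sketch, not taken from the original file): a per-block-pool report like the one built above is typically wrapped, per storage, into a StorageBlockReport before the DataNode sends it to the NameNode, which is exactly what Example 4 below does with new StorageBlockReport(dnStorage, builder.build()). The helper name and the storage ID parameter here are placeholders for illustration.

import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
import org.apache.hadoop.hdfs.server.protocol.StorageBlockReport;

public class StorageBlockReportSketch {
  // Wrap one storage's BlockListAsLongs into the message type the DataNode
  // reports to the NameNode; mirrors Example 4's use of StorageBlockReport.
  static StorageBlockReport wrap(String storageId, BlockListAsLongs report) {
    return new StorageBlockReport(new DatanodeStorage(storageId), report);
  }
}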
Example 2
Source File: SimulatedFSDataset.java (from big-c, Apache License 2.0)
synchronized BlockListAsLongs getBlockReport(String bpid) {
  BlockListAsLongs.Builder report = BlockListAsLongs.builder();
  final Map<Block, BInfo> map = blockMap.get(bpid);
  if (map != null) {
    for (BInfo b : map.values()) {
      if (b.isFinalized()) {
        report.add(b);
      }
    }
  }
  return report.build();
}
Example 3
Source File: TestBlockManager.java (from hadoop, Apache License 2.0)
/**
 * Test that when the NN starts and is in safe mode, it first receives an
 * incremental block report, then the first full block report.
 */
@Test
public void testSafeModeIBRBeforeFirstFullBR() throws Exception {
  // pretend to be in safemode
  doReturn(true).when(fsn).isInStartupSafeMode();

  DatanodeDescriptor node = nodes.get(0);
  DatanodeStorageInfo ds = node.getStorageInfos()[0];
  node.isAlive = true;
  DatanodeRegistration nodeReg = new DatanodeRegistration(node, null, null, "");

  // register new node
  bm.getDatanodeManager().registerDatanode(nodeReg);
  bm.getDatanodeManager().addDatanode(node);
  assertEquals(node, bm.getDatanodeManager().getDatanode(node));
  assertEquals(0, ds.getBlockReportCount());

  // Build an incremental report
  List<ReceivedDeletedBlockInfo> rdbiList = new ArrayList<>();
  // Build a full report
  BlockListAsLongs.Builder builder = BlockListAsLongs.builder();

  // blk_42 is finalized.
  long receivedBlockId = 42;  // arbitrary
  BlockInfoContiguous receivedBlock = addBlockToBM(receivedBlockId);
  rdbiList.add(new ReceivedDeletedBlockInfo(new Block(receivedBlock),
      ReceivedDeletedBlockInfo.BlockStatus.RECEIVED_BLOCK, null));
  builder.add(new FinalizedReplica(receivedBlock, null, null));

  // blk_43 is under construction.
  long receivingBlockId = 43;
  BlockInfoContiguous receivingBlock = addUcBlockToBM(receivingBlockId);
  rdbiList.add(new ReceivedDeletedBlockInfo(new Block(receivingBlock),
      ReceivedDeletedBlockInfo.BlockStatus.RECEIVING_BLOCK, null));
  builder.add(new ReplicaBeingWritten(receivingBlock, null, null, null));

  // blk_44 has 2 records in IBR. It's finalized. So full BR has 1 record.
  long receivingReceivedBlockId = 44;
  BlockInfoContiguous receivingReceivedBlock = addBlockToBM(receivingReceivedBlockId);
  rdbiList.add(new ReceivedDeletedBlockInfo(new Block(receivingReceivedBlock),
      ReceivedDeletedBlockInfo.BlockStatus.RECEIVING_BLOCK, null));
  rdbiList.add(new ReceivedDeletedBlockInfo(new Block(receivingReceivedBlock),
      ReceivedDeletedBlockInfo.BlockStatus.RECEIVED_BLOCK, null));
  builder.add(new FinalizedReplica(receivingReceivedBlock, null, null));

  // blk_45 is not in full BR, because it's deleted.
  long ReceivedDeletedBlockId = 45;
  rdbiList.add(new ReceivedDeletedBlockInfo(
      new Block(ReceivedDeletedBlockId),
      ReceivedDeletedBlockInfo.BlockStatus.RECEIVED_BLOCK, null));
  rdbiList.add(new ReceivedDeletedBlockInfo(
      new Block(ReceivedDeletedBlockId),
      ReceivedDeletedBlockInfo.BlockStatus.DELETED_BLOCK, null));

  // blk_46 exists in DN for a long time, so it's in full BR, but not in IBR.
  long existedBlockId = 46;
  BlockInfoContiguous existedBlock = addBlockToBM(existedBlockId);
  builder.add(new FinalizedReplica(existedBlock, null, null));

  // process IBR and full BR
  StorageReceivedDeletedBlocks srdb = new StorageReceivedDeletedBlocks(
      new DatanodeStorage(ds.getStorageID()),
      rdbiList.toArray(new ReceivedDeletedBlockInfo[rdbiList.size()]));
  bm.processIncrementalBlockReport(node, srdb);
  // Make sure it's the first full report
  assertEquals(0, ds.getBlockReportCount());
  bm.processReport(node, new DatanodeStorage(ds.getStorageID()),
      builder.build(), null, false);
  assertEquals(1, ds.getBlockReportCount());

  // verify the storage info is correct
  assertTrue(bm.getStoredBlock(new Block(receivedBlockId))
      .findStorageInfo(ds) >= 0);
  assertTrue(((BlockInfoContiguousUnderConstruction) bm
      .getStoredBlock(new Block(receivingBlockId))).getNumExpectedLocations() > 0);
  assertTrue(bm.getStoredBlock(new Block(receivingReceivedBlockId))
      .findStorageInfo(ds) >= 0);
  assertNull(bm.getStoredBlock(new Block(ReceivedDeletedBlockId)));
  assertTrue(bm.getStoredBlock(new Block(existedBlock))
      .findStorageInfo(ds) >= 0);
}
Example 4
Source File: BlockReportTestBase.java (from hadoop, Apache License 2.0)
private static StorageBlockReport[] getBlockReports(
    DataNode dn, String bpid, boolean corruptOneBlockGs, boolean corruptOneBlockLen) {
  Map<DatanodeStorage, BlockListAsLongs> perVolumeBlockLists =
      dn.getFSDataset().getBlockReports(bpid);

  // Send block report
  StorageBlockReport[] reports =
      new StorageBlockReport[perVolumeBlockLists.size()];
  boolean corruptedGs = false;
  boolean corruptedLen = false;

  int reportIndex = 0;
  for (Map.Entry<DatanodeStorage, BlockListAsLongs> kvPair : perVolumeBlockLists.entrySet()) {
    DatanodeStorage dnStorage = kvPair.getKey();
    BlockListAsLongs blockList = kvPair.getValue();

    // Walk the list of blocks until we find one each to corrupt the
    // generation stamp and length, if so requested.
    BlockListAsLongs.Builder builder = BlockListAsLongs.builder();
    for (BlockReportReplica block : blockList) {
      if (corruptOneBlockGs && !corruptedGs) {
        long gsOld = block.getGenerationStamp();
        long gsNew;
        do {
          gsNew = rand.nextInt();
        } while (gsNew == gsOld);
        block.setGenerationStamp(gsNew);
        LOG.info("Corrupted the GS for block ID " + block);
        corruptedGs = true;
      } else if (corruptOneBlockLen && !corruptedLen) {
        long lenOld = block.getNumBytes();
        long lenNew;
        do {
          lenNew = rand.nextInt((int) lenOld - 1);
        } while (lenNew == lenOld);
        block.setNumBytes(lenNew);
        LOG.info("Corrupted the length for block ID " + block);
        corruptedLen = true;
      }
      builder.add(new BlockReportReplica(block));
    }

    reports[reportIndex++] = new StorageBlockReport(dnStorage, builder.build());
  }

  return reports;
}
Example 5
Source File: TestBlockManager.java (from big-c, Apache License 2.0)
/**
 * Test that when the NN starts and is in safe mode, it first receives an
 * incremental block report, then the first full block report.
 */
@Test
public void testSafeModeIBRBeforeFirstFullBR() throws Exception {
  // pretend to be in safemode
  doReturn(true).when(fsn).isInStartupSafeMode();

  DatanodeDescriptor node = nodes.get(0);
  DatanodeStorageInfo ds = node.getStorageInfos()[0];
  node.isAlive = true;
  DatanodeRegistration nodeReg = new DatanodeRegistration(node, null, null, "");

  // register new node
  bm.getDatanodeManager().registerDatanode(nodeReg);
  bm.getDatanodeManager().addDatanode(node);
  assertEquals(node, bm.getDatanodeManager().getDatanode(node));
  assertEquals(0, ds.getBlockReportCount());

  // Build an incremental report
  List<ReceivedDeletedBlockInfo> rdbiList = new ArrayList<>();
  // Build a full report
  BlockListAsLongs.Builder builder = BlockListAsLongs.builder();

  // blk_42 is finalized.
  long receivedBlockId = 42;  // arbitrary
  BlockInfoContiguous receivedBlock = addBlockToBM(receivedBlockId);
  rdbiList.add(new ReceivedDeletedBlockInfo(new Block(receivedBlock),
      ReceivedDeletedBlockInfo.BlockStatus.RECEIVED_BLOCK, null));
  builder.add(new FinalizedReplica(receivedBlock, null, null));

  // blk_43 is under construction.
  long receivingBlockId = 43;
  BlockInfoContiguous receivingBlock = addUcBlockToBM(receivingBlockId);
  rdbiList.add(new ReceivedDeletedBlockInfo(new Block(receivingBlock),
      ReceivedDeletedBlockInfo.BlockStatus.RECEIVING_BLOCK, null));
  builder.add(new ReplicaBeingWritten(receivingBlock, null, null, null));

  // blk_44 has 2 records in IBR. It's finalized. So full BR has 1 record.
  long receivingReceivedBlockId = 44;
  BlockInfoContiguous receivingReceivedBlock = addBlockToBM(receivingReceivedBlockId);
  rdbiList.add(new ReceivedDeletedBlockInfo(new Block(receivingReceivedBlock),
      ReceivedDeletedBlockInfo.BlockStatus.RECEIVING_BLOCK, null));
  rdbiList.add(new ReceivedDeletedBlockInfo(new Block(receivingReceivedBlock),
      ReceivedDeletedBlockInfo.BlockStatus.RECEIVED_BLOCK, null));
  builder.add(new FinalizedReplica(receivingReceivedBlock, null, null));

  // blk_45 is not in full BR, because it's deleted.
  long ReceivedDeletedBlockId = 45;
  rdbiList.add(new ReceivedDeletedBlockInfo(
      new Block(ReceivedDeletedBlockId),
      ReceivedDeletedBlockInfo.BlockStatus.RECEIVED_BLOCK, null));
  rdbiList.add(new ReceivedDeletedBlockInfo(
      new Block(ReceivedDeletedBlockId),
      ReceivedDeletedBlockInfo.BlockStatus.DELETED_BLOCK, null));

  // blk_46 exists in DN for a long time, so it's in full BR, but not in IBR.
  long existedBlockId = 46;
  BlockInfoContiguous existedBlock = addBlockToBM(existedBlockId);
  builder.add(new FinalizedReplica(existedBlock, null, null));

  // process IBR and full BR
  StorageReceivedDeletedBlocks srdb = new StorageReceivedDeletedBlocks(
      new DatanodeStorage(ds.getStorageID()),
      rdbiList.toArray(new ReceivedDeletedBlockInfo[rdbiList.size()]));
  bm.processIncrementalBlockReport(node, srdb);
  // Make sure it's the first full report
  assertEquals(0, ds.getBlockReportCount());
  bm.processReport(node, new DatanodeStorage(ds.getStorageID()),
      builder.build(), null, false);
  assertEquals(1, ds.getBlockReportCount());

  // verify the storage info is correct
  assertTrue(bm.getStoredBlock(new Block(receivedBlockId))
      .findStorageInfo(ds) >= 0);
  assertTrue(((BlockInfoContiguousUnderConstruction) bm
      .getStoredBlock(new Block(receivingBlockId))).getNumExpectedLocations() > 0);
  assertTrue(bm.getStoredBlock(new Block(receivingReceivedBlockId))
      .findStorageInfo(ds) >= 0);
  assertNull(bm.getStoredBlock(new Block(ReceivedDeletedBlockId)));
  assertTrue(bm.getStoredBlock(new Block(existedBlock))
      .findStorageInfo(ds) >= 0);
}
Example 6
Source File: BlockReportTestBase.java (from big-c, Apache License 2.0)
private static StorageBlockReport[] getBlockReports(
    DataNode dn, String bpid, boolean corruptOneBlockGs, boolean corruptOneBlockLen) {
  Map<DatanodeStorage, BlockListAsLongs> perVolumeBlockLists =
      dn.getFSDataset().getBlockReports(bpid);

  // Send block report
  StorageBlockReport[] reports =
      new StorageBlockReport[perVolumeBlockLists.size()];
  boolean corruptedGs = false;
  boolean corruptedLen = false;

  int reportIndex = 0;
  for (Map.Entry<DatanodeStorage, BlockListAsLongs> kvPair : perVolumeBlockLists.entrySet()) {
    DatanodeStorage dnStorage = kvPair.getKey();
    BlockListAsLongs blockList = kvPair.getValue();

    // Walk the list of blocks until we find one each to corrupt the
    // generation stamp and length, if so requested.
    BlockListAsLongs.Builder builder = BlockListAsLongs.builder();
    for (BlockReportReplica block : blockList) {
      if (corruptOneBlockGs && !corruptedGs) {
        long gsOld = block.getGenerationStamp();
        long gsNew;
        do {
          gsNew = rand.nextInt();
        } while (gsNew == gsOld);
        block.setGenerationStamp(gsNew);
        LOG.info("Corrupted the GS for block ID " + block);
        corruptedGs = true;
      } else if (corruptOneBlockLen && !corruptedLen) {
        long lenOld = block.getNumBytes();
        long lenNew;
        do {
          lenNew = rand.nextInt((int) lenOld - 1);
        } while (lenNew == lenOld);
        block.setNumBytes(lenNew);
        LOG.info("Corrupted the length for block ID " + block);
        corruptedLen = true;
      }
      builder.add(new BlockReportReplica(block));
    }

    reports[reportIndex++] = new StorageBlockReport(dnStorage, builder.build());
  }

  return reports;
}