Java Code Examples for org.apache.hadoop.hdfs.protocol.ExtendedBlock#getBlockPoolId()
The following examples show how to use
org.apache.hadoop.hdfs.protocol.ExtendedBlock#getBlockPoolId().
The examples are drawn from the Apache Hadoop code base (the hadoop and big-c source trees);
the originating source file, license, and reader vote count are noted above each snippet.
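Before the examples: an ExtendedBlock identifies an HDFS block by a block pool ID plus the block's numeric ID, length, and generation stamp, which matters in federated clusters where several namespaces (block pools) share the same DataNodes. The snippet below is a minimal sketch of the accessor these examples revolve around; the class name, pool ID, and block numbers are made-up values for illustration only.

import org.apache.hadoop.hdfs.protocol.ExtendedBlock;

public class BlockPoolIdDemo {
  public static void main(String[] args) {
    // Hypothetical pool id, block id, length, and generation stamp.
    ExtendedBlock b = new ExtendedBlock("BP-12345-127.0.0.1-1400000000000",
        1073741825L, 134217728L, 1001L);

    // getBlockPoolId() returns the pool the block belongs to; the examples
    // below use it to key per-pool maps or to rebuild an equivalent block.
    System.out.println(b.getBlockPoolId() + ":" + b.getLocalBlock());
  }
}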
Example 1
Source File: SimulatedFSDataset.java From big-c with Apache License 2.0 | 6 votes |
@Override // FsDatasetSpi
public synchronized ReplicaHandler createTemporary(
    StorageType storageType, ExtendedBlock b) throws IOException {
  if (isValidBlock(b)) {
    throw new ReplicaAlreadyExistsException("Block " + b
        + " is valid, and cannot be written to.");
  }
  if (isValidRbw(b)) {
    throw new ReplicaAlreadyExistsException("Block " + b
        + " is being written, and cannot be written to.");
  }
  final Map<Block, BInfo> map = getMap(b.getBlockPoolId());
  BInfo binfo = new BInfo(b.getBlockPoolId(), b.getLocalBlock(), true);
  map.put(binfo.theBlock, binfo);
  return new ReplicaHandler(binfo, null);
}
Example 2
Source File: TestBalancerWithMultipleNameNodes.java From big-c with Apache License 2.0 | 6 votes |
private static ExtendedBlock[][] generateBlocks(Suite s, long size)
    throws IOException, InterruptedException, TimeoutException {
  final ExtendedBlock[][] blocks = new ExtendedBlock[s.clients.length][];
  for(int n = 0; n < s.clients.length; n++) {
    final long fileLen = size/s.replication;
    createFile(s, n, fileLen);

    final List<LocatedBlock> locatedBlocks = s.clients[n].getBlockLocations(
        FILE_NAME, 0, fileLen).getLocatedBlocks();

    final int numOfBlocks = locatedBlocks.size();
    blocks[n] = new ExtendedBlock[numOfBlocks];
    for(int i = 0; i < numOfBlocks; i++) {
      final ExtendedBlock b = locatedBlocks.get(i).getBlock();
      blocks[n][i] = new ExtendedBlock(b.getBlockPoolId(), b.getBlockId(),
          b.getNumBytes(), b.getGenerationStamp());
    }
  }
  return blocks;
}
Example 3
Source File: SimulatedFSDataset.java From hadoop with Apache License 2.0 | 6 votes |
@Override // FsDatasetSpi
public synchronized ReplicaHandler createTemporary(
    StorageType storageType, ExtendedBlock b) throws IOException {
  if (isValidBlock(b)) {
    throw new ReplicaAlreadyExistsException("Block " + b
        + " is valid, and cannot be written to.");
  }
  if (isValidRbw(b)) {
    throw new ReplicaAlreadyExistsException("Block " + b
        + " is being written, and cannot be written to.");
  }
  final Map<Block, BInfo> map = getMap(b.getBlockPoolId());
  BInfo binfo = new BInfo(b.getBlockPoolId(), b.getLocalBlock(), true);
  map.put(binfo.theBlock, binfo);
  return new ReplicaHandler(binfo, null);
}
Example 4
Source File: TestBPOfferService.java From hadoop with Apache License 2.0 | 6 votes |
private ReceivedDeletedBlockInfo[] waitForBlockReceived(
    final ExtendedBlock fakeBlock,
    final DatanodeProtocolClientSideTranslatorPB mockNN) throws Exception {
  final String fakeBlockPoolId = fakeBlock.getBlockPoolId();
  final ArgumentCaptor<StorageReceivedDeletedBlocks[]> captor =
      ArgumentCaptor.forClass(StorageReceivedDeletedBlocks[].class);
  GenericTestUtils.waitFor(new Supplier<Boolean>() {

    @Override
    public Boolean get() {
      try {
        Mockito.verify(mockNN).blockReceivedAndDeleted(
            Mockito.<DatanodeRegistration>anyObject(),
            Mockito.eq(fakeBlockPoolId),
            captor.capture());
        return true;
      } catch (Throwable t) {
        return false;
      }
    }
  }, 100, 10000);
  return captor.getValue()[0].getBlocks();
}
Example 5
Source File: TestBalancerWithMultipleNameNodes.java From hadoop with Apache License 2.0 | 6 votes |
private static ExtendedBlock[][] generateBlocks(Suite s, long size)
    throws IOException, InterruptedException, TimeoutException {
  final ExtendedBlock[][] blocks = new ExtendedBlock[s.clients.length][];
  for(int n = 0; n < s.clients.length; n++) {
    final long fileLen = size/s.replication;
    createFile(s, n, fileLen);

    final List<LocatedBlock> locatedBlocks = s.clients[n].getBlockLocations(
        FILE_NAME, 0, fileLen).getLocatedBlocks();

    final int numOfBlocks = locatedBlocks.size();
    blocks[n] = new ExtendedBlock[numOfBlocks];
    for(int i = 0; i < numOfBlocks; i++) {
      final ExtendedBlock b = locatedBlocks.get(i).getBlock();
      blocks[n][i] = new ExtendedBlock(b.getBlockPoolId(), b.getBlockId(),
          b.getNumBytes(), b.getGenerationStamp());
    }
  }
  return blocks;
}
Example 6
Source File: TestBPOfferService.java From big-c with Apache License 2.0 | 6 votes |
private ReceivedDeletedBlockInfo[] waitForBlockReceived(
    final ExtendedBlock fakeBlock,
    final DatanodeProtocolClientSideTranslatorPB mockNN) throws Exception {
  final String fakeBlockPoolId = fakeBlock.getBlockPoolId();
  final ArgumentCaptor<StorageReceivedDeletedBlocks[]> captor =
      ArgumentCaptor.forClass(StorageReceivedDeletedBlocks[].class);
  GenericTestUtils.waitFor(new Supplier<Boolean>() {

    @Override
    public Boolean get() {
      try {
        Mockito.verify(mockNN).blockReceivedAndDeleted(
            Mockito.<DatanodeRegistration>anyObject(),
            Mockito.eq(fakeBlockPoolId),
            captor.capture());
        return true;
      } catch (Throwable t) {
        return false;
      }
    }
  }, 100, 10000);
  return captor.getValue()[0].getBlocks();
}
Example 7
Source File: DataNode.java From hadoop with Apache License 2.0 | 5 votes |
/**
 * Return the BPOfferService instance corresponding to the given block.
 * @return the BPOS
 * @throws IOException if no such BPOS can be found
 */
private BPOfferService getBPOSForBlock(ExtendedBlock block)
    throws IOException {
  Preconditions.checkNotNull(block);
  BPOfferService bpos = blockPoolManager.get(block.getBlockPoolId());
  if (bpos == null) {
    throw new IOException("cannot locate OfferService thread for bp="
        + block.getBlockPoolId());
  }
  return bpos;
}
Example 8
Source File: RemoteBlockReader2.java From hadoop with Apache License 2.0 | 5 votes |
static void checkSuccess(
    BlockOpResponseProto status, Peer peer,
    ExtendedBlock block, String file)
    throws IOException {
  String logInfo = "for OP_READ_BLOCK"
      + ", self=" + peer.getLocalAddressString()
      + ", remote=" + peer.getRemoteAddressString()
      + ", for file " + file
      + ", for pool " + block.getBlockPoolId()
      + " block " + block.getBlockId() + "_" + block.getGenerationStamp();
  DataTransferProtoUtil.checkBlockOpStatus(status, logInfo);
}
Example 9
Source File: BlockTokenSecretManager.java From big-c with Apache License 2.0 | 5 votes |
/** Generate a block token for a specified user */
public Token<BlockTokenIdentifier> generateToken(String userId,
    ExtendedBlock block, EnumSet<AccessMode> modes) throws IOException {
  BlockTokenIdentifier id = new BlockTokenIdentifier(userId,
      block.getBlockPoolId(), block.getBlockId(), modes);
  return new Token<BlockTokenIdentifier>(id, this);
}
Example 10
Source File: RemoteBlockReader2.java From big-c with Apache License 2.0 | 5 votes |
static void checkSuccess(
    BlockOpResponseProto status, Peer peer,
    ExtendedBlock block, String file)
    throws IOException {
  String logInfo = "for OP_READ_BLOCK"
      + ", self=" + peer.getLocalAddressString()
      + ", remote=" + peer.getRemoteAddressString()
      + ", for file " + file
      + ", for pool " + block.getBlockPoolId()
      + " block " + block.getBlockId() + "_" + block.getGenerationStamp();
  DataTransferProtoUtil.checkBlockOpStatus(status, logInfo);
}
Example 11
Source File: DataNode.java From big-c with Apache License 2.0 | 5 votes |
/**
 * Return the BPOfferService instance corresponding to the given block.
 * @return the BPOS
 * @throws IOException if no such BPOS can be found
 */
private BPOfferService getBPOSForBlock(ExtendedBlock block)
    throws IOException {
  Preconditions.checkNotNull(block);
  BPOfferService bpos = blockPoolManager.get(block.getBlockPoolId());
  if (bpos == null) {
    throw new IOException("cannot locate OfferService thread for bp="
        + block.getBlockPoolId());
  }
  return bpos;
}
Example 12
Source File: ExtendedBlockId.java From hadoop with Apache License 2.0 | 4 votes |
public static ExtendedBlockId fromExtendedBlock(ExtendedBlock block) {
  return new ExtendedBlockId(block.getBlockId(), block.getBlockPoolId());
}
Example 13
Source File: RemoteBlockReader2.java From hadoop with Apache License 2.0 | 4 votes |
/**
 * Create a new BlockReader specifically to satisfy a read.
 * This method also sends the OP_READ_BLOCK request.
 *
 * @param file File location
 * @param block The block object
 * @param blockToken The block token for security
 * @param startOffset The read offset, relative to block head
 * @param len The number of bytes to read
 * @param verifyChecksum Whether to verify checksum
 * @param clientName Client name
 * @param peer The Peer to use
 * @param datanodeID The DatanodeID this peer is connected to
 * @return New BlockReader instance, or null on error.
 */
public static BlockReader newBlockReader(String file,
    ExtendedBlock block,
    Token<BlockTokenIdentifier> blockToken,
    long startOffset, long len,
    boolean verifyChecksum,
    String clientName,
    Peer peer, DatanodeID datanodeID,
    PeerCache peerCache,
    CachingStrategy cachingStrategy) throws IOException {
  // in and out will be closed when sock is closed (by the caller)
  final DataOutputStream out = new DataOutputStream(new BufferedOutputStream(
      peer.getOutputStream()));
  new Sender(out).readBlock(block, blockToken, clientName, startOffset, len,
      verifyChecksum, cachingStrategy);

  //
  // Get bytes in block
  //
  DataInputStream in = new DataInputStream(peer.getInputStream());

  BlockOpResponseProto status = BlockOpResponseProto.parseFrom(
      PBHelper.vintPrefixed(in));
  checkSuccess(status, peer, block, file);
  ReadOpChecksumInfoProto checksumInfo = status.getReadOpChecksumInfo();
  DataChecksum checksum = DataTransferProtoUtil.fromProto(
      checksumInfo.getChecksum());
  //Warning when we get CHECKSUM_NULL?

  // Read the first chunk offset.
  long firstChunkOffset = checksumInfo.getChunkOffset();

  if ( firstChunkOffset < 0 || firstChunkOffset > startOffset ||
      firstChunkOffset <= (startOffset - checksum.getBytesPerChecksum())) {
    throw new IOException("BlockReader: error in first chunk offset (" +
        firstChunkOffset + ") startOffset is " +
        startOffset + " for file " + file);
  }

  return new RemoteBlockReader2(file, block.getBlockPoolId(),
      block.getBlockId(), checksum, verifyChecksum, startOffset,
      firstChunkOffset, len, peer, datanodeID, peerCache);
}
Example 14
Source File: RemoteBlockReader.java From hadoop with Apache License 2.0 | 4 votes |
/**
 * Create a new BlockReader specifically to satisfy a read.
 * This method also sends the OP_READ_BLOCK request.
 *
 * @param file File location
 * @param block The block object
 * @param blockToken The block token for security
 * @param startOffset The read offset, relative to block head
 * @param len The number of bytes to read
 * @param bufferSize The IO buffer size (not the client buffer size)
 * @param verifyChecksum Whether to verify checksum
 * @param clientName Client name
 * @return New BlockReader instance, or null on error.
 */
public static RemoteBlockReader newBlockReader(String file,
    ExtendedBlock block,
    Token<BlockTokenIdentifier> blockToken,
    long startOffset, long len,
    int bufferSize, boolean verifyChecksum,
    String clientName, Peer peer,
    DatanodeID datanodeID,
    PeerCache peerCache,
    CachingStrategy cachingStrategy) throws IOException {
  // in and out will be closed when sock is closed (by the caller)
  final DataOutputStream out =
      new DataOutputStream(new BufferedOutputStream(peer.getOutputStream()));
  new Sender(out).readBlock(block, blockToken, clientName, startOffset, len,
      verifyChecksum, cachingStrategy);

  //
  // Get bytes in block, set streams
  //
  DataInputStream in = new DataInputStream(
      new BufferedInputStream(peer.getInputStream(), bufferSize));

  BlockOpResponseProto status = BlockOpResponseProto.parseFrom(
      PBHelper.vintPrefixed(in));
  RemoteBlockReader2.checkSuccess(status, peer, block, file);
  ReadOpChecksumInfoProto checksumInfo = status.getReadOpChecksumInfo();
  DataChecksum checksum = DataTransferProtoUtil.fromProto(
      checksumInfo.getChecksum());
  //Warning when we get CHECKSUM_NULL?

  // Read the first chunk offset.
  long firstChunkOffset = checksumInfo.getChunkOffset();

  if ( firstChunkOffset < 0 || firstChunkOffset > startOffset ||
      firstChunkOffset <= (startOffset - checksum.getBytesPerChecksum())) {
    throw new IOException("BlockReader: error in first chunk offset (" +
        firstChunkOffset + ") startOffset is " +
        startOffset + " for file " + file);
  }

  return new RemoteBlockReader(file, block.getBlockPoolId(),
      block.getBlockId(), in, checksum, verifyChecksum, startOffset,
      firstChunkOffset, len, peer, datanodeID, peerCache);
}
Example 15
Source File: TestInterDatanodeProtocol.java From hadoop with Apache License 2.0 | 4 votes |
/**
 * Test for
 * {@link FsDatasetImpl#updateReplicaUnderRecovery(ExtendedBlock, long, long)}
 */
@Test
public void testUpdateReplicaUnderRecovery() throws IOException {
  MiniDFSCluster cluster = null;

  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
    cluster.waitActive();
    String bpid = cluster.getNamesystem().getBlockPoolId();

    //create a file
    DistributedFileSystem dfs = cluster.getFileSystem();
    String filestr = "/foo";
    Path filepath = new Path(filestr);
    DFSTestUtil.createFile(dfs, filepath, 1024L, (short)3, 0L);

    //get block info
    final LocatedBlock locatedblock = getLastLocatedBlock(
        DFSClientAdapter.getDFSClient(dfs).getNamenode(), filestr);
    final DatanodeInfo[] datanodeinfo = locatedblock.getLocations();
    Assert.assertTrue(datanodeinfo.length > 0);

    //get DataNode and FSDataset objects
    final DataNode datanode = cluster.getDataNode(datanodeinfo[0].getIpcPort());
    Assert.assertTrue(datanode != null);

    //initReplicaRecovery
    final ExtendedBlock b = locatedblock.getBlock();
    final long recoveryid = b.getGenerationStamp() + 1;
    final long newlength = b.getNumBytes() - 1;
    final FsDatasetSpi<?> fsdataset = DataNodeTestUtils.getFSDataset(datanode);
    final ReplicaRecoveryInfo rri = fsdataset.initReplicaRecovery(
        new RecoveringBlock(b, null, recoveryid));

    //check replica
    final ReplicaInfo replica = FsDatasetTestUtil.fetchReplicaInfo(
        fsdataset, bpid, b.getBlockId());
    Assert.assertEquals(ReplicaState.RUR, replica.getState());

    //check meta data before update
    FsDatasetImpl.checkReplicaFiles(replica);

    //case "THIS IS NOT SUPPOSED TO HAPPEN"
    //with (block length) != (stored replica's on disk length).
    {
      //create a block with same id and gs but different length.
      final ExtendedBlock tmp = new ExtendedBlock(b.getBlockPoolId(),
          rri.getBlockId(), rri.getNumBytes() - 1, rri.getGenerationStamp());
      try {
        //update should fail
        fsdataset.updateReplicaUnderRecovery(tmp, recoveryid,
            tmp.getBlockId(), newlength);
        Assert.fail();
      } catch(IOException ioe) {
        System.out.println("GOOD: getting " + ioe);
      }
    }

    //update
    final String storageID = fsdataset.updateReplicaUnderRecovery(
        new ExtendedBlock(b.getBlockPoolId(), rri), recoveryid,
        rri.getBlockId(), newlength);
    assertTrue(storageID != null);
  } finally {
    if (cluster != null) cluster.shutdown();
  }
}
Example 16
Source File: TestInterDatanodeProtocol.java From big-c with Apache License 2.0 | 4 votes |
/**
 * Test for
 * {@link FsDatasetImpl#updateReplicaUnderRecovery(ExtendedBlock, long, long)}
 */
@Test
public void testUpdateReplicaUnderRecovery() throws IOException {
  MiniDFSCluster cluster = null;

  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
    cluster.waitActive();
    String bpid = cluster.getNamesystem().getBlockPoolId();

    //create a file
    DistributedFileSystem dfs = cluster.getFileSystem();
    String filestr = "/foo";
    Path filepath = new Path(filestr);
    DFSTestUtil.createFile(dfs, filepath, 1024L, (short)3, 0L);

    //get block info
    final LocatedBlock locatedblock = getLastLocatedBlock(
        DFSClientAdapter.getDFSClient(dfs).getNamenode(), filestr);
    final DatanodeInfo[] datanodeinfo = locatedblock.getLocations();
    Assert.assertTrue(datanodeinfo.length > 0);

    //get DataNode and FSDataset objects
    final DataNode datanode = cluster.getDataNode(datanodeinfo[0].getIpcPort());
    Assert.assertTrue(datanode != null);

    //initReplicaRecovery
    final ExtendedBlock b = locatedblock.getBlock();
    final long recoveryid = b.getGenerationStamp() + 1;
    final long newlength = b.getNumBytes() - 1;
    final FsDatasetSpi<?> fsdataset = DataNodeTestUtils.getFSDataset(datanode);
    final ReplicaRecoveryInfo rri = fsdataset.initReplicaRecovery(
        new RecoveringBlock(b, null, recoveryid));

    //check replica
    final ReplicaInfo replica = FsDatasetTestUtil.fetchReplicaInfo(
        fsdataset, bpid, b.getBlockId());
    Assert.assertEquals(ReplicaState.RUR, replica.getState());

    //check meta data before update
    FsDatasetImpl.checkReplicaFiles(replica);

    //case "THIS IS NOT SUPPOSED TO HAPPEN"
    //with (block length) != (stored replica's on disk length).
    {
      //create a block with same id and gs but different length.
      final ExtendedBlock tmp = new ExtendedBlock(b.getBlockPoolId(),
          rri.getBlockId(), rri.getNumBytes() - 1, rri.getGenerationStamp());
      try {
        //update should fail
        fsdataset.updateReplicaUnderRecovery(tmp, recoveryid,
            tmp.getBlockId(), newlength);
        Assert.fail();
      } catch(IOException ioe) {
        System.out.println("GOOD: getting " + ioe);
      }
    }

    //update
    final String storageID = fsdataset.updateReplicaUnderRecovery(
        new ExtendedBlock(b.getBlockPoolId(), rri), recoveryid,
        rri.getBlockId(), newlength);
    assertTrue(storageID != null);
  } finally {
    if (cluster != null) cluster.shutdown();
  }
}
Example 17
Source File: RemoteBlockReader2.java From big-c with Apache License 2.0 | 4 votes |
/**
 * Create a new BlockReader specifically to satisfy a read.
 * This method also sends the OP_READ_BLOCK request.
 *
 * @param file File location
 * @param block The block object
 * @param blockToken The block token for security
 * @param startOffset The read offset, relative to block head
 * @param len The number of bytes to read
 * @param verifyChecksum Whether to verify checksum
 * @param clientName Client name
 * @param peer The Peer to use
 * @param datanodeID The DatanodeID this peer is connected to
 * @return New BlockReader instance, or null on error.
 */
public static BlockReader newBlockReader(String file,
    ExtendedBlock block,
    Token<BlockTokenIdentifier> blockToken,
    long startOffset, long len,
    boolean verifyChecksum,
    String clientName,
    Peer peer, DatanodeID datanodeID,
    PeerCache peerCache,
    CachingStrategy cachingStrategy) throws IOException {
  // in and out will be closed when sock is closed (by the caller)
  final DataOutputStream out = new DataOutputStream(new BufferedOutputStream(
      peer.getOutputStream()));
  new Sender(out).readBlock(block, blockToken, clientName, startOffset, len,
      verifyChecksum, cachingStrategy);

  //
  // Get bytes in block
  //
  DataInputStream in = new DataInputStream(peer.getInputStream());

  BlockOpResponseProto status = BlockOpResponseProto.parseFrom(
      PBHelper.vintPrefixed(in));
  checkSuccess(status, peer, block, file);
  ReadOpChecksumInfoProto checksumInfo = status.getReadOpChecksumInfo();
  DataChecksum checksum = DataTransferProtoUtil.fromProto(
      checksumInfo.getChecksum());
  //Warning when we get CHECKSUM_NULL?

  // Read the first chunk offset.
  long firstChunkOffset = checksumInfo.getChunkOffset();

  if ( firstChunkOffset < 0 || firstChunkOffset > startOffset ||
      firstChunkOffset <= (startOffset - checksum.getBytesPerChecksum())) {
    throw new IOException("BlockReader: error in first chunk offset (" +
        firstChunkOffset + ") startOffset is " +
        startOffset + " for file " + file);
  }

  return new RemoteBlockReader2(file, block.getBlockPoolId(),
      block.getBlockId(), checksum, verifyChecksum, startOffset,
      firstChunkOffset, len, peer, datanodeID, peerCache);
}
Example 18
Source File: ExtendedBlockId.java From big-c with Apache License 2.0 | 4 votes |
public static ExtendedBlockId fromExtendedBlock(ExtendedBlock block) {
  return new ExtendedBlockId(block.getBlockId(), block.getBlockPoolId());
}
Example 19
Source File: DataNode.java From big-c with Apache License 2.0 | 4 votes |
/** Recover a block */
private void recoverBlock(RecoveringBlock rBlock) throws IOException {
  ExtendedBlock block = rBlock.getBlock();
  String blookPoolId = block.getBlockPoolId();
  DatanodeID[] datanodeids = rBlock.getLocations();
  List<BlockRecord> syncList = new ArrayList<BlockRecord>(datanodeids.length);
  int errorCount = 0;

  //check generation stamps
  for(DatanodeID id : datanodeids) {
    try {
      BPOfferService bpos = blockPoolManager.get(blookPoolId);
      DatanodeRegistration bpReg = bpos.bpRegistration;
      InterDatanodeProtocol datanode = bpReg.equals(id)?
          this: DataNode.createInterDataNodeProtocolProxy(id, getConf(),
              dnConf.socketTimeout, dnConf.connectToDnViaHostname);
      ReplicaRecoveryInfo info = callInitReplicaRecovery(datanode, rBlock);
      if (info != null &&
          info.getGenerationStamp() >= block.getGenerationStamp() &&
          info.getNumBytes() > 0) {
        syncList.add(new BlockRecord(id, datanode, info));
      }
    } catch (RecoveryInProgressException ripE) {
      InterDatanodeProtocol.LOG.warn(
          "Recovery for replica " + block + " on data-node " + id
          + " is already in progress. Recovery id = "
          + rBlock.getNewGenerationStamp() + " is aborted.", ripE);
      return;
    } catch (IOException e) {
      ++errorCount;
      InterDatanodeProtocol.LOG.warn(
          "Failed to obtain replica info for block (=" + block
          + ") from datanode (=" + id + ")", e);
    }
  }

  if (errorCount == datanodeids.length) {
    throw new IOException("All datanodes failed: block=" + block
        + ", datanodeids=" + Arrays.asList(datanodeids));
  }

  syncBlock(rBlock, syncList);
}
Example 20
Source File: TestInterDatanodeProtocol.java From big-c with Apache License 2.0 | 4 votes |
/**
 * The following test first creates a file.
 * It verifies the block information from a datanode.
 * Then, it updates the block with new information and verifies again.
 * @param useDnHostname whether DNs should connect to other DNs by hostname
 */
private void checkBlockMetaDataInfo(boolean useDnHostname) throws Exception {
  MiniDFSCluster cluster = null;

  conf.setBoolean(DFSConfigKeys.DFS_DATANODE_USE_DN_HOSTNAME, useDnHostname);
  if (useDnHostname) {
    // Since the mini cluster only listens on the loopback we have to
    // ensure the hostname used to access DNs maps to the loopback. We
    // do this by telling the DN to advertise localhost as its hostname
    // instead of the default hostname.
    conf.set(DFSConfigKeys.DFS_DATANODE_HOST_NAME_KEY, "localhost");
  }

  try {
    cluster = new MiniDFSCluster.Builder(conf)
        .numDataNodes(3)
        .checkDataNodeHostConfig(true)
        .build();
    cluster.waitActive();

    //create a file
    DistributedFileSystem dfs = cluster.getFileSystem();
    String filestr = "/foo";
    Path filepath = new Path(filestr);
    DFSTestUtil.createFile(dfs, filepath, 1024L, (short)3, 0L);
    assertTrue(dfs.exists(filepath));

    //get block info
    LocatedBlock locatedblock = getLastLocatedBlock(
        DFSClientAdapter.getDFSClient(dfs).getNamenode(), filestr);
    DatanodeInfo[] datanodeinfo = locatedblock.getLocations();
    assertTrue(datanodeinfo.length > 0);

    //connect to a data node
    DataNode datanode = cluster.getDataNode(datanodeinfo[0].getIpcPort());
    InterDatanodeProtocol idp =
        DataNodeTestUtils.createInterDatanodeProtocolProxy(
            datanode, datanodeinfo[0], conf, useDnHostname);

    // Stop the block scanners.
    datanode.getBlockScanner().removeAllVolumeScanners();

    //verify BlockMetaDataInfo
    ExtendedBlock b = locatedblock.getBlock();
    InterDatanodeProtocol.LOG.info("b=" + b + ", " + b.getClass());
    checkMetaInfo(b, datanode);
    long recoveryId = b.getGenerationStamp() + 1;
    idp.initReplicaRecovery(
        new RecoveringBlock(b, locatedblock.getLocations(), recoveryId));

    //verify updateBlock
    ExtendedBlock newblock = new ExtendedBlock(b.getBlockPoolId(),
        b.getBlockId(), b.getNumBytes()/2, b.getGenerationStamp()+1);
    idp.updateReplicaUnderRecovery(b, recoveryId, b.getBlockId(),
        newblock.getNumBytes());
    checkMetaInfo(newblock, datanode);

    // Verify correct null response trying to init recovery for a missing block
    ExtendedBlock badBlock = new ExtendedBlock("fake-pool",
        b.getBlockId(), 0, 0);
    assertNull(idp.initReplicaRecovery(
        new RecoveringBlock(badBlock,
            locatedblock.getLocations(), recoveryId)));
  } finally {
    if (cluster != null) {cluster.shutdown();}
  }
}