Java Code Examples for org.apache.hadoop.hdfs.MiniDFSCluster#getDataNode()
The following examples show how to use
org.apache.hadoop.hdfs.MiniDFSCluster#getDataNode().
You can follow the link above each example to the original project and source file. A short usage sketch comes first, then the examples themselves.
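All three examples share the same pattern: ask the NameNode for a block's replica locations, then map a replica's IPC port back to the in-process DataNode daemon with MiniDFSCluster#getDataNode(int ipcPort). Below is a minimal, self-contained sketch of that lookup. It is illustrative only, not code from any project listed here; the file path "/sketch", the file size, and the use of DFSTestUtil.getAllBlocks to obtain a LocatedBlock are assumptions chosen to mirror the examples.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.server.datanode.DataNode;

public class GetDataNodeSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
        .numDataNodes(3)
        .build();
    try {
      cluster.waitActive();
      DistributedFileSystem dfs = cluster.getFileSystem();

      // Write a small file so the NameNode has replica locations to report.
      // The path and sizes are illustrative assumptions.
      Path path = new Path("/sketch");
      DFSTestUtil.createFile(dfs, path, 1024L, (short) 3, 0L);

      // Ask the NameNode where the first block's replicas live.
      LocatedBlock located = DFSTestUtil.getAllBlocks(dfs, path).get(0);
      DatanodeInfo[] locations = located.getLocations();

      // Map the reported IPC port back to the in-process DataNode daemon.
      DataNode dn = cluster.getDataNode(locations[0].getIpcPort());
      System.out.println("Resolved DataNode: " + dn);
    } finally {
      cluster.shutdown();
    }
  }
}

The IPC port works as the lookup key here because the DatanodeInfo (or DatanodeDescriptor) objects the NameNode hands back carry that port, which uniquely identifies one DataNode among the several running inside a single MiniDFSCluster JVM.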
Example 1
Source File: TestPipelinesFailover.java (from hadoop, Apache License 2.0)
/**
 * Test the scenario where the NN fails over after issuing a block
 * synchronization request, but before it is committed. The
 * DN running the recovery should then fail to commit the synchronization
 * and a later retry will succeed.
 */
@Test(timeout=30000)
public void testFailoverRightBeforeCommitSynchronization() throws Exception {
  final Configuration conf = new Configuration();
  // Disable permissions so that another user can recover the lease.
  conf.setBoolean(DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY, false);
  conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);

  FSDataOutputStream stm = null;
  final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
      .nnTopology(MiniDFSNNTopology.simpleHATopology())
      .numDataNodes(3)
      .build();
  try {
    cluster.waitActive();
    cluster.transitionToActive(0);
    Thread.sleep(500);

    LOG.info("Starting with NN 0 active");
    FileSystem fs = HATestUtil.configureFailoverFs(cluster, conf);
    stm = fs.create(TEST_PATH);

    // write a half block
    AppendTestUtil.write(stm, 0, BLOCK_SIZE / 2);
    stm.hflush();

    // Look into the block manager on the active node for the block
    // under construction.
    NameNode nn0 = cluster.getNameNode(0);
    ExtendedBlock blk = DFSTestUtil.getFirstBlock(fs, TEST_PATH);
    DatanodeDescriptor expectedPrimary =
        DFSTestUtil.getExpectedPrimaryNode(nn0, blk);
    LOG.info("Expecting block recovery to be triggered on DN " +
        expectedPrimary);

    // Find the corresponding DN daemon, and spy on its connection to the
    // active.
    DataNode primaryDN = cluster.getDataNode(expectedPrimary.getIpcPort());
    DatanodeProtocolClientSideTranslatorPB nnSpy =
        DataNodeTestUtils.spyOnBposToNN(primaryDN, nn0);

    // Delay the commitBlockSynchronization call
    DelayAnswer delayer = new DelayAnswer(LOG);
    Mockito.doAnswer(delayer).when(nnSpy).commitBlockSynchronization(
        Mockito.eq(blk),
        Mockito.anyInt(),  // new genstamp
        Mockito.anyLong(), // new length
        Mockito.eq(true),  // close file
        Mockito.eq(false), // delete block
        (DatanodeID[]) Mockito.anyObject(),  // new targets
        (String[]) Mockito.anyObject());     // new target storages

    DistributedFileSystem fsOtherUser = createFsAsOtherUser(cluster, conf);
    assertFalse(fsOtherUser.recoverLease(TEST_PATH));

    LOG.info("Waiting for commitBlockSynchronization call from primary");
    delayer.waitForCall();

    LOG.info("Failing over to NN 1");
    cluster.transitionToStandby(0);
    cluster.transitionToActive(1);

    // Let the commitBlockSynchronization call go through, and check that
    // it failed with the correct exception.
    delayer.proceed();
    delayer.waitForResult();
    Throwable t = delayer.getThrown();
    if (t == null) {
      fail("commitBlockSynchronization call did not fail on standby");
    }
    GenericTestUtils.assertExceptionContains(
        "Operation category WRITE is not supported", t);

    // Now, if we try again to recover the block, it should succeed on the new
    // active.
    loopRecoverLease(fsOtherUser, TEST_PATH);

    AppendTestUtil.check(fs, TEST_PATH, BLOCK_SIZE / 2);
  } finally {
    IOUtils.closeStream(stm);
    cluster.shutdown();
  }
}
Example 2
Source File: TestInterDatanodeProtocol.java (from hadoop, Apache License 2.0)
/**
 * The following test first creates a file.
 * It verifies the block information from a datanode.
 * Then, it updates the block with new information and verifies again.
 * @param useDnHostname whether DNs should connect to other DNs by hostname
 */
private void checkBlockMetaDataInfo(boolean useDnHostname) throws Exception {
  MiniDFSCluster cluster = null;

  conf.setBoolean(DFSConfigKeys.DFS_DATANODE_USE_DN_HOSTNAME, useDnHostname);
  if (useDnHostname) {
    // Since the mini cluster only listens on the loopback we have to
    // ensure the hostname used to access DNs maps to the loopback. We
    // do this by telling the DN to advertise localhost as its hostname
    // instead of the default hostname.
    conf.set(DFSConfigKeys.DFS_DATANODE_HOST_NAME_KEY, "localhost");
  }

  try {
    cluster = new MiniDFSCluster.Builder(conf)
        .numDataNodes(3)
        .checkDataNodeHostConfig(true)
        .build();
    cluster.waitActive();

    //create a file
    DistributedFileSystem dfs = cluster.getFileSystem();
    String filestr = "/foo";
    Path filepath = new Path(filestr);
    DFSTestUtil.createFile(dfs, filepath, 1024L, (short)3, 0L);
    assertTrue(dfs.exists(filepath));

    //get block info
    LocatedBlock locatedblock = getLastLocatedBlock(
        DFSClientAdapter.getDFSClient(dfs).getNamenode(), filestr);
    DatanodeInfo[] datanodeinfo = locatedblock.getLocations();
    assertTrue(datanodeinfo.length > 0);

    //connect to a data node
    DataNode datanode = cluster.getDataNode(datanodeinfo[0].getIpcPort());
    InterDatanodeProtocol idp =
        DataNodeTestUtils.createInterDatanodeProtocolProxy(
            datanode, datanodeinfo[0], conf, useDnHostname);

    // Stop the block scanners.
    datanode.getBlockScanner().removeAllVolumeScanners();

    //verify BlockMetaDataInfo
    ExtendedBlock b = locatedblock.getBlock();
    InterDatanodeProtocol.LOG.info("b=" + b + ", " + b.getClass());
    checkMetaInfo(b, datanode);
    long recoveryId = b.getGenerationStamp() + 1;
    idp.initReplicaRecovery(
        new RecoveringBlock(b, locatedblock.getLocations(), recoveryId));

    //verify updateBlock
    ExtendedBlock newblock = new ExtendedBlock(b.getBlockPoolId(),
        b.getBlockId(), b.getNumBytes()/2, b.getGenerationStamp()+1);
    idp.updateReplicaUnderRecovery(b, recoveryId, b.getBlockId(),
        newblock.getNumBytes());
    checkMetaInfo(newblock, datanode);

    // Verify correct null response trying to init recovery for a missing block
    ExtendedBlock badBlock = new ExtendedBlock("fake-pool",
        b.getBlockId(), 0, 0);
    assertNull(idp.initReplicaRecovery(
        new RecoveringBlock(badBlock, locatedblock.getLocations(),
            recoveryId)));
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
Example 3
Source File: TestInterDatanodeProtocol.java (from hadoop, Apache License 2.0)
/**
 * Test for
 * {@link FsDatasetImpl#updateReplicaUnderRecovery(ExtendedBlock, long, long)}
 */
@Test
public void testUpdateReplicaUnderRecovery() throws IOException {
  MiniDFSCluster cluster = null;

  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
    cluster.waitActive();
    String bpid = cluster.getNamesystem().getBlockPoolId();

    //create a file
    DistributedFileSystem dfs = cluster.getFileSystem();
    String filestr = "/foo";
    Path filepath = new Path(filestr);
    DFSTestUtil.createFile(dfs, filepath, 1024L, (short)3, 0L);

    //get block info
    final LocatedBlock locatedblock = getLastLocatedBlock(
        DFSClientAdapter.getDFSClient(dfs).getNamenode(), filestr);
    final DatanodeInfo[] datanodeinfo = locatedblock.getLocations();
    Assert.assertTrue(datanodeinfo.length > 0);

    //get DataNode and FSDataset objects
    final DataNode datanode = cluster.getDataNode(datanodeinfo[0].getIpcPort());
    Assert.assertTrue(datanode != null);

    //initReplicaRecovery
    final ExtendedBlock b = locatedblock.getBlock();
    final long recoveryid = b.getGenerationStamp() + 1;
    final long newlength = b.getNumBytes() - 1;
    final FsDatasetSpi<?> fsdataset = DataNodeTestUtils.getFSDataset(datanode);
    final ReplicaRecoveryInfo rri = fsdataset.initReplicaRecovery(
        new RecoveringBlock(b, null, recoveryid));

    //check replica
    final ReplicaInfo replica = FsDatasetTestUtil.fetchReplicaInfo(
        fsdataset, bpid, b.getBlockId());
    Assert.assertEquals(ReplicaState.RUR, replica.getState());

    //check meta data before update
    FsDatasetImpl.checkReplicaFiles(replica);

    //case "THIS IS NOT SUPPOSED TO HAPPEN"
    //with (block length) != (stored replica's on disk length).
    {
      //create a block with same id and gs but different length.
      final ExtendedBlock tmp = new ExtendedBlock(b.getBlockPoolId(),
          rri.getBlockId(), rri.getNumBytes() - 1, rri.getGenerationStamp());
      try {
        //update should fail
        fsdataset.updateReplicaUnderRecovery(tmp, recoveryid,
            tmp.getBlockId(), newlength);
        Assert.fail();
      } catch(IOException ioe) {
        System.out.println("GOOD: getting " + ioe);
      }
    }

    //update
    final String storageID = fsdataset.updateReplicaUnderRecovery(
        new ExtendedBlock(b.getBlockPoolId(), rri),
        recoveryid, rri.getBlockId(), newlength);
    assertTrue(storageID != null);
  } finally {
    if (cluster != null) cluster.shutdown();
  }
}