Java Code Examples for org.apache.hadoop.hdfs.server.datanode.DataNode#createInterDataNodeProtocolProxy()
The following examples show how to use org.apache.hadoop.hdfs.server.datanode.DataNode#createInterDataNodeProtocolProxy().
Each example notes the project and source file it was taken from, so you can follow it back to the original code.
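Before the full test cases, here is a minimal sketch of the call pattern they all share: create the proxy, issue inter-datanode calls, and release it with RPC.stopProxy(). The helper name talkToDatanode and its parameters are illustrative assumptions, not part of any example below; the four-argument overload shown matches the Hadoop 2.x examples (1 and 2).

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.protocol.InterDatanodeProtocol;
import org.apache.hadoop.ipc.RPC;

public class InterDatanodeProxySketch {
  // Hypothetical helper: open a proxy to one target datanode, issue
  // inter-datanode calls, and always release the RPC resources.
  static void talkToDatanode(DatanodeInfo target, Configuration conf)
      throws IOException {
    // Hadoop 2.x overload: (DatanodeID, Configuration, socketTimeoutMs,
    // connectToDnViaHostname); see Examples 1 and 2 below.
    InterDatanodeProtocol proxy = DataNode.createInterDataNodeProtocolProxy(
        target, conf, 5000, false);
    try {
      // ... issue calls such as proxy.initReplicaRecovery(...) here ...
    } finally {
      RPC.stopProxy(proxy); // release the proxy, as Examples 1-3 do
    }
  }
}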
Example 1
Source File: TestInterDatanodeProtocol.java From hadoop with Apache License 2.0
/**
 * Test to verify that the InterDatanode RPC times out as expected when
 * the server DN does not respond.
 */
@Test(expected = SocketTimeoutException.class)
public void testInterDNProtocolTimeout() throws Throwable {
  final Server server = new TestServer(1, true);
  server.start();

  final InetSocketAddress addr = NetUtils.getConnectAddress(server);
  DatanodeID fakeDnId = DFSTestUtil.getLocalDatanodeID(addr.getPort());
  DatanodeInfo dInfo = new DatanodeInfo(fakeDnId);
  InterDatanodeProtocol proxy = null;

  try {
    proxy = DataNode.createInterDataNodeProtocolProxy(
        dInfo, conf, 500, false);
    proxy.initReplicaRecovery(new RecoveringBlock(
        new ExtendedBlock("bpid", 1), null, 100));
    fail("Expected a SocketTimeoutException, but did not get one.");
  } finally {
    if (proxy != null) {
      RPC.stopProxy(proxy);
    }
    server.stop();
  }
}
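The test points the proxy at a server that is set up not to respond and passes an aggressive 500 ms socket timeout to createInterDataNodeProtocolProxy(), so the subsequent initReplicaRecovery() call must fail with the expected SocketTimeoutException. Note that the proxy is released with RPC.stopProxy() in the finally block whether or not the call times out.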
Example 2
Source File: TestInterDatanodeProtocol.java From big-c with Apache License 2.0
/**
 * Test to verify that the InterDatanode RPC times out as expected when
 * the server DN does not respond.
 */
@Test(expected = SocketTimeoutException.class)
public void testInterDNProtocolTimeout() throws Throwable {
  final Server server = new TestServer(1, true);
  server.start();

  final InetSocketAddress addr = NetUtils.getConnectAddress(server);
  DatanodeID fakeDnId = DFSTestUtil.getLocalDatanodeID(addr.getPort());
  DatanodeInfo dInfo = new DatanodeInfo(fakeDnId);
  InterDatanodeProtocol proxy = null;

  try {
    proxy = DataNode.createInterDataNodeProtocolProxy(
        dInfo, conf, 500, false);
    proxy.initReplicaRecovery(new RecoveringBlock(
        new ExtendedBlock("bpid", 1), null, 100));
    fail("Expected a SocketTimeoutException, but did not get one.");
  } finally {
    if (proxy != null) {
      RPC.stopProxy(proxy);
    }
    server.stop();
  }
}
Example 3
Source File: TestLeaseRecovery.java From RDFS with Apache License 2.0
/**
 * The following test first creates a file with a few blocks. It randomly
 * truncates the replica of the last block stored in each datanode. Finally,
 * it triggers block synchronization to synchronize all stored blocks.
 * @param forceOneBlockToZero if true, truncate one block to length 0
 */
public void runTestBlockSynchronization(boolean forceOneBlockToZero)
    throws Exception {
  final int ORG_FILE_SIZE = 3000;

  // create a file
  DistributedFileSystem dfs = (DistributedFileSystem) cluster.getFileSystem();
  String filestr = "/foo";
  Path filepath = new Path(filestr);
  DFSTestUtil.createFile(dfs, filepath, ORG_FILE_SIZE, REPLICATION_NUM, 0L);
  assertTrue(dfs.dfs.exists(filestr));
  DFSTestUtil.waitReplication(dfs, filepath, REPLICATION_NUM);

  // get block info for the last block
  LocatedBlockWithMetaInfo locatedblock = TestInterDatanodeProtocol.getLastLocatedBlock(
      dfs.dfs.namenode, filestr);
  int namespaceId = locatedblock.getNamespaceID();
  DatanodeInfo[] datanodeinfos = locatedblock.getLocations();
  assertEquals(REPLICATION_NUM, datanodeinfos.length);

  // connect to data nodes
  InterDatanodeProtocol[] idps = new InterDatanodeProtocol[REPLICATION_NUM];
  DataNode[] datanodes = new DataNode[REPLICATION_NUM];
  for (int i = 0; i < REPLICATION_NUM; i++) {
    idps[i] = DataNode.createInterDataNodeProtocolProxy(
        datanodeinfos[i], conf, 0);
    datanodes[i] = cluster.getDataNode(datanodeinfos[i].getIpcPort());
    assertTrue(datanodes[i] != null);
  }

  // verify BlockMetaDataInfo
  Block lastblock = locatedblock.getBlock();
  DataNode.LOG.info("newblocks=" + lastblock);
  for (int i = 0; i < REPLICATION_NUM; i++) {
    checkMetaInfo(namespaceId, lastblock, idps[i]);
  }

  // set up random block sizes
  int lastblocksize = ORG_FILE_SIZE % BLOCK_SIZE;
  Integer[] newblocksizes = new Integer[REPLICATION_NUM];
  for (int i = 0; i < REPLICATION_NUM; i++) {
    newblocksizes[i] = AppendTestUtil.nextInt(lastblocksize);
  }
  if (forceOneBlockToZero) {
    newblocksizes[0] = 0;
  }
  DataNode.LOG.info("newblocksizes = " + Arrays.asList(newblocksizes));

  // update blocks with random block sizes
  Block[] newblocks = new Block[REPLICATION_NUM];
  for (int i = 0; i < REPLICATION_NUM; i++) {
    DataNode dn = datanodes[i];
    FSDatasetTestUtil.truncateBlock(dn, lastblock, newblocksizes[i], namespaceId);
    newblocks[i] = new Block(lastblock.getBlockId(), newblocksizes[i],
        lastblock.getGenerationStamp());
    checkMetaInfo(namespaceId, newblocks[i], idps[i]);
  }

  DataNode.LOG.info("dfs.dfs.clientName=" + dfs.dfs.clientName);
  cluster.getNameNode().append(filestr, dfs.dfs.clientName);

  // block synchronization
  final int primarydatanodeindex = AppendTestUtil.nextInt(datanodes.length);
  DataNode.LOG.info("primarydatanodeindex =" + primarydatanodeindex);
  DataNode primary = datanodes[primarydatanodeindex];
  DataNode.LOG.info("primary.dnRegistration=" + primary.getDNRegistrationForNS(
      cluster.getNameNode().getNamespaceID()));
  primary.recoverBlocks(namespaceId, new Block[]{lastblock},
      new DatanodeInfo[][]{datanodeinfos}).join();

  BlockMetaDataInfo[] updatedmetainfo = new BlockMetaDataInfo[REPLICATION_NUM];
  int minsize = min(newblocksizes);
  long currentGS = cluster.getNameNode().namesystem.getGenerationStamp();
  lastblock.setGenerationStamp(currentGS);
  for (int i = 0; i < REPLICATION_NUM; i++) {
    updatedmetainfo[i] = idps[i].getBlockMetaDataInfo(namespaceId, lastblock);
    RPC.stopProxy(idps[i]);
    assertEquals(lastblock.getBlockId(), updatedmetainfo[i].getBlockId());
    assertEquals(minsize, updatedmetainfo[i].getNumBytes());
    assertEquals(currentGS, updatedmetainfo[i].getGenerationStamp());
  }
}
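This RDFS variant threads a namespaceId through every inter-datanode call, passes a socket timeout of 0 when creating each proxy, and stops each proxy with RPC.stopProxy() once its final getBlockMetaDataInfo() check has run.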
Example 4
Source File: TestLeaseRecovery.java From hadoop-gpu with Apache License 2.0
/**
 * The following test first creates a file with a few blocks. It randomly
 * truncates the replica of the last block stored in each datanode. Finally,
 * it triggers block synchronization to synchronize all stored blocks.
 */
public void testBlockSynchronization() throws Exception {
  final int ORG_FILE_SIZE = 3000;
  Configuration conf = new Configuration();
  conf.setLong("dfs.block.size", BLOCK_SIZE);
  conf.setBoolean("dfs.support.append", true);
  MiniDFSCluster cluster = null;

  try {
    cluster = new MiniDFSCluster(conf, 5, true, null);
    cluster.waitActive();

    // create a file
    DistributedFileSystem dfs = (DistributedFileSystem) cluster.getFileSystem();
    String filestr = "/foo";
    Path filepath = new Path(filestr);
    DFSTestUtil.createFile(dfs, filepath, ORG_FILE_SIZE, REPLICATION_NUM, 0L);
    assertTrue(dfs.dfs.exists(filestr));
    DFSTestUtil.waitReplication(dfs, filepath, REPLICATION_NUM);

    // get block info for the last block
    LocatedBlock locatedblock = TestInterDatanodeProtocol.getLastLocatedBlock(
        dfs.dfs.namenode, filestr);
    DatanodeInfo[] datanodeinfos = locatedblock.getLocations();
    assertEquals(REPLICATION_NUM, datanodeinfos.length);

    // connect to data nodes
    InterDatanodeProtocol[] idps = new InterDatanodeProtocol[REPLICATION_NUM];
    DataNode[] datanodes = new DataNode[REPLICATION_NUM];
    for (int i = 0; i < REPLICATION_NUM; i++) {
      idps[i] = DataNode.createInterDataNodeProtocolProxy(datanodeinfos[i], conf);
      datanodes[i] = cluster.getDataNode(datanodeinfos[i].getIpcPort());
      assertTrue(datanodes[i] != null);
    }

    // verify BlockMetaDataInfo
    Block lastblock = locatedblock.getBlock();
    DataNode.LOG.info("newblocks=" + lastblock);
    for (int i = 0; i < REPLICATION_NUM; i++) {
      checkMetaInfo(lastblock, idps[i]);
    }

    // set up random block sizes
    int lastblocksize = ORG_FILE_SIZE % BLOCK_SIZE;
    Integer[] newblocksizes = new Integer[REPLICATION_NUM];
    for (int i = 0; i < REPLICATION_NUM; i++) {
      newblocksizes[i] = AppendTestUtil.nextInt(lastblocksize);
    }
    DataNode.LOG.info("newblocksizes = " + Arrays.asList(newblocksizes));

    // update blocks with random block sizes
    Block[] newblocks = new Block[REPLICATION_NUM];
    for (int i = 0; i < REPLICATION_NUM; i++) {
      newblocks[i] = new Block(lastblock.getBlockId(), newblocksizes[i],
          lastblock.getGenerationStamp());
      idps[i].updateBlock(lastblock, newblocks[i], false);
      checkMetaInfo(newblocks[i], idps[i]);
    }

    DataNode.LOG.info("dfs.dfs.clientName=" + dfs.dfs.clientName);
    cluster.getNameNode().append(filestr, dfs.dfs.clientName);

    // block synchronization
    final int primarydatanodeindex = AppendTestUtil.nextInt(datanodes.length);
    DataNode.LOG.info("primarydatanodeindex =" + primarydatanodeindex);
    DataNode primary = datanodes[primarydatanodeindex];
    DataNode.LOG.info("primary.dnRegistration=" + primary.dnRegistration);
    primary.recoverBlocks(new Block[]{lastblock},
        new DatanodeInfo[][]{datanodeinfos}).join();

    BlockMetaDataInfo[] updatedmetainfo = new BlockMetaDataInfo[REPLICATION_NUM];
    int minsize = min(newblocksizes);
    long currentGS = cluster.getNameNode().namesystem.getGenerationStamp();
    lastblock.setGenerationStamp(currentGS);
    for (int i = 0; i < REPLICATION_NUM; i++) {
      updatedmetainfo[i] = idps[i].getBlockMetaDataInfo(lastblock);
      assertEquals(lastblock.getBlockId(), updatedmetainfo[i].getBlockId());
      assertEquals(minsize, updatedmetainfo[i].getNumBytes());
      assertEquals(currentGS, updatedmetainfo[i].getGenerationStamp());
    }
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
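The four examples above span three lineages of the code base, and the signature of createInterDataNodeProtocolProxy() differs between them: the Hadoop 2.x examples (1 and 2) pass a socket timeout and a connect-via-hostname flag, the RDFS example (3) passes only a socket timeout, and the hadoop-gpu example (4) passes just the target datanode and the Configuration. Check the DataNode class in your Hadoop version for the exact overload before copying one of these snippets.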