Java Code Examples for org.apache.hadoop.hdfs.server.common.GenerationStamp#LAST_RESERVED_STAMP
The following examples show how to use
org.apache.hadoop.hdfs.server.common.GenerationStamp#LAST_RESERVED_STAMP.
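Both tests below fabricate Block objects and need some valid generation stamp for them; they use this reserved constant rather than a stamp issued by a running namenode. A minimal sketch of that usage pattern (the block ID and length are arbitrary values chosen for illustration, and the Hadoop HDFS classes are assumed to be on the classpath):

  // A fabricated, zero-length block carrying the reserved generation
  // stamp, as in both examples below. The ID 42 is arbitrary.
  Block placeholder = new Block(42L, 0, GenerationStamp.LAST_RESERVED_STAMP);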
Example 1
Source File: TestComputeInvalidateWork.java (from the hadoop and big-c projects), Apache License 2.0
/**
 * Test if {@link BlockManager#computeInvalidateWork(int)}
 * can schedule invalidate work correctly.
 */
@Test(timeout=120000)
public void testCompInvalidate() throws Exception {
  final int blockInvalidateLimit = bm.getDatanodeManager()
      .blockInvalidateLimit;
  namesystem.writeLock();
  try {
    for (int i = 0; i < nodes.length; i++) {
      for (int j = 0; j < 3 * blockInvalidateLimit + 1; j++) {
        Block block = new Block(i * (blockInvalidateLimit + 1) + j, 0,
            GenerationStamp.LAST_RESERVED_STAMP);
        bm.addToInvalidates(block, nodes[i]);
      }
    }

    assertEquals(blockInvalidateLimit * NUM_OF_DATANODES,
        bm.computeInvalidateWork(NUM_OF_DATANODES + 1));
    assertEquals(blockInvalidateLimit * NUM_OF_DATANODES,
        bm.computeInvalidateWork(NUM_OF_DATANODES));
    assertEquals(blockInvalidateLimit * (NUM_OF_DATANODES - 1),
        bm.computeInvalidateWork(NUM_OF_DATANODES - 1));
    int workCount = bm.computeInvalidateWork(1);
    if (workCount == 1) {
      assertEquals(blockInvalidateLimit + 1, bm.computeInvalidateWork(2));
    } else {
      assertEquals(workCount, blockInvalidateLimit);
      assertEquals(2, bm.computeInvalidateWork(2));
    }
  } finally {
    namesystem.writeUnlock();
  }
}
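The arithmetic behind these assertions is easier to see in isolation: each datanode accepts at most blockInvalidateLimit invalidations per round, so one round spread across k datanodes schedules at most blockInvalidateLimit * k deletions. The sketch below is a minimal standalone model of that accounting, assuming one FIFO queue of pending block IDs per datanode; it is not Hadoop's BlockManager, and every name in it is hypothetical.

  import java.util.ArrayDeque;
  import java.util.Deque;
  import java.util.LinkedHashMap;
  import java.util.Map;

  // Minimal model of per-datanode invalidation batching (hypothetical,
  // not Hadoop's BlockManager): each datanode has a FIFO queue of
  // pending block IDs, and one round drains at most `limit` blocks
  // from each of at most `maxNodes` datanodes.
  public class InvalidateWorkSketch {
    private final Map<String, Deque<Long>> pending = new LinkedHashMap<>();
    private final int limit;

    InvalidateWorkSketch(int limit) {
      this.limit = limit;
    }

    void addToInvalidates(long blockId, String node) {
      pending.computeIfAbsent(node, n -> new ArrayDeque<>()).add(blockId);
    }

    // Returns the total number of blocks scheduled for deletion in one
    // round, which is the quantity the assertions above check.
    int computeInvalidateWork(int maxNodes) {
      int scheduled = 0;
      int nodesVisited = 0;
      for (Deque<Long> queue : pending.values()) {
        if (nodesVisited++ == maxNodes) {
          break;
        }
        for (int i = 0; i < limit && !queue.isEmpty(); i++) {
          queue.poll();
          scheduled++;
        }
      }
      return scheduled;
    }

    public static void main(String[] args) {
      final int limit = 10;    // stands in for blockInvalidateLimit
      final int numNodes = 3;  // stands in for NUM_OF_DATANODES
      InvalidateWorkSketch sketch = new InvalidateWorkSketch(limit);
      // Queue more blocks per node than one round can drain, as the
      // test does with 3 * blockInvalidateLimit + 1 blocks per node.
      for (int i = 0; i < numNodes; i++) {
        for (int j = 0; j < 3 * limit + 1; j++) {
          sketch.addToInvalidates(i * (3 * limit + 1) + j, "node" + i);
        }
      }
      // Prints 30, i.e. limit * numNodes: with every queue oversized,
      // each of the 3 nodes contributes exactly `limit` deletions.
      System.out.println(sketch.computeInvalidateWork(numNodes));
    }
  }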
Example 2
Source File: TestBlockInfoUnderConstruction.java (from the hadoop and big-c projects), Apache License 2.0
@Test
public void testInitializeBlockRecovery() throws Exception {
  DatanodeStorageInfo s1 = DFSTestUtil.createDatanodeStorageInfo("10.10.1.1", "s1");
  DatanodeDescriptor dd1 = s1.getDatanodeDescriptor();
  DatanodeStorageInfo s2 = DFSTestUtil.createDatanodeStorageInfo("10.10.1.2", "s2");
  DatanodeDescriptor dd2 = s2.getDatanodeDescriptor();
  DatanodeStorageInfo s3 = DFSTestUtil.createDatanodeStorageInfo("10.10.1.3", "s3");
  DatanodeDescriptor dd3 = s3.getDatanodeDescriptor();
  dd1.isAlive = dd2.isAlive = dd3.isAlive = true;
  BlockInfoContiguousUnderConstruction blockInfo =
      new BlockInfoContiguousUnderConstruction(
          new Block(0, 0, GenerationStamp.LAST_RESERVED_STAMP),
          (short) 3,
          BlockUCState.UNDER_CONSTRUCTION,
          new DatanodeStorageInfo[] {s1, s2, s3});

  // Recovery attempt #1.
  DFSTestUtil.resetLastUpdatesWithOffset(dd1, -3 * 1000);
  DFSTestUtil.resetLastUpdatesWithOffset(dd2, -1 * 1000);
  DFSTestUtil.resetLastUpdatesWithOffset(dd3, -2 * 1000);
  blockInfo.initializeBlockRecovery(1);
  BlockInfoContiguousUnderConstruction[] blockInfoRecovery =
      dd2.getLeaseRecoveryCommand(1);
  assertEquals(blockInfoRecovery[0], blockInfo);

  // Recovery attempt #2.
  DFSTestUtil.resetLastUpdatesWithOffset(dd1, -2 * 1000);
  DFSTestUtil.resetLastUpdatesWithOffset(dd2, -1 * 1000);
  DFSTestUtil.resetLastUpdatesWithOffset(dd3, -3 * 1000);
  blockInfo.initializeBlockRecovery(2);
  blockInfoRecovery = dd1.getLeaseRecoveryCommand(1);
  assertEquals(blockInfoRecovery[0], blockInfo);

  // Recovery attempt #3.
  DFSTestUtil.resetLastUpdatesWithOffset(dd1, -2 * 1000);
  DFSTestUtil.resetLastUpdatesWithOffset(dd2, -1 * 1000);
  DFSTestUtil.resetLastUpdatesWithOffset(dd3, -3 * 1000);
  blockInfo.initializeBlockRecovery(3);
  blockInfoRecovery = dd3.getLeaseRecoveryCommand(1);
  assertEquals(blockInfoRecovery[0], blockInfo);

  // Recovery attempt #4.
  // Reset everything. And again pick DN with most recent heart beat.
  DFSTestUtil.resetLastUpdatesWithOffset(dd1, -2 * 1000);
  DFSTestUtil.resetLastUpdatesWithOffset(dd2, -1 * 1000);
  DFSTestUtil.resetLastUpdatesWithOffset(dd3, 0);
  blockInfo.initializeBlockRecovery(3);
  blockInfoRecovery = dd3.getLeaseRecoveryCommand(1);
  assertEquals(blockInfoRecovery[0], blockInfo);
}
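Judging from the assertions, each recovery attempt picks as primary the live replica whose datanode heartbeated most recently, skipping replicas already chosen as primary on earlier attempts (which is why attempt #2 lands on dd1 even though dd2 is fresher). The sketch below is a simplified standalone model of the most-recent-heartbeat comparison only; it omits the chosen-as-primary bookkeeping, and every name in it is hypothetical.

  import java.util.ArrayList;
  import java.util.List;

  // Hypothetical stand-in for a replica location: a name, a liveness
  // flag, and the time of the last heartbeat from its datanode.
  class ReplicaNode {
    final String name;
    boolean alive = true;
    long lastUpdateMillis;

    ReplicaNode(String name, long lastUpdateMillis) {
      this.name = name;
      this.lastUpdateMillis = lastUpdateMillis;
    }
  }

  public class PrimarySelectionSketch {
    // Pick the live replica with the most recent heartbeat; returns
    // null if no replica is alive. This models only the heartbeat
    // comparison, not the rotation across successive attempts.
    static ReplicaNode choosePrimary(List<ReplicaNode> replicas) {
      ReplicaNode best = null;
      for (ReplicaNode r : replicas) {
        if (!r.alive) {
          continue;
        }
        if (best == null || r.lastUpdateMillis > best.lastUpdateMillis) {
          best = r;
        }
      }
      return best;
    }

    public static void main(String[] args) {
      long now = System.currentTimeMillis();
      List<ReplicaNode> replicas = new ArrayList<>();
      replicas.add(new ReplicaNode("dd1", now - 3000)); // -3 s offset
      replicas.add(new ReplicaNode("dd2", now - 1000)); // -1 s offset
      replicas.add(new ReplicaNode("dd3", now - 2000)); // -2 s offset
      // dd2 heartbeated most recently, so it is chosen, matching
      // recovery attempt #1 in the test above.
      System.out.println(choosePrimary(replicas).name);
    }
  }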