Java Code Examples for org.apache.hadoop.hdfs.DFSTestUtil#writeFile()
The following examples show how to use org.apache.hadoop.hdfs.DFSTestUtil#writeFile(). You can go to the original project or source file by following the links above each example.
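Before the examples, here is a minimal sketch of the method's basic round trip: writeFile() writes a String to a path on a FileSystem, and readFile() reads it back. The cluster setup, class name, and test path below are illustrative assumptions, not taken from any example on this page.

// Minimal sketch (assumed setup): round-trip a string through HDFS with
// DFSTestUtil.writeFile() and DFSTestUtil.readFile().
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.MiniDFSCluster;

public class WriteFileSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Single-DataNode test cluster; the builder and helpers below are the
    // same ones the examples on this page use.
    MiniDFSCluster cluster =
        new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    try {
      cluster.waitActive();
      FileSystem fs = cluster.getFileSystem();
      Path path = new Path("/sketch/hello.txt"); // hypothetical test path
      DFSTestUtil.writeFile(fs, path, "hello, hdfs"); // creates the file, writes the bytes
      String back = DFSTestUtil.readFile(fs, path);   // reads the contents back as a String
      if (!"hello, hdfs".equals(back)) {
        throw new AssertionError("round trip failed");
      }
    } finally {
      cluster.shutdown();
    }
  }
}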
Example 1
Source File: TestFailoverWithBlockTokensEnabled.java From hadoop with Apache License 2.0
@Test
public void ensureInvalidBlockTokensAreRejected() throws IOException,
    URISyntaxException {
  cluster.transitionToActive(0);
  FileSystem fs = HATestUtil.configureFailoverFs(cluster, conf);

  DFSTestUtil.writeFile(fs, TEST_PATH, TEST_DATA);
  assertEquals(TEST_DATA, DFSTestUtil.readFile(fs, TEST_PATH));

  DFSClient dfsClient = DFSClientAdapter.getDFSClient(
      (DistributedFileSystem) fs);
  DFSClient spyDfsClient = Mockito.spy(dfsClient);
  Mockito.doAnswer(
      new Answer<LocatedBlocks>() {
        @Override
        public LocatedBlocks answer(InvocationOnMock arg0) throws Throwable {
          LocatedBlocks locatedBlocks = (LocatedBlocks) arg0.callRealMethod();
          for (LocatedBlock lb : locatedBlocks.getLocatedBlocks()) {
            Token<BlockTokenIdentifier> token = lb.getBlockToken();
            BlockTokenIdentifier id = lb.getBlockToken().decodeIdentifier();
            // This will make the token invalid, since the password
            // won't match anymore
            id.setExpiryDate(Time.now() + 10);
            Token<BlockTokenIdentifier> newToken =
                new Token<BlockTokenIdentifier>(id.getBytes(),
                    token.getPassword(), token.getKind(),
                    token.getService());
            lb.setBlockToken(newToken);
          }
          return locatedBlocks;
        }
      }).when(spyDfsClient).getLocatedBlocks(Mockito.anyString(),
          Mockito.anyLong(), Mockito.anyLong());
  DFSClientAdapter.setDFSClient((DistributedFileSystem) fs, spyDfsClient);

  try {
    assertEquals(TEST_DATA, DFSTestUtil.readFile(fs, TEST_PATH));
    fail("Shouldn't have been able to read a file with invalid block tokens");
  } catch (IOException ioe) {
    GenericTestUtils.assertExceptionContains("Could not obtain block", ioe);
  }
}
Example 2
Source File: TestFailoverWithBlockTokensEnabled.java From hadoop with Apache License 2.0
private void writeUsingBothNameNodes() throws ServiceFailedException,
    IOException, URISyntaxException {
  cluster.transitionToActive(0);

  FileSystem fs = HATestUtil.configureFailoverFs(cluster, conf);
  DFSTestUtil.writeFile(fs, TEST_PATH, TEST_DATA);

  cluster.transitionToStandby(0);
  cluster.transitionToActive(1);

  fs.delete(TEST_PATH, false);
  DFSTestUtil.writeFile(fs, TEST_PATH, TEST_DATA);
}
Example 3
Source File: TestHostsFiles.java From big-c with Apache License 2.0
@Test
public void testHostsIncludeForDeadCount() throws Exception {
  Configuration conf = getConf();

  // Configure an excludes file
  FileSystem localFileSys = FileSystem.getLocal(conf);
  Path workingDir = localFileSys.getWorkingDirectory();
  Path dir = new Path(workingDir, "build/test/data/temp/decommission");
  Path excludeFile = new Path(dir, "exclude");
  Path includeFile = new Path(dir, "include");
  assertTrue(localFileSys.mkdirs(dir));
  StringBuilder includeHosts = new StringBuilder();
  includeHosts.append("localhost:52").append("\n").append("127.0.0.1:7777")
      .append("\n");
  DFSTestUtil.writeFile(localFileSys, excludeFile, "");
  DFSTestUtil.writeFile(localFileSys, includeFile, includeHosts.toString());
  conf.set(DFSConfigKeys.DFS_HOSTS_EXCLUDE, excludeFile.toUri().getPath());
  conf.set(DFSConfigKeys.DFS_HOSTS, includeFile.toUri().getPath());

  MiniDFSCluster cluster = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
    final FSNamesystem ns = cluster.getNameNode().getNamesystem();
    assertTrue(ns.getNumDeadDataNodes() == 2);
    assertTrue(ns.getNumLiveDataNodes() == 0);

    // Testing using MBeans
    MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
    ObjectName mxbeanName = new ObjectName(
        "Hadoop:service=NameNode,name=FSNamesystemState");
    String nodes = mbs.getAttribute(mxbeanName, "NumDeadDataNodes") + "";
    assertTrue((Integer) mbs.getAttribute(mxbeanName, "NumDeadDataNodes") == 2);
    assertTrue((Integer) mbs.getAttribute(mxbeanName, "NumLiveDataNodes") == 0);
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
Example 4
Source File: TestBlocksWithNotEnoughRacks.java From hadoop with Apache License 2.0
@Test
public void testNodeDecomissionWithOverreplicationRespectsRackPolicy()
    throws Exception {
  Configuration conf = getConf();
  short REPLICATION_FACTOR = 5;
  final Path filePath = new Path("/testFile");

  // Configure an excludes file
  FileSystem localFileSys = FileSystem.getLocal(conf);
  Path workingDir = localFileSys.getWorkingDirectory();
  Path dir = new Path(workingDir, "build/test/data/temp/decommission");
  Path excludeFile = new Path(dir, "exclude");
  Path includeFile = new Path(dir, "include");
  assertTrue(localFileSys.mkdirs(dir));
  DFSTestUtil.writeFile(localFileSys, excludeFile, "");
  DFSTestUtil.writeFile(localFileSys, includeFile, "");
  conf.set(DFSConfigKeys.DFS_HOSTS, includeFile.toUri().getPath());
  conf.set(DFSConfigKeys.DFS_HOSTS_EXCLUDE, excludeFile.toUri().getPath());

  // All hosts are on two racks, only one host on /rack2
  String racks[] = {"/rack1", "/rack2", "/rack1", "/rack1", "/rack1"};
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
      .numDataNodes(racks.length).racks(racks).build();
  final FSNamesystem ns = cluster.getNameNode().getNamesystem();

  try {
    final FileSystem fs = cluster.getFileSystem();
    DFSTestUtil.createFile(fs, filePath, 1L, REPLICATION_FACTOR, 1L);
    ExtendedBlock b = DFSTestUtil.getFirstBlock(fs, filePath);
    DFSTestUtil.waitForReplication(cluster, b, 2, REPLICATION_FACTOR, 0);

    // Lower the replication factor so the blocks are over replicated
    REPLICATION_FACTOR = 2;
    fs.setReplication(filePath, REPLICATION_FACTOR);

    // Decommission one of the hosts with the block that is not on
    // the lone host on rack2 (if we decommission that host it would
    // be impossible to respect the rack policy).
    BlockLocation locs[] = fs.getFileBlockLocations(
        fs.getFileStatus(filePath), 0, Long.MAX_VALUE);
    for (String top : locs[0].getTopologyPaths()) {
      if (!top.startsWith("/rack2")) {
        String name = top.substring("/rack1".length() + 1);
        DFSTestUtil.writeFile(localFileSys, excludeFile, name);
        ns.getBlockManager().getDatanodeManager().refreshNodes(conf);
        DFSTestUtil.waitForDecommission(fs, name);
        break;
      }
    }

    // Check the block still has sufficient # replicas across racks,
    // i.e. we didn't remove the replica on the host on /rack1.
    DFSTestUtil.waitForReplication(cluster, b, 2, REPLICATION_FACTOR, 0);
  } finally {
    cluster.shutdown();
  }
}
Example 5
Source File: TestBlocksWithNotEnoughRacks.java From big-c with Apache License 2.0
@Test
public void testNodeDecomissionRespectsRackPolicy() throws Exception {
  Configuration conf = getConf();
  short REPLICATION_FACTOR = 2;
  final Path filePath = new Path("/testFile");

  // Configure an excludes file
  FileSystem localFileSys = FileSystem.getLocal(conf);
  Path workingDir = localFileSys.getWorkingDirectory();
  Path dir = new Path(workingDir, "build/test/data/temp/decommission");
  Path excludeFile = new Path(dir, "exclude");
  Path includeFile = new Path(dir, "include");
  assertTrue(localFileSys.mkdirs(dir));
  DFSTestUtil.writeFile(localFileSys, excludeFile, "");
  DFSTestUtil.writeFile(localFileSys, includeFile, "");
  conf.set(DFSConfigKeys.DFS_HOSTS_EXCLUDE, excludeFile.toUri().getPath());
  conf.set(DFSConfigKeys.DFS_HOSTS, includeFile.toUri().getPath());

  // Two blocks and four racks
  String racks[] = {"/rack1", "/rack1", "/rack2", "/rack2"};
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
      .numDataNodes(racks.length).racks(racks).build();
  final FSNamesystem ns = cluster.getNameNode().getNamesystem();

  try {
    // Create a file with one block
    final FileSystem fs = cluster.getFileSystem();
    DFSTestUtil.createFile(fs, filePath, 1L, REPLICATION_FACTOR, 1L);
    ExtendedBlock b = DFSTestUtil.getFirstBlock(fs, filePath);
    DFSTestUtil.waitForReplication(cluster, b, 2, REPLICATION_FACTOR, 0);

    // Decommission one of the hosts with the block, this should cause
    // the block to get replicated to another host on the same rack,
    // otherwise the rack policy is violated.
    BlockLocation locs[] = fs.getFileBlockLocations(
        fs.getFileStatus(filePath), 0, Long.MAX_VALUE);
    String name = locs[0].getNames()[0];
    DFSTestUtil.writeFile(localFileSys, excludeFile, name);
    ns.getBlockManager().getDatanodeManager().refreshNodes(conf);
    DFSTestUtil.waitForDecommission(fs, name);

    // Check the block still has sufficient # replicas across racks
    DFSTestUtil.waitForReplication(cluster, b, 2, REPLICATION_FACTOR, 0);
  } finally {
    cluster.shutdown();
  }
}
Example 6
Source File: TestHostsFiles.java From big-c with Apache License 2.0
@Test
public void testHostsExcludeInUI() throws Exception {
  Configuration conf = getConf();
  short REPLICATION_FACTOR = 2;
  final Path filePath = new Path("/testFile");

  // Configure an excludes file
  FileSystem localFileSys = FileSystem.getLocal(conf);
  Path workingDir = localFileSys.getWorkingDirectory();
  Path dir = new Path(workingDir, "build/test/data/temp/decommission");
  Path excludeFile = new Path(dir, "exclude");
  Path includeFile = new Path(dir, "include");
  assertTrue(localFileSys.mkdirs(dir));
  DFSTestUtil.writeFile(localFileSys, excludeFile, "");
  DFSTestUtil.writeFile(localFileSys, includeFile, "");
  conf.set(DFSConfigKeys.DFS_HOSTS_EXCLUDE, excludeFile.toUri().getPath());
  conf.set(DFSConfigKeys.DFS_HOSTS, includeFile.toUri().getPath());

  // Two blocks and four racks
  String racks[] = {"/rack1", "/rack1", "/rack2", "/rack2"};
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
      .numDataNodes(racks.length).racks(racks).build();
  final FSNamesystem ns = cluster.getNameNode().getNamesystem();

  try {
    // Create a file with one block
    final FileSystem fs = cluster.getFileSystem();
    DFSTestUtil.createFile(fs, filePath, 1L, REPLICATION_FACTOR, 1L);
    ExtendedBlock b = DFSTestUtil.getFirstBlock(fs, filePath);
    DFSTestUtil.waitForReplication(cluster, b, 2, REPLICATION_FACTOR, 0);

    // Decommission one of the hosts with the block, this should cause
    // the block to get replicated to another host on the same rack,
    // otherwise the rack policy is violated.
    BlockLocation locs[] = fs.getFileBlockLocations(
        fs.getFileStatus(filePath), 0, Long.MAX_VALUE);
    String name = locs[0].getNames()[0];
    String names = name + "\n" + "localhost:42\n";
    LOG.info("adding '" + names + "' to exclude file "
        + excludeFile.toUri().getPath());
    DFSTestUtil.writeFile(localFileSys, excludeFile, name);
    ns.getBlockManager().getDatanodeManager().refreshNodes(conf);
    DFSTestUtil.waitForDecommission(fs, name);

    // Check the block still has sufficient # replicas across racks
    DFSTestUtil.waitForReplication(cluster, b, 2, REPLICATION_FACTOR, 0);

    MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
    ObjectName mxbeanName = new ObjectName(
        "Hadoop:service=NameNode,name=NameNodeInfo");
    String nodes = (String) mbs.getAttribute(mxbeanName, "LiveNodes");
    assertTrue("Live nodes should contain the decommissioned node",
        nodes.contains("Decommissioned"));
  } finally {
    cluster.shutdown();
  }
}
Example 7
Source File: TestStandbyBlockManagement.java From big-c with Apache License 2.0
@Test(timeout=60000)
public void testInvalidateBlock() throws Exception {
  Configuration conf = new Configuration();
  HAUtil.setAllowStandbyReads(conf, true);
  conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1);
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
      .nnTopology(MiniDFSNNTopology.simpleHATopology())
      .numDataNodes(3)
      .build();
  try {
    cluster.waitActive();
    cluster.transitionToActive(0);

    NameNode nn1 = cluster.getNameNode(0);
    NameNode nn2 = cluster.getNameNode(1);

    FileSystem fs = HATestUtil.configureFailoverFs(cluster, conf);
    Thread.sleep(1000);
    LOG.info("==================================");
    DFSTestUtil.writeFile(fs, TEST_FILE_PATH, TEST_FILE_DATA);
    // Have to force an edit log roll so that the standby catches up
    nn1.getRpcServer().rollEditLog();
    LOG.info("==================================");

    // delete the file
    fs.delete(TEST_FILE_PATH, false);
    BlockManagerTestUtil.computeAllPendingWork(
        nn1.getNamesystem().getBlockManager());
    nn1.getRpcServer().rollEditLog();

    // standby nn doesn't need to invalidate blocks.
    assertEquals(0,
        nn2.getNamesystem().getBlockManager().getPendingDeletionBlocksCount());

    cluster.triggerHeartbeats();
    cluster.triggerBlockReports();

    // standby nn doesn't need to invalidate blocks.
    assertEquals(0,
        nn2.getNamesystem().getBlockManager().getPendingDeletionBlocksCount());
  } finally {
    cluster.shutdown();
  }
}
Example 8
Source File: TestStandbyIsHot.java From big-c with Apache License 2.0
@Test(timeout=60000)
public void testStandbyIsHot() throws Exception {
  Configuration conf = new Configuration();
  // We read from the standby to watch block locations
  HAUtil.setAllowStandbyReads(conf, true);
  conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1);
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
      .nnTopology(MiniDFSNNTopology.simpleHATopology())
      .numDataNodes(3)
      .build();
  try {
    cluster.waitActive();
    cluster.transitionToActive(0);

    NameNode nn1 = cluster.getNameNode(0);
    NameNode nn2 = cluster.getNameNode(1);

    FileSystem fs = HATestUtil.configureFailoverFs(cluster, conf);
    Thread.sleep(1000);
    System.err.println("==================================");
    DFSTestUtil.writeFile(fs, TEST_FILE_PATH, TEST_FILE_DATA);
    // Have to force an edit log roll so that the standby catches up
    nn1.getRpcServer().rollEditLog();
    System.err.println("==================================");

    // Block locations should show up on standby.
    LOG.info("Waiting for block locations to appear on standby node");
    waitForBlockLocations(cluster, nn2, TEST_FILE, 3);

    // Trigger immediate heartbeats and block reports so
    // that the active "trusts" all of the DNs
    cluster.triggerHeartbeats();
    cluster.triggerBlockReports();

    // Change replication
    LOG.info("Changing replication to 1");
    fs.setReplication(TEST_FILE_PATH, (short)1);

    BlockManagerTestUtil.computeAllPendingWork(
        nn1.getNamesystem().getBlockManager());
    waitForBlockLocations(cluster, nn1, TEST_FILE, 1);
    nn1.getRpcServer().rollEditLog();

    LOG.info("Waiting for lowered replication to show up on standby");
    waitForBlockLocations(cluster, nn2, TEST_FILE, 1);

    // Change back to 3
    LOG.info("Changing replication to 3");
    fs.setReplication(TEST_FILE_PATH, (short)3);
    BlockManagerTestUtil.computeAllPendingWork(
        nn1.getNamesystem().getBlockManager());
    nn1.getRpcServer().rollEditLog();

    LOG.info("Waiting for higher replication to show up on standby");
    waitForBlockLocations(cluster, nn2, TEST_FILE, 3);
  } finally {
    cluster.shutdown();
  }
}
Example 9
Source File: TestNameNodeMXBean.java From big-c with Apache License 2.0
@SuppressWarnings({ "unchecked" })
@Test
public void testLastContactTime() throws Exception {
  Configuration conf = new Configuration();
  conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 1);
  MiniDFSCluster cluster = null;

  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
    cluster.waitActive();

    FSNamesystem fsn = cluster.getNameNode().namesystem;
    MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
    ObjectName mxbeanName = new ObjectName(
        "Hadoop:service=NameNode,name=NameNodeInfo");

    // Define include file to generate deadNodes metrics
    FileSystem localFileSys = FileSystem.getLocal(conf);
    Path workingDir = localFileSys.getWorkingDirectory();
    Path dir = new Path(workingDir,
        "build/test/data/temp/TestNameNodeMXBean");
    Path includeFile = new Path(dir, "include");
    assertTrue(localFileSys.mkdirs(dir));
    StringBuilder includeHosts = new StringBuilder();
    for (DataNode dn : cluster.getDataNodes()) {
      includeHosts.append(dn.getDisplayName()).append("\n");
    }
    DFSTestUtil.writeFile(localFileSys, includeFile, includeHosts.toString());
    conf.set(DFSConfigKeys.DFS_HOSTS, includeFile.toUri().getPath());
    fsn.getBlockManager().getDatanodeManager().refreshNodes(conf);

    cluster.stopDataNode(0);
    while (fsn.getBlockManager().getDatanodeManager().getNumLiveDataNodes()
        != 2) {
      Uninterruptibles.sleepUninterruptibly(1, TimeUnit.SECONDS);
    }

    // get attribute deadnodeinfo
    String deadnodeinfo = (String) (mbs.getAttribute(mxbeanName,
        "DeadNodes"));
    assertEquals(fsn.getDeadNodes(), deadnodeinfo);
    Map<String, Map<String, Object>> deadNodes =
        (Map<String, Map<String, Object>>) JSON.parse(deadnodeinfo);
    assertTrue(deadNodes.size() > 0);
    for (Map<String, Object> deadNode : deadNodes.values()) {
      assertTrue(deadNode.containsKey("lastContact"));
      assertTrue(deadNode.containsKey("decommissioned"));
      assertTrue(deadNode.containsKey("xferaddr"));
    }
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
Example 10
Source File: TestWithMiniClusterBase.java From NNAnalytics with Apache License 2.0
protected void addFiles(int numOfFiles, long sleepBetweenMs) throws Exception {
  DistributedFileSystem fileSystem =
      (DistributedFileSystem) FileSystem.get(CONF);
  for (int i = 0; i < numOfFiles; i++) {
    // Build a random three-level directory path
    int dirNumber1 = RANDOM.nextInt(10);
    Path dirPath = new Path("/dir" + dirNumber1);
    int dirNumber2 = RANDOM.nextInt(10);
    dirPath = dirPath.suffix("/dir" + dirNumber2);
    int dirNumber3 = RANDOM.nextInt(10);
    dirPath = dirPath.suffix("/dir" + dirNumber3);
    fileSystem.mkdirs(dirPath);

    // Pick a random file name/extension
    Path filePath = dirPath.suffix("/file" + i);
    int fileType = RANDOM.nextInt(7);
    switch (fileType) {
      case 0:
        filePath = filePath.suffix(".zip");
        break;
      case 1:
        filePath = filePath.suffix(".avro");
        break;
      case 2:
        filePath = filePath.suffix(".orc");
        break;
      case 3:
        filePath = filePath.suffix(".txt");
        break;
      case 4:
        filePath = filePath.suffix(".json");
        break;
      case 5:
        filePath = dirPath.suffix("/part-r-" + i);
        break;
      case 6:
        filePath = filePath.suffix("_45454");
        break;
      default:
        break;
    }

    // Write the file at a random size
    int fileSize = RANDOM.nextInt(4);
    switch (fileSize) {
      case 1:
        DFSTestUtil.writeFile(fileSystem, filePath, new String(TINY_FILE_BYTES));
        break;
      case 2:
        DFSTestUtil.writeFile(fileSystem, filePath, new String(SMALL_FILE_BYTES));
        break;
      case 3:
        DFSTestUtil.writeFile(fileSystem, filePath, new String(MEDIUM_FILE_BYTES));
        break;
      case 0:
      default:
        DFSTestUtil.writeFile(fileSystem, filePath, "");
        break;
    }

    if (dirNumber1 == 1) {
      fileSystem.setQuota(filePath.getParent(), 100L, 100000000000L);
    }

    // Randomize ownership
    int user = RANDOM.nextInt(3);
    switch (user) {
      case 1:
        fileSystem.setOwner(filePath, USERS[0], USERS[0]);
        break;
      case 2:
        fileSystem.setOwner(filePath, USERS[1], USERS[1]);
        break;
      case 0:
      default:
        break;
    }

    // Randomize replication factor and timestamps
    short repFactor = (short) RANDOM.nextInt(4);
    if (repFactor != 0) {
      fileSystem.setReplication(filePath, repFactor);
    }
    int weeksAgo = RANDOM.nextInt(60);
    long timeStamp =
        System.currentTimeMillis() - TimeUnit.DAYS.toMillis(weeksAgo * 7);
    if (weeksAgo != 0) {
      fileSystem.setTimes(filePath, timeStamp, timeStamp);
    }

    if (sleepBetweenMs != 0L) {
      Thread.sleep(sleepBetweenMs);
    }
  }
}