Java Code Examples for org.apache.hadoop.hdfs.MiniDFSCluster#waitActive()
The following examples show how to use org.apache.hadoop.hdfs.MiniDFSCluster#waitActive().
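Before the individual examples, the common pattern is worth stating once: construct a MiniDFSCluster (usually via MiniDFSCluster.Builder, or the older constructor in legacy branches), call waitActive() so the test blocks until the NameNode and DataNodes are up, then use the cluster's FileSystem and shut the cluster down in a finally block. The sketch below only illustrates that pattern under those assumptions; the class name and the /sketch path are invented for illustration and do not come from any example on this page.

// Minimal sketch (not taken from the examples below): start a MiniDFSCluster,
// wait for it to become active, touch the file system, then shut it down.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;

public class MiniDfsWaitActiveSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new HdfsConfiguration();
    MiniDFSCluster cluster = null;
    try {
      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
      // Block until the NameNode is out of safe mode and the DataNodes
      // have registered; tests call this before using the cluster.
      cluster.waitActive();

      FileSystem fs = cluster.getFileSystem();
      Path dir = new Path("/sketch");   // illustrative path, not from the examples
      fs.mkdirs(dir);
      System.out.println("cluster is up, /sketch exists: " + fs.exists(dir));
    } finally {
      if (cluster != null) {
        cluster.shutdown();
      }
    }
  }
}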
Example 1
Source File: TestNameNodeRpcServer.java From hadoop with Apache License 2.0
@Test
public void testNamenodeRpcBindAny() throws IOException {
  Configuration conf = new HdfsConfiguration();
  // The name node in MiniDFSCluster only binds to 127.0.0.1.
  // We can set the bind address to 0.0.0.0 to make it listen
  // to all interfaces.
  conf.set(DFS_NAMENODE_RPC_BIND_HOST_KEY, "0.0.0.0");
  MiniDFSCluster cluster = null;

  try {
    cluster = new MiniDFSCluster.Builder(conf).build();
    cluster.waitActive();
    assertEquals("0.0.0.0", ((NameNodeRpcServer)cluster.getNameNodeRpc())
        .getClientRpcServer().getListenerAddress().getHostName());
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
    // Reset the config
    conf.unset(DFS_NAMENODE_RPC_BIND_HOST_KEY);
  }
}
Example 2
Source File: TestWriteToReplica.java From big-c with Apache License 2.0
@Test
public void testClose() throws Exception {
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(new HdfsConfiguration()).build();

  try {
    cluster.waitActive();
    DataNode dn = cluster.getDataNodes().get(0);
    FsDatasetImpl dataSet = (FsDatasetImpl)DataNodeTestUtils.getFSDataset(dn);

    // set up replicasMap
    String bpid = cluster.getNamesystem().getBlockPoolId();
    ExtendedBlock[] blocks = setup(bpid, dataSet);

    // test close
    testClose(dataSet, blocks);
  } finally {
    cluster.shutdown();
  }
}
Example 3
Source File: TestNameNodeRpcServer.java From big-c with Apache License 2.0
@Test
public void testNamenodeRpcBindAny() throws IOException {
  Configuration conf = new HdfsConfiguration();
  // The name node in MiniDFSCluster only binds to 127.0.0.1.
  // We can set the bind address to 0.0.0.0 to make it listen
  // to all interfaces.
  conf.set(DFS_NAMENODE_RPC_BIND_HOST_KEY, "0.0.0.0");
  MiniDFSCluster cluster = null;

  try {
    cluster = new MiniDFSCluster.Builder(conf).build();
    cluster.waitActive();
    assertEquals("0.0.0.0", ((NameNodeRpcServer)cluster.getNameNodeRpc())
        .getClientRpcServer().getListenerAddress().getHostName());
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
    // Reset the config
    conf.unset(DFS_NAMENODE_RPC_BIND_HOST_KEY);
  }
}
Example 4
Source File: TestWebHDFSForHA.java From big-c with Apache License 2.0
@Test
public void testFailoverAfterOpen() throws IOException {
  Configuration conf = DFSTestUtil.newHAConfiguration(LOGICAL_NAME);
  conf.set(FS_DEFAULT_NAME_KEY, HdfsConstants.HDFS_URI_SCHEME +
      "://" + LOGICAL_NAME);
  MiniDFSCluster cluster = null;
  FileSystem fs = null;
  final Path p = new Path("/test");
  final byte[] data = "Hello".getBytes();

  try {
    cluster = new MiniDFSCluster.Builder(conf).nnTopology(topo)
        .numDataNodes(1).build();

    HATestUtil.setFailoverConfigurations(cluster, conf, LOGICAL_NAME);

    cluster.waitActive();

    fs = FileSystem.get(WEBHDFS_URI, conf);
    cluster.transitionToActive(1);

    FSDataOutputStream out = fs.create(p);
    cluster.shutdownNameNode(1);
    cluster.transitionToActive(0);
    out.write(data);
    out.close();

    FSDataInputStream in = fs.open(p);
    byte[] buf = new byte[data.length];
    IOUtils.readFully(in, buf, 0, buf.length);
    Assert.assertArrayEquals(data, buf);
  } finally {
    IOUtils.cleanup(null, fs);
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
Example 5
Source File: TestDirectoryRaidShellFsck.java From RDFS with Apache License 2.0
/**
 * creates a MiniDFS instance with a raided file in it
 */
public void setUpCluster(int rsPairtyLength)
    throws IOException, ClassNotFoundException {
  new File(TEST_DIR).mkdirs(); // Make sure data directory exists
  conf = new Configuration();
  Utils.loadTestCodecs(conf, STRIPE_BLOCKS, STRIPE_BLOCKS, 1, rsPairtyLength,
      "/destraid", "/destraidrs", false, true);
  conf.setBoolean("dfs.permissions", false);
  cluster = new MiniDFSCluster(conf, NUM_DATANODES, true, null);
  cluster.waitActive();
  dfs = (DistributedFileSystem) cluster.getFileSystem();
  String namenode = dfs.getUri().toString();
  FileSystem.setDefaultUri(conf, namenode);
  Codec dirRS = Codec.getCodec("rs");
  long[] crcs = new long[fileSizes.length];
  int[] seeds = new int[fileSizes.length];
  files = TestRaidDfs.createTestFiles(srcDir, fileSizes, blockSizes,
      crcs, seeds, (FileSystem)dfs, (short)1);
  assertTrue(RaidNode.doRaid(conf, dfs.getFileStatus(srcDir),
      new Path(dirRS.parityDirectory), dirRS,
      new RaidNode.Statistics(), RaidUtils.NULL_PROGRESSABLE,
      false, 1, 1));
  srcStats = new FileStatus[files.length];
  for (int i = 0; i < files.length; i++) {
    srcStats[i] = dfs.getFileStatus(files[i]);
  }
  parityStat = dfs.getFileStatus(parityFile);
  clientConf = new Configuration(conf);
  clientConf.set("fs.hdfs.impl",
      "org.apache.hadoop.hdfs.DistributedRaidFileSystem");
  clientConf.set("fs.raid.underlyingfs.impl",
      "org.apache.hadoop.hdfs.DistributedFileSystem");
  // prepare shell and arguments
  shell = new RaidShell(clientConf);
  args = new String[2];
  args[0] = "-fsck";
  args[1] = "/";
}
Example 6
Source File: TestFileChannel.java From jsr203-hadoop with Apache License 2.0
private static MiniDFSCluster startMini(String testName) throws IOException {
  File baseDir = new File("./target/hdfs/" + testName).getAbsoluteFile();
  FileUtil.fullyDelete(baseDir);
  Configuration conf = new Configuration();
  conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, baseDir.getAbsolutePath());
  MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(conf);
  MiniDFSCluster hdfsCluster = builder.clusterId(testName).build();
  hdfsCluster.waitActive();
  return hdfsCluster;
}
Example 7
Source File: TestWebHDFS.java From hadoop with Apache License 2.0
/**
 * Test snapshot rename through WebHdfs
 */
@Test
public void testWebHdfsRenameSnapshot() throws Exception {
  MiniDFSCluster cluster = null;
  final Configuration conf = WebHdfsTestUtil.createConf();
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
    cluster.waitActive();
    final DistributedFileSystem dfs = cluster.getFileSystem();
    final FileSystem webHdfs = WebHdfsTestUtil.getWebHdfsFileSystem(conf,
        WebHdfsFileSystem.SCHEME);

    final Path foo = new Path("/foo");
    dfs.mkdirs(foo);
    dfs.allowSnapshot(foo);

    webHdfs.createSnapshot(foo, "s1");
    final Path s1path = SnapshotTestHelper.getSnapshotRoot(foo, "s1");
    Assert.assertTrue(webHdfs.exists(s1path));

    // rename s1 to s2
    webHdfs.renameSnapshot(foo, "s1", "s2");
    Assert.assertFalse(webHdfs.exists(s1path));
    final Path s2path = SnapshotTestHelper.getSnapshotRoot(foo, "s2");
    Assert.assertTrue(webHdfs.exists(s2path));

    webHdfs.deleteSnapshot(foo, "s2");
    Assert.assertFalse(webHdfs.exists(s2path));
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
Example 8
Source File: TestNameNodeMetrics.java From hadoop-gpu with Apache License 2.0
@Override
protected void setUp() throws Exception {
  cluster = new MiniDFSCluster(CONF, 3, true, null);
  cluster.waitActive();
  namesystem = cluster.getNameNode().getNamesystem();
  fs = (DistributedFileSystem) cluster.getFileSystem();
  metrics = namesystem.getFSNamesystemMetrics();
}
Example 9
Source File: TestINodeFile.java From big-c with Apache License 2.0
/**
 * FSDirectory#unprotectedSetQuota creates a new INodeDirectoryWithQuota to
 * replace the original INodeDirectory. Before HDFS-4243, the parent field of
 * all the children INodes of the target INodeDirectory is not changed to
 * point to the new INodeDirectoryWithQuota. This testcase tests this
 * scenario.
 */
@Test
public void testGetFullPathNameAfterSetQuota() throws Exception {
  long fileLen = 1024;
  replication = 3;
  Configuration conf = new Configuration();
  MiniDFSCluster cluster = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(replication).build();
    cluster.waitActive();
    FSNamesystem fsn = cluster.getNamesystem();
    FSDirectory fsdir = fsn.getFSDirectory();
    DistributedFileSystem dfs = cluster.getFileSystem();

    // Create a file for test
    final Path dir = new Path("/dir");
    final Path file = new Path(dir, "file");
    DFSTestUtil.createFile(dfs, file, fileLen, replication, 0L);

    // Check the full path name of the INode associating with the file
    INode fnode = fsdir.getINode(file.toString());
    assertEquals(file.toString(), fnode.getFullPathName());

    // Call FSDirectory#unprotectedSetQuota which calls
    // INodeDirectory#replaceChild
    dfs.setQuota(dir, Long.MAX_VALUE - 1, replication * fileLen * 10);
    INodeDirectory dirNode = getDir(fsdir, dir);
    assertEquals(dir.toString(), dirNode.getFullPathName());
    assertTrue(dirNode.isWithQuota());

    final Path newDir = new Path("/newdir");
    final Path newFile = new Path(newDir, "file");
    // Also rename dir
    dfs.rename(dir, newDir, Options.Rename.OVERWRITE);
    // /dir/file now should be renamed to /newdir/file
    fnode = fsdir.getINode(newFile.toString());
    // getFullPathName can return correct result only if the parent field of
    // child node is set correctly
    assertEquals(newFile.toString(), fnode.getFullPathName());
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
Example 10
Source File: TestNameNodeResourceChecker.java From big-c with Apache License 2.0
/**
 * Tests that NameNode resource monitor causes the NN to enter safe mode when
 * resources are low.
 */
@Test
public void testCheckThatNameNodeResourceMonitorIsRunning()
    throws IOException, InterruptedException {
  MiniDFSCluster cluster = null;
  try {
    conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY, nameDir.getAbsolutePath());
    conf.setLong(DFSConfigKeys.DFS_NAMENODE_RESOURCE_CHECK_INTERVAL_KEY, 1);

    cluster = new MiniDFSCluster.Builder(conf)
        .numDataNodes(1).build();

    NameNodeResourceChecker mockResourceChecker = Mockito.mock(NameNodeResourceChecker.class);
    Mockito.when(mockResourceChecker.hasAvailableDiskSpace()).thenReturn(true);
    cluster.getNameNode().getNamesystem().nnResourceChecker = mockResourceChecker;

    cluster.waitActive();

    String name = NameNodeResourceMonitor.class.getName();

    boolean isNameNodeMonitorRunning = false;
    Set<Thread> runningThreads = Thread.getAllStackTraces().keySet();
    for (Thread runningThread : runningThreads) {
      if (runningThread.toString().startsWith("Thread[" + name)) {
        isNameNodeMonitorRunning = true;
        break;
      }
    }
    assertTrue("NN resource monitor should be running",
        isNameNodeMonitorRunning);
    assertFalse("NN should not presently be in safe mode",
        cluster.getNameNode().isInSafeMode());

    Mockito.when(mockResourceChecker.hasAvailableDiskSpace()).thenReturn(false);

    // Make sure the NNRM thread has a chance to run.
    long startMillis = Time.now();
    while (!cluster.getNameNode().isInSafeMode() &&
        Time.now() < startMillis + (60 * 1000)) {
      Thread.sleep(1000);
    }

    assertTrue("NN should be in safe mode after resources crossed threshold",
        cluster.getNameNode().isInSafeMode());
  } finally {
    if (cluster != null)
      cluster.shutdown();
  }
}
Example 11
Source File: TestNameNodeMXBean.java From hadoop with Apache License 2.0
@SuppressWarnings({ "unchecked" }) @Test public void testLastContactTime() throws Exception { Configuration conf = new Configuration(); conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1); conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 1); MiniDFSCluster cluster = null; try { cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build(); cluster.waitActive(); FSNamesystem fsn = cluster.getNameNode().namesystem; MBeanServer mbs = ManagementFactory.getPlatformMBeanServer(); ObjectName mxbeanName = new ObjectName( "Hadoop:service=NameNode,name=NameNodeInfo"); // Define include file to generate deadNodes metrics FileSystem localFileSys = FileSystem.getLocal(conf); Path workingDir = localFileSys.getWorkingDirectory(); Path dir = new Path(workingDir, "build/test/data/temp/TestNameNodeMXBean"); Path includeFile = new Path(dir, "include"); assertTrue(localFileSys.mkdirs(dir)); StringBuilder includeHosts = new StringBuilder(); for(DataNode dn : cluster.getDataNodes()) { includeHosts.append(dn.getDisplayName()).append("\n"); } DFSTestUtil.writeFile(localFileSys, includeFile, includeHosts.toString()); conf.set(DFSConfigKeys.DFS_HOSTS, includeFile.toUri().getPath()); fsn.getBlockManager().getDatanodeManager().refreshNodes(conf); cluster.stopDataNode(0); while (fsn.getBlockManager().getDatanodeManager().getNumLiveDataNodes() != 2 ) { Uninterruptibles.sleepUninterruptibly(1, TimeUnit.SECONDS); } // get attribute deadnodeinfo String deadnodeinfo = (String) (mbs.getAttribute(mxbeanName, "DeadNodes")); assertEquals(fsn.getDeadNodes(), deadnodeinfo); Map<String, Map<String, Object>> deadNodes = (Map<String, Map<String, Object>>) JSON.parse(deadnodeinfo); assertTrue(deadNodes.size() > 0); for (Map<String, Object> deadNode : deadNodes.values()) { assertTrue(deadNode.containsKey("lastContact")); assertTrue(deadNode.containsKey("decommissioned")); assertTrue(deadNode.containsKey("xferaddr")); } } finally { if (cluster != null) { cluster.shutdown(); } } }
Example 12
Source File: TestPipelinesFailover.java From hadoop with Apache License 2.0
private void doWriteOverFailoverTest(TestScenario scenario,
    MethodToTestIdempotence methodToTest) throws Exception {
  Configuration conf = new Configuration();
  conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
  // Don't check replication periodically.
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, 1000);

  FSDataOutputStream stm = null;
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
      .nnTopology(MiniDFSNNTopology.simpleHATopology())
      .numDataNodes(3)
      .build();
  try {
    int sizeWritten = 0;

    cluster.waitActive();
    cluster.transitionToActive(0);
    Thread.sleep(500);

    LOG.info("Starting with NN 0 active");
    FileSystem fs = HATestUtil.configureFailoverFs(cluster, conf);
    stm = fs.create(TEST_PATH);

    // write a block and a half
    AppendTestUtil.write(stm, 0, BLOCK_AND_A_HALF);
    sizeWritten += BLOCK_AND_A_HALF;

    // Make sure all of the blocks are written out before failover.
    stm.hflush();

    LOG.info("Failing over to NN 1");
    scenario.run(cluster);

    // NOTE: explicitly do *not* make any further metadata calls
    // to the NN here. The next IPC call should be to allocate the next
    // block. Any other call would notice the failover and not test
    // idempotence of the operation (HDFS-3031)

    FSNamesystem ns1 = cluster.getNameNode(1).getNamesystem();
    BlockManagerTestUtil.updateState(ns1.getBlockManager());
    assertEquals(0, ns1.getPendingReplicationBlocks());
    assertEquals(0, ns1.getCorruptReplicaBlocks());
    assertEquals(0, ns1.getMissingBlocksCount());

    // If we're testing allocateBlock()'s idempotence, write another
    // block and a half, so we have to allocate a new block.
    // Otherwise, don't write anything, so our next RPC will be
    // completeFile() if we're testing idempotence of that operation.
    if (methodToTest == MethodToTestIdempotence.ALLOCATE_BLOCK) {
      // write another block and a half
      AppendTestUtil.write(stm, sizeWritten, BLOCK_AND_A_HALF);
      sizeWritten += BLOCK_AND_A_HALF;
    }

    stm.close();
    stm = null;

    AppendTestUtil.check(fs, TEST_PATH, sizeWritten);
  } finally {
    IOUtils.closeStream(stm);
    cluster.shutdown();
  }
}
Example 13
Source File: TestListCorruptFileBlocks.java From hadoop with Apache License 2.0
/**
 * test listCorruptFileBlocks in DistributedFileSystem
 */
@Test (timeout=300000)
public void testlistCorruptFileBlocksDFS() throws Exception {
  Configuration conf = new Configuration();
  conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 1000);
  // datanode scans directories
  conf.setInt(DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_INTERVAL_KEY, 1);
  FileSystem fs = null;

  MiniDFSCluster cluster = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).build();
    cluster.waitActive();
    fs = cluster.getFileSystem();
    DistributedFileSystem dfs = (DistributedFileSystem) fs;
    DFSTestUtil util = new DFSTestUtil.Builder().
        setName("testGetCorruptFiles").setNumFiles(3).
        setMaxLevels(1).setMaxSize(1024).build();
    util.createFiles(fs, "/corruptData");

    RemoteIterator<Path> corruptFileBlocks =
        dfs.listCorruptFileBlocks(new Path("/corruptData"));
    int numCorrupt = countPaths(corruptFileBlocks);
    assertTrue(numCorrupt == 0);
    // delete the blocks
    String bpid = cluster.getNamesystem().getBlockPoolId();
    // For loop through number of datadirectories per datanode (2)
    for (int i = 0; i < 2; i++) {
      File storageDir = cluster.getInstanceStorageDir(0, i);
      File data_dir = MiniDFSCluster.getFinalizedDir(storageDir, bpid);
      List<File> metadataFiles = MiniDFSCluster.getAllBlockMetadataFiles(
          data_dir);
      if (metadataFiles == null)
        continue;
      // assertTrue("Blocks do not exist in data-dir", (blocks != null) &&
      // (blocks.length > 0));
      for (File metadataFile : metadataFiles) {
        File blockFile = Block.metaToBlockFile(metadataFile);
        LOG.info("Deliberately removing file " + blockFile.getName());
        assertTrue("Cannot remove file.", blockFile.delete());
        LOG.info("Deliberately removing file " + metadataFile.getName());
        assertTrue("Cannot remove file.", metadataFile.delete());
        // break;
      }
    }

    int count = 0;
    corruptFileBlocks = dfs.listCorruptFileBlocks(new Path("/corruptData"));
    numCorrupt = countPaths(corruptFileBlocks);
    while (numCorrupt < 3) {
      Thread.sleep(1000);
      corruptFileBlocks = dfs.listCorruptFileBlocks(new Path("/corruptData"));
      numCorrupt = countPaths(corruptFileBlocks);
      count++;
      if (count > 30)
        break;
    }
    // Validate we get all the corrupt files
    LOG.info("Namenode has bad files. " + numCorrupt);
    assertTrue(numCorrupt == 3);

    util.cleanup(fs, "/corruptData");
    util.cleanup(fs, "/goodData");
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
Example 14
Source File: TestCheckpoint.java From hadoop with Apache License 2.0
@Test
public void testSecondaryNamenodeError1() throws IOException {
  LOG.info("Starting testSecondaryNamenodeError1");
  Configuration conf = new HdfsConfiguration();
  Path file1 = new Path("checkpointxx.dat");
  MiniDFSCluster cluster = null;
  FileSystem fileSys = null;
  SecondaryNameNode secondary = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes)
        .build();
    cluster.waitActive();
    fileSys = cluster.getFileSystem();
    assertTrue(!fileSys.exists(file1));

    // Make the checkpoint fail after rolling the edits log.
    secondary = startSecondaryNameNode(conf);

    Mockito.doThrow(new IOException(
        "Injecting failure after rolling edit logs"))
        .when(faultInjector).afterSecondaryCallsRollEditLog();

    try {
      secondary.doCheckpoint();  // this should fail
      assertTrue(false);
    } catch (IOException e) {
      // expected
    }

    Mockito.reset(faultInjector);

    //
    // Create a new file
    //
    writeFile(fileSys, file1, replication);
    checkFile(fileSys, file1, replication);
  } finally {
    fileSys.close();
    cleanup(secondary);
    secondary = null;
    cleanup(cluster);
    cluster = null;
  }

  //
  // Restart cluster and verify that file exists.
  // Then take another checkpoint to verify that the
  // namenode restart accounted for the rolled edit logs.
  //
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes)
        .format(false).build();
    cluster.waitActive();
    fileSys = cluster.getFileSystem();
    checkFile(fileSys, file1, replication);
    cleanupFile(fileSys, file1);
    secondary = startSecondaryNameNode(conf);
    secondary.doCheckpoint();
    secondary.shutdown();
  } finally {
    fileSys.close();
    cleanup(secondary);
    secondary = null;
    cleanup(cluster);
    cluster = null;
  }
}
Example 15
Source File: TestCheckpoint.java From hadoop with Apache License 2.0
@Test
public void testSecondaryNamenodeError3() throws IOException {
  LOG.info("Starting testSecondaryNamenodeError3");
  Configuration conf = new HdfsConfiguration();
  Path file1 = new Path("checkpointzz.dat");

  MiniDFSCluster cluster = null;
  FileSystem fileSys = null;
  SecondaryNameNode secondary = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes)
        .build();
    cluster.waitActive();
    fileSys = cluster.getFileSystem();
    assertTrue(!fileSys.exists(file1));
    //
    // Make the checkpoint fail after rolling the edit log.
    //
    secondary = startSecondaryNameNode(conf);

    Mockito.doThrow(new IOException(
        "Injecting failure after rolling edit logs"))
        .when(faultInjector).afterSecondaryCallsRollEditLog();

    try {
      secondary.doCheckpoint();  // this should fail
      assertTrue(false);
    } catch (IOException e) {
      // expected
    }
    Mockito.reset(faultInjector);
    secondary.shutdown(); // secondary namenode crash!

    // start new instance of secondary and verify that
    // a new rollEditLog succeeds in spite of the fact that
    // edits.new already exists.
    //
    secondary = startSecondaryNameNode(conf);
    secondary.doCheckpoint();  // this should work correctly

    //
    // Create a new file
    //
    writeFile(fileSys, file1, replication);
    checkFile(fileSys, file1, replication);
  } finally {
    fileSys.close();
    cleanup(secondary);
    secondary = null;
    cleanup(cluster);
    cluster = null;
  }

  //
  // Restart cluster and verify that file exists.
  // Then take another checkpoint to verify that the
  // namenode restart accounted for the twice-rolled edit logs.
  //
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes)
        .format(false).build();
    cluster.waitActive();
    fileSys = cluster.getFileSystem();
    checkFile(fileSys, file1, replication);
    cleanupFile(fileSys, file1);
    secondary = startSecondaryNameNode(conf);
    secondary.doCheckpoint();
    secondary.shutdown();
  } finally {
    fileSys.close();
    cleanup(secondary);
    secondary = null;
    cleanup(cluster);
    cluster = null;
  }
}
Example 16
Source File: TestListCorruptFileBlocks.java From hadoop with Apache License 2.0
@Test (timeout=300000)
public void testlistCorruptFileBlocks() throws Exception {
  Configuration conf = new Configuration();
  conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 1000);
  // datanode scans directories
  conf.setInt(DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_INTERVAL_KEY, 1);
  FileSystem fs = null;

  MiniDFSCluster cluster = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).build();
    cluster.waitActive();
    fs = cluster.getFileSystem();
    DFSTestUtil util = new DFSTestUtil.Builder().
        setName("testGetCorruptFiles").setNumFiles(3).setMaxLevels(1).
        setMaxSize(1024).build();
    util.createFiles(fs, "/corruptData");

    final NameNode namenode = cluster.getNameNode();
    Collection<FSNamesystem.CorruptFileBlockInfo> corruptFileBlocks =
        namenode.getNamesystem().listCorruptFileBlocks("/corruptData", null);
    int numCorrupt = corruptFileBlocks.size();
    assertTrue(numCorrupt == 0);
    // delete the blocks
    String bpid = cluster.getNamesystem().getBlockPoolId();
    for (int i = 0; i < 4; i++) {
      for (int j = 0; j <= 1; j++) {
        File storageDir = cluster.getInstanceStorageDir(i, j);
        File data_dir = MiniDFSCluster.getFinalizedDir(storageDir, bpid);
        List<File> metadataFiles = MiniDFSCluster.getAllBlockMetadataFiles(
            data_dir);
        if (metadataFiles == null)
          continue;
        // assertTrue("Blocks do not exist in data-dir", (blocks != null) &&
        // (blocks.length > 0));
        for (File metadataFile : metadataFiles) {
          File blockFile = Block.metaToBlockFile(metadataFile);
          LOG.info("Deliberately removing file " + blockFile.getName());
          assertTrue("Cannot remove file.", blockFile.delete());
          LOG.info("Deliberately removing file " + metadataFile.getName());
          assertTrue("Cannot remove file.", metadataFile.delete());
          // break;
        }
      }
    }

    int count = 0;
    corruptFileBlocks = namenode.getNamesystem().
        listCorruptFileBlocks("/corruptData", null);
    numCorrupt = corruptFileBlocks.size();
    while (numCorrupt < 3) {
      Thread.sleep(1000);
      corruptFileBlocks = namenode.getNamesystem()
          .listCorruptFileBlocks("/corruptData", null);
      numCorrupt = corruptFileBlocks.size();
      count++;
      if (count > 30)
        break;
    }
    // Validate we get all the corrupt files
    LOG.info("Namenode has bad files. " + numCorrupt);
    assertTrue(numCorrupt == 3);

    // test the paging here
    FSNamesystem.CorruptFileBlockInfo[] cfb = corruptFileBlocks
        .toArray(new FSNamesystem.CorruptFileBlockInfo[0]);
    // now get the 2nd and 3rd file that is corrupt
    String[] cookie = new String[]{"1"};
    Collection<FSNamesystem.CorruptFileBlockInfo> nextCorruptFileBlocks =
        namenode.getNamesystem()
        .listCorruptFileBlocks("/corruptData", cookie);
    FSNamesystem.CorruptFileBlockInfo[] ncfb = nextCorruptFileBlocks
        .toArray(new FSNamesystem.CorruptFileBlockInfo[0]);
    numCorrupt = nextCorruptFileBlocks.size();
    assertTrue(numCorrupt == 2);
    assertTrue(ncfb[0].block.getBlockName()
        .equalsIgnoreCase(cfb[1].block.getBlockName()));

    corruptFileBlocks = namenode.getNamesystem()
        .listCorruptFileBlocks("/corruptData", cookie);
    numCorrupt = corruptFileBlocks.size();
    assertTrue(numCorrupt == 0);
    // Do a listing on a dir which doesn't have any corrupt blocks and
    // validate
    util.createFiles(fs, "/goodData");
    corruptFileBlocks =
        namenode.getNamesystem().listCorruptFileBlocks("/goodData", null);
    numCorrupt = corruptFileBlocks.size();
    assertTrue(numCorrupt == 0);
    util.cleanup(fs, "/corruptData");
    util.cleanup(fs, "/goodData");
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
Example 17
Source File: TestOfflineImageViewer.java From big-c with Apache License 2.0
@BeforeClass
public static void createOriginalFSImage() throws IOException {
  MiniDFSCluster cluster = null;
  try {
    Configuration conf = new Configuration();
    conf.setLong(
        DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_MAX_LIFETIME_KEY, 10000);
    conf.setLong(
        DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_RENEW_INTERVAL_KEY, 5000);
    conf.setBoolean(
        DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY, true);
    conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTH_TO_LOCAL,
        "RULE:[2:$1@$0](JobTracker@.*FOO.COM)s/@.*//" + "DEFAULT");
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    cluster.waitActive();
    DistributedFileSystem hdfs = cluster.getFileSystem();

    // Create a reasonable namespace
    for (int i = 0; i < NUM_DIRS; i++) {
      Path dir = new Path("/dir" + i);
      hdfs.mkdirs(dir);
      writtenFiles.put(dir.toString(), pathToFileEntry(hdfs, dir.toString()));
      for (int j = 0; j < FILES_PER_DIR; j++) {
        Path file = new Path(dir, "file" + j);
        FSDataOutputStream o = hdfs.create(file);
        o.write(23);
        o.close();

        writtenFiles.put(file.toString(),
            pathToFileEntry(hdfs, file.toString()));
      }
    }

    // Create an empty directory
    Path emptydir = new Path("/emptydir");
    hdfs.mkdirs(emptydir);
    writtenFiles.put(emptydir.toString(), hdfs.getFileStatus(emptydir));

    // Create a directory whose name should be escaped in XML
    Path invalidXMLDir = new Path("/dirContainingInvalidXMLChar\u0000here");
    hdfs.mkdirs(invalidXMLDir);

    // Get delegation tokens so we log the delegation token op
    Token<?>[] delegationTokens = hdfs
        .addDelegationTokens(TEST_RENEWER, null);
    for (Token<?> t : delegationTokens) {
      LOG.debug("got token " + t);
    }

    final Path snapshot = new Path("/snapshot");
    hdfs.mkdirs(snapshot);
    hdfs.allowSnapshot(snapshot);
    hdfs.mkdirs(new Path("/snapshot/1"));
    hdfs.delete(snapshot, true);

    // Set XAttrs so the fsimage contains XAttr ops
    final Path xattr = new Path("/xattr");
    hdfs.mkdirs(xattr);
    hdfs.setXAttr(xattr, "user.a1", new byte[]{ 0x31, 0x32, 0x33 });
    hdfs.setXAttr(xattr, "user.a2", new byte[]{ 0x37, 0x38, 0x39 });
    // OIV should be able to handle empty value XAttrs
    hdfs.setXAttr(xattr, "user.a3", null);
    writtenFiles.put(xattr.toString(), hdfs.getFileStatus(xattr));

    // Write results to the fsimage file
    hdfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER, false);
    hdfs.saveNamespace();

    // Determine location of fsimage file
    originalFsimage = FSImageTestUtil.findLatestImageFile(FSImageTestUtil
        .getFSImage(cluster.getNameNode()).getStorage().getStorageDir(0));
    if (originalFsimage == null) {
      throw new RuntimeException("Didn't generate or can't find fsimage");
    }
    LOG.debug("original FS image file is " + originalFsimage);
  } finally {
    if (cluster != null)
      cluster.shutdown();
  }
}
Example 18
Source File: TestSequentialBlockId.java From hadoop with Apache License 2.0
/**
 * Test that collisions in the block ID space are handled gracefully.
 *
 * @throws IOException
 */
@Test
public void testTriggerBlockIdCollision() throws IOException {
  Configuration conf = new HdfsConfiguration();
  conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1);
  MiniDFSCluster cluster =
      new MiniDFSCluster.Builder(conf).numDataNodes(1).build();

  try {
    cluster.waitActive();
    FileSystem fs = cluster.getFileSystem();
    FSNamesystem fsn = cluster.getNamesystem();
    final int blockCount = 10;

    // Create a file with a few blocks to rev up the global block ID
    // counter.
    Path path1 = new Path("testBlockIdCollisionDetection_file1.dat");
    DFSTestUtil.createFile(
        fs, path1, IO_SIZE, BLOCK_SIZE * blockCount, BLOCK_SIZE, REPLICATION, SEED);
    List<LocatedBlock> blocks1 = DFSTestUtil.getAllBlocks(fs, path1);

    // Rewind the block ID counter in the name system object. This will result
    // in block ID collisions when we try to allocate new blocks.
    SequentialBlockIdGenerator blockIdGenerator = fsn.getBlockIdManager()
        .getBlockIdGenerator();
    blockIdGenerator.setCurrentValue(blockIdGenerator.getCurrentValue() - 5);

    // Trigger collisions by creating a new file.
    Path path2 = new Path("testBlockIdCollisionDetection_file2.dat");
    DFSTestUtil.createFile(
        fs, path2, IO_SIZE, BLOCK_SIZE * blockCount, BLOCK_SIZE, REPLICATION, SEED);
    List<LocatedBlock> blocks2 = DFSTestUtil.getAllBlocks(fs, path2);
    assertThat(blocks2.size(), is(blockCount));

    // Make sure that file2 block IDs start immediately after file1
    assertThat(blocks2.get(0).getBlock().getBlockId(),
        is(blocks1.get(9).getBlock().getBlockId() + 1));
  } finally {
    cluster.shutdown();
  }
}
Example 19
Source File: TestPipelinesFailover.java From hadoop with Apache License 2.0
private void doTestWriteOverFailoverWithDnFail(TestScenario scenario)
    throws Exception {
  Configuration conf = new Configuration();
  conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);

  FSDataOutputStream stm = null;
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
      .nnTopology(MiniDFSNNTopology.simpleHATopology())
      .numDataNodes(5)
      .build();
  try {
    cluster.waitActive();
    cluster.transitionToActive(0);
    Thread.sleep(500);

    LOG.info("Starting with NN 0 active");
    FileSystem fs = HATestUtil.configureFailoverFs(cluster, conf);
    stm = fs.create(TEST_PATH);

    // write a block and a half
    AppendTestUtil.write(stm, 0, BLOCK_AND_A_HALF);

    // Make sure all the blocks are written before failover
    stm.hflush();

    LOG.info("Failing over to NN 1");
    scenario.run(cluster);

    assertTrue(fs.exists(TEST_PATH));

    cluster.stopDataNode(0);

    // write another block and a half
    AppendTestUtil.write(stm, BLOCK_AND_A_HALF, BLOCK_AND_A_HALF);
    stm.hflush();

    LOG.info("Failing back to NN 0");
    cluster.transitionToStandby(1);
    cluster.transitionToActive(0);

    cluster.stopDataNode(1);

    AppendTestUtil.write(stm, BLOCK_AND_A_HALF*2, BLOCK_AND_A_HALF);
    stm.hflush();

    stm.close();
    stm = null;

    AppendTestUtil.check(fs, TEST_PATH, BLOCK_AND_A_HALF * 3);
  } finally {
    IOUtils.closeStream(stm);
    cluster.shutdown();
  }
}
Example 20
Source File: TestHeartbeatHandling.java From RDFS with Apache License 2.0
/**
 * Test if {@link FSNamesystem#handleHeartbeat(DatanodeRegistration, long, long, long, int, int)}
 * can pick up replication and/or invalidate requests and
 * observes the max limit
 */
public void testHeartbeat() throws Exception {
  final Configuration conf = new Configuration();
  final MiniDFSCluster cluster = new MiniDFSCluster(conf, 1, true, null);
  try {
    cluster.waitActive();
    final FSNamesystem namesystem = cluster.getNameNode().getNamesystem();
    final DatanodeRegistration nodeReg = cluster.getDataNodes().get(0)
        .getDNRegistrationForNS(cluster.getNameNode().getNamespaceID());
    DatanodeDescriptor dd = namesystem.getDatanode(nodeReg);

    final int REMAINING_BLOCKS = 1;
    final int MAX_REPLICATE_LIMIT = conf.getInt("dfs.max-repl-streams", 2);
    final int MAX_INVALIDATE_LIMIT = FSNamesystem.BLOCK_INVALIDATE_CHUNK;
    final int MAX_INVALIDATE_BLOCKS = 2*MAX_INVALIDATE_LIMIT+REMAINING_BLOCKS;
    final int MAX_REPLICATE_BLOCKS = 2*MAX_REPLICATE_LIMIT+REMAINING_BLOCKS;
    final DatanodeDescriptor[] ONE_TARGET = new DatanodeDescriptor[1];

    synchronized (namesystem.heartbeats) {
      for (int i=0; i<MAX_REPLICATE_BLOCKS; i++) {
        dd.addBlockToBeReplicated(
            new Block(i, 0, GenerationStamp.FIRST_VALID_STAMP), ONE_TARGET);
      }
      DatanodeCommand[] cmds = namesystem.handleHeartbeat(
          nodeReg, dd.getCapacity(), dd.getDfsUsed(), dd.getRemaining(),
          dd.getNamespaceUsed(), 0, 0);
      assertEquals(1, cmds.length);
      assertEquals(DatanodeProtocol.DNA_TRANSFER, cmds[0].getAction());
      assertEquals(MAX_REPLICATE_LIMIT, ((BlockCommand)cmds[0]).getBlocks().length);

      ArrayList<Block> blockList = new ArrayList<Block>(MAX_INVALIDATE_BLOCKS);
      for (int i=0; i<MAX_INVALIDATE_BLOCKS; i++) {
        blockList.add(new Block(i, 0, GenerationStamp.FIRST_VALID_STAMP));
      }
      dd.addBlocksToBeInvalidated(blockList);

      cmds = namesystem.handleHeartbeat(
          nodeReg, dd.getCapacity(), dd.getDfsUsed(), dd.getRemaining(),
          dd.getNamespaceUsed(), 0, 0);
      assertEquals(2, cmds.length);
      assertEquals(DatanodeProtocol.DNA_TRANSFER, cmds[0].getAction());
      assertEquals(MAX_REPLICATE_LIMIT, ((BlockCommand)cmds[0]).getBlocks().length);
      assertEquals(DatanodeProtocol.DNA_INVALIDATE, cmds[1].getAction());
      assertEquals(MAX_INVALIDATE_LIMIT, ((BlockCommand)cmds[1]).getBlocks().length);

      cmds = namesystem.handleHeartbeat(
          nodeReg, dd.getCapacity(), dd.getDfsUsed(), dd.getRemaining(),
          dd.getNamespaceUsed(), 0, 0);
      assertEquals(2, cmds.length);
      assertEquals(DatanodeProtocol.DNA_TRANSFER, cmds[0].getAction());
      assertEquals(REMAINING_BLOCKS, ((BlockCommand)cmds[0]).getBlocks().length);
      assertEquals(DatanodeProtocol.DNA_INVALIDATE, cmds[1].getAction());
      assertEquals(MAX_INVALIDATE_LIMIT, ((BlockCommand)cmds[1]).getBlocks().length);

      cmds = namesystem.handleHeartbeat(
          nodeReg, dd.getCapacity(), dd.getDfsUsed(), dd.getRemaining(),
          dd.getNamespaceUsed(), 0, 0);
      assertEquals(1, cmds.length);
      assertEquals(DatanodeProtocol.DNA_INVALIDATE, cmds[0].getAction());
      assertEquals(REMAINING_BLOCKS, ((BlockCommand)cmds[0]).getBlocks().length);

      cmds = namesystem.handleHeartbeat(
          nodeReg, dd.getCapacity(), dd.getDfsUsed(), dd.getRemaining(),
          dd.getNamespaceUsed(), 0, 0);
      assertEquals(null, cmds);
    }
  } finally {
    cluster.shutdown();
  }
}