Java Code Examples for org.apache.hadoop.hdfs.MiniDFSCluster#restartDataNodes()
The following examples show how to use org.apache.hadoop.hdfs.MiniDFSCluster#restartDataNodes().
You can go to the original project or source file by following the links above each example.
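Before the project-specific examples, here is a minimal sketch of the pattern they all share: start a MiniDFSCluster, write some data, restart every DataNode with restartDataNodes(), wait for the cluster to become active again, and verify the data is still readable. This is not taken from any of the projects below; the class name, file path, and sizes are illustrative, and a JUnit 4 harness with the Hadoop 2.x MiniDFSCluster.Builder API is assumed.

// Minimal sketch (assumed names and values; JUnit 4 and Hadoop 2.x assumed).
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.junit.Assert;
import org.junit.Test;

public class RestartDataNodesSketch {

  @Test
  public void dataSurvivesDataNodeRestart() throws Exception {
    Configuration conf = new HdfsConfiguration();
    MiniDFSCluster cluster =
        new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
    try {
      cluster.waitActive();
      FileSystem fs = cluster.getFileSystem();

      // Write a small file with replication factor 3.
      Path file = new Path("/restart-sketch.txt");
      DFSTestUtil.createFile(fs, file, 1024L, (short) 3, 0L);
      String before = DFSTestUtil.readFile(fs, file);

      // Restart every DataNode in the mini cluster, then wait until the
      // cluster reports itself active again before reading.
      cluster.restartDataNodes();
      cluster.waitActive();

      // The finalized replicas should still be readable after the restart.
      Assert.assertEquals(before, DFSTestUtil.readFile(fs, file));
    } finally {
      cluster.shutdown();
    }
  }
}

In the Hadoop 2.x line used by most of the examples below, MiniDFSCluster also exposes restartDataNodes(boolean keepPort) and a per-node restartDataNode(int) variant when a test needs finer control over ports or individual nodes.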
Example 1
Source File: TestDatanodeRestart.java From hadoop with Apache License 2.0
@Test
public void testFinalizedReplicas() throws Exception {
  // bring up a cluster of 3
  Configuration conf = new HdfsConfiguration();
  conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 1024L);
  conf.setInt(DFSConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_KEY, 512);
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
  cluster.waitActive();
  FileSystem fs = cluster.getFileSystem();
  try {
    // test finalized replicas
    final String TopDir = "/test";
    DFSTestUtil util = new DFSTestUtil.Builder().
        setName("TestDatanodeRestart").setNumFiles(2).build();
    util.createFiles(fs, TopDir, (short)3);
    util.waitReplication(fs, TopDir, (short)3);
    util.checkFiles(fs, TopDir);
    cluster.restartDataNodes();
    cluster.waitActive();
    util.checkFiles(fs, TopDir);
  } finally {
    cluster.shutdown();
  }
}
Example 2
Source File: TestDatanodeRestart.java From big-c with Apache License 2.0
@Test
public void testFinalizedReplicas() throws Exception {
  // bring up a cluster of 3
  Configuration conf = new HdfsConfiguration();
  conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 1024L);
  conf.setInt(DFSConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_KEY, 512);
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
  cluster.waitActive();
  FileSystem fs = cluster.getFileSystem();
  try {
    // test finalized replicas
    final String TopDir = "/test";
    DFSTestUtil util = new DFSTestUtil.Builder().
        setName("TestDatanodeRestart").setNumFiles(2).build();
    util.createFiles(fs, TopDir, (short)3);
    util.waitReplication(fs, TopDir, (short)3);
    util.checkFiles(fs, TopDir);
    cluster.restartDataNodes();
    cluster.waitActive();
    util.checkFiles(fs, TopDir);
  } finally {
    cluster.shutdown();
  }
}
Example 3
Source File: TestDatanodeRestart.java From RDFS with Apache License 2.0
public void testFinalizedReplicas() throws Exception {
  // bring up a cluster of 3
  Configuration conf = new Configuration();
  conf.setLong("dfs.block.size", 1024L);
  conf.setInt("dfs.write.packet.size", 512);
  MiniDFSCluster cluster = new MiniDFSCluster(conf, 3, true, null);
  cluster.waitActive();
  FileSystem fs = cluster.getFileSystem();
  try {
    // test finalized replicas
    final String TopDir = "/test";
    DFSTestUtil util = new DFSTestUtil("TestCrcCorruption", 2, 3, 8 * 1024);
    util.createFiles(fs, TopDir, (short) 3);
    util.waitReplication(fs, TopDir, (short) 3);
    util.checkFiles(fs, TopDir);
    cluster.restartDataNodes();
    cluster.waitActive();
    util.checkFiles(fs, TopDir);
  } finally {
    cluster.shutdown();
  }
}
Example 4
Source File: TestListCorruptFileBlocks.java From hadoop with Apache License 2.0
/**
 * Test if NN.listCorruptFiles() returns the right number of results.
 * The corrupt blocks are detected by the BlockPoolSliceScanner.
 * Also, test that DFS.listCorruptFileBlocks can make multiple successive
 * calls.
 */
@Test (timeout=300000)
public void testMaxCorruptFiles() throws Exception {
  MiniDFSCluster cluster = null;
  try {
    Configuration conf = new HdfsConfiguration();
    conf.setInt(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 3 * 1000); // datanode sends block reports
    cluster = new MiniDFSCluster.Builder(conf).build();
    FileSystem fs = cluster.getFileSystem();
    final int maxCorruptFileBlocks =
        FSNamesystem.DEFAULT_MAX_CORRUPT_FILEBLOCKS_RETURNED;

    // create 110 files with one block each
    DFSTestUtil util = new DFSTestUtil.Builder().setName("testMaxCorruptFiles").
        setNumFiles(maxCorruptFileBlocks * 3).setMaxLevels(1).setMaxSize(512).
        build();
    util.createFiles(fs, "/srcdat2", (short) 1);
    util.waitReplication(fs, "/srcdat2", (short) 1);

    // verify that there are no bad blocks.
    final NameNode namenode = cluster.getNameNode();
    Collection<FSNamesystem.CorruptFileBlockInfo> badFiles = namenode.
        getNamesystem().listCorruptFileBlocks("/srcdat2", null);
    assertTrue("Namenode has " + badFiles.size()
        + " corrupt files. Expecting none.", badFiles.size() == 0);

    // Now deliberately remove blocks from all files
    final String bpid = cluster.getNamesystem().getBlockPoolId();
    for (int i=0; i<4; i++) {
      for (int j=0; j<=1; j++) {
        File storageDir = cluster.getInstanceStorageDir(i, j);
        File data_dir = MiniDFSCluster.getFinalizedDir(storageDir, bpid);
        LOG.info("Removing files from " + data_dir);
        List<File> metadataFiles = MiniDFSCluster.getAllBlockMetadataFiles(
            data_dir);
        if (metadataFiles == null)
          continue;
        for (File metadataFile : metadataFiles) {
          File blockFile = Block.metaToBlockFile(metadataFile);
          assertTrue("Cannot remove file.", blockFile.delete());
          assertTrue("Cannot remove file.", metadataFile.delete());
        }
      }
    }

    // Occasionally the BlockPoolSliceScanner can run before we have removed
    // the blocks. Restart the Datanode to trigger the scanner into running
    // once more.
    LOG.info("Restarting Datanode to trigger BlockPoolSliceScanner");
    cluster.restartDataNodes();
    cluster.waitActive();

    badFiles = namenode.getNamesystem().listCorruptFileBlocks("/srcdat2", null);

    while (badFiles.size() < maxCorruptFileBlocks) {
      LOG.info("# of corrupt files is: " + badFiles.size());
      Thread.sleep(10000);
      badFiles = namenode.getNamesystem().
          listCorruptFileBlocks("/srcdat2", null);
    }
    badFiles = namenode.getNamesystem().
        listCorruptFileBlocks("/srcdat2", null);
    LOG.info("Namenode has bad files. " + badFiles.size());
    assertTrue("Namenode has " + badFiles.size() + " bad files. Expecting "
        + maxCorruptFileBlocks + ".",
        badFiles.size() == maxCorruptFileBlocks);

    CorruptFileBlockIterator iter = (CorruptFileBlockIterator)
        fs.listCorruptFileBlocks(new Path("/srcdat2"));
    int corruptPaths = countPaths(iter);
    assertTrue("Expected more than " + maxCorruptFileBlocks
        + " corrupt file blocks but got " + corruptPaths,
        corruptPaths > maxCorruptFileBlocks);
    assertTrue("Iterator should have made more than 1 call but made "
        + iter.getCallsMade(), iter.getCallsMade() > 1);

    util.cleanup(fs, "/srcdat2");
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
Example 5
Source File: TestDatanodeRestart.java From hadoop with Apache License 2.0
private void testRbwReplicas(MiniDFSCluster cluster, boolean isCorrupt)
    throws IOException {
  FSDataOutputStream out = null;
  FileSystem fs = cluster.getFileSystem();
  final Path src = new Path("/test.txt");
  try {
    final int fileLen = 515;
    // create some rbw replicas on disk
    byte[] writeBuf = new byte[fileLen];
    new Random().nextBytes(writeBuf);
    out = fs.create(src);
    out.write(writeBuf);
    out.hflush();
    DataNode dn = cluster.getDataNodes().get(0);
    for (FsVolumeSpi v : dataset(dn).getVolumes()) {
      final FsVolumeImpl volume = (FsVolumeImpl)v;
      File currentDir = volume.getCurrentDir().getParentFile().getParentFile();
      File rbwDir = new File(currentDir, "rbw");
      for (File file : rbwDir.listFiles()) {
        if (isCorrupt && Block.isBlockFilename(file)) {
          new RandomAccessFile(file, "rw").setLength(fileLen-1); // corrupt
        }
      }
    }
    cluster.restartDataNodes();
    cluster.waitActive();
    dn = cluster.getDataNodes().get(0);

    // check volumeMap: one rwr replica
    String bpid = cluster.getNamesystem().getBlockPoolId();
    ReplicaMap replicas = dataset(dn).volumeMap;
    Assert.assertEquals(1, replicas.size(bpid));
    ReplicaInfo replica = replicas.replicas(bpid).iterator().next();
    Assert.assertEquals(ReplicaState.RWR, replica.getState());
    if (isCorrupt) {
      Assert.assertEquals((fileLen-1)/512*512, replica.getNumBytes());
    } else {
      Assert.assertEquals(fileLen, replica.getNumBytes());
    }
    dataset(dn).invalidate(bpid, new Block[]{replica});
  } finally {
    IOUtils.closeStream(out);
    if (fs.exists(src)) {
      fs.delete(src, false);
    }
    fs.close();
  }
}
Example 6
Source File: DDLPersistenceHDFSTest.java From gemfirexd-oss with Apache License 2.0
public void testInsertWithHDFSDown() throws Exception {
  int clusterPort = AvailablePortHelper.getRandomAvailableTCPPort();
  System.setProperty("test.build.data", HDFS_DIR);
  Configuration hconf = new HdfsConfiguration();
  // hconf.set("hadoop.log.dir", "/tmp/hdfs/logs");
  hconf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1);
  Builder builder = new MiniDFSCluster.Builder(hconf);
  builder.numDataNodes(2);
  builder.nameNodePort(clusterPort);
  MiniDFSCluster cluster = builder.build();

  Properties props = new Properties();
  int mcastPort = AvailablePort.getRandomAvailablePort(AvailablePort.JGROUPS);
  props.put("mcast-port", String.valueOf(mcastPort));
  Connection conn = TestUtil.getConnection(props);
  Statement st = conn.createStatement();
  st.execute("create schema emp");
  st.execute("set schema emp");

  addExpectedException(ConnectException.class);
  st.execute("create hdfsstore myhdfs namenode 'hdfs://localhost:" + clusterPort
      + "' homedir '" + HDFS_DIR + "' BATCHTIMEINTERVAL 1 milliseconds");
  GemFireCacheImpl.getInstance().getLogger().info("<ExpectedException action=add>"
      + expectedExceptions + "</ExpectedException>");
  st.execute("create table mytab (col1 int primary key) hdfsstore (myhdfs) eviction by criteria (col1 < 1000) evict incoming");

  st.execute("insert into mytab values (5)");

  // Wait for data to be flushed to hdfs
  Thread.sleep(5000);

  // query hdfs, which will open a reader
  st.execute("select * from mytab -- GEMFIREXD-PROPERTIES queryHDFS=true \n where col1=5");

  cluster.shutdownNameNodes();

  // try {
  //   st.execute("insert into mytab values (118)");
  //   fail("expected exception in connecting to unavailable HDFS store");
  // } catch (SQLException e) {
  //   if (!"X0Z30".equals(e.getSQLState())) {
  //     throw e;
  //   }
  //   if (!HDFSIOException.class.equals(e.getCause().getClass())) {
  //     throw e;
  //   }
  // }

  cluster.restartNameNode();
  cluster.restartDataNodes();

  // Wait for namenode to leave safe mode
  Thread.sleep(10000);

  st.execute("insert into mytab values (118)");

  // query hdfs to trigger scan
  st.execute("select * from mytab -- GEMFIREXD-PROPERTIES queryHDFS=true \n");

  GemFireCacheImpl.getInstance().getLogger().info("<ExpectedException action=remove>"
      + expectedExceptions + "</ExpectedException>");

  st.execute("drop table mytab");
  st.execute("drop hdfsstore myhdfs");
  cluster.shutdownDataNodes();
  cluster.shutdownNameNodes();
  TestUtil.shutDown();
}
Example 7
Source File: TestListCorruptFileBlocks.java From big-c with Apache License 2.0
/**
 * Test if NN.listCorruptFiles() returns the right number of results.
 * The corrupt blocks are detected by the BlockPoolSliceScanner.
 * Also, test that DFS.listCorruptFileBlocks can make multiple successive
 * calls.
 */
@Test (timeout=300000)
public void testMaxCorruptFiles() throws Exception {
  MiniDFSCluster cluster = null;
  try {
    Configuration conf = new HdfsConfiguration();
    conf.setInt(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 3 * 1000); // datanode sends block reports
    cluster = new MiniDFSCluster.Builder(conf).build();
    FileSystem fs = cluster.getFileSystem();
    final int maxCorruptFileBlocks =
        FSNamesystem.DEFAULT_MAX_CORRUPT_FILEBLOCKS_RETURNED;

    // create 110 files with one block each
    DFSTestUtil util = new DFSTestUtil.Builder().setName("testMaxCorruptFiles").
        setNumFiles(maxCorruptFileBlocks * 3).setMaxLevels(1).setMaxSize(512).
        build();
    util.createFiles(fs, "/srcdat2", (short) 1);
    util.waitReplication(fs, "/srcdat2", (short) 1);

    // verify that there are no bad blocks.
    final NameNode namenode = cluster.getNameNode();
    Collection<FSNamesystem.CorruptFileBlockInfo> badFiles = namenode.
        getNamesystem().listCorruptFileBlocks("/srcdat2", null);
    assertTrue("Namenode has " + badFiles.size()
        + " corrupt files. Expecting none.", badFiles.size() == 0);

    // Now deliberately remove blocks from all files
    final String bpid = cluster.getNamesystem().getBlockPoolId();
    for (int i=0; i<4; i++) {
      for (int j=0; j<=1; j++) {
        File storageDir = cluster.getInstanceStorageDir(i, j);
        File data_dir = MiniDFSCluster.getFinalizedDir(storageDir, bpid);
        LOG.info("Removing files from " + data_dir);
        List<File> metadataFiles = MiniDFSCluster.getAllBlockMetadataFiles(
            data_dir);
        if (metadataFiles == null)
          continue;
        for (File metadataFile : metadataFiles) {
          File blockFile = Block.metaToBlockFile(metadataFile);
          assertTrue("Cannot remove file.", blockFile.delete());
          assertTrue("Cannot remove file.", metadataFile.delete());
        }
      }
    }

    // Occasionally the BlockPoolSliceScanner can run before we have removed
    // the blocks. Restart the Datanode to trigger the scanner into running
    // once more.
    LOG.info("Restarting Datanode to trigger BlockPoolSliceScanner");
    cluster.restartDataNodes();
    cluster.waitActive();

    badFiles = namenode.getNamesystem().listCorruptFileBlocks("/srcdat2", null);

    while (badFiles.size() < maxCorruptFileBlocks) {
      LOG.info("# of corrupt files is: " + badFiles.size());
      Thread.sleep(10000);
      badFiles = namenode.getNamesystem().
          listCorruptFileBlocks("/srcdat2", null);
    }
    badFiles = namenode.getNamesystem().
        listCorruptFileBlocks("/srcdat2", null);
    LOG.info("Namenode has bad files. " + badFiles.size());
    assertTrue("Namenode has " + badFiles.size() + " bad files. Expecting "
        + maxCorruptFileBlocks + ".",
        badFiles.size() == maxCorruptFileBlocks);

    CorruptFileBlockIterator iter = (CorruptFileBlockIterator)
        fs.listCorruptFileBlocks(new Path("/srcdat2"));
    int corruptPaths = countPaths(iter);
    assertTrue("Expected more than " + maxCorruptFileBlocks
        + " corrupt file blocks but got " + corruptPaths,
        corruptPaths > maxCorruptFileBlocks);
    assertTrue("Iterator should have made more than 1 call but made "
        + iter.getCallsMade(), iter.getCallsMade() > 1);

    util.cleanup(fs, "/srcdat2");
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
Example 8
Source File: TestDatanodeRestart.java From big-c with Apache License 2.0
private void testRbwReplicas(MiniDFSCluster cluster, boolean isCorrupt)
    throws IOException {
  FSDataOutputStream out = null;
  FileSystem fs = cluster.getFileSystem();
  final Path src = new Path("/test.txt");
  try {
    final int fileLen = 515;
    // create some rbw replicas on disk
    byte[] writeBuf = new byte[fileLen];
    new Random().nextBytes(writeBuf);
    out = fs.create(src);
    out.write(writeBuf);
    out.hflush();
    DataNode dn = cluster.getDataNodes().get(0);
    for (FsVolumeSpi v : dataset(dn).getVolumes()) {
      final FsVolumeImpl volume = (FsVolumeImpl)v;
      File currentDir = volume.getCurrentDir().getParentFile().getParentFile();
      File rbwDir = new File(currentDir, "rbw");
      for (File file : rbwDir.listFiles()) {
        if (isCorrupt && Block.isBlockFilename(file)) {
          new RandomAccessFile(file, "rw").setLength(fileLen-1); // corrupt
        }
      }
    }
    cluster.restartDataNodes();
    cluster.waitActive();
    dn = cluster.getDataNodes().get(0);

    // check volumeMap: one rwr replica
    String bpid = cluster.getNamesystem().getBlockPoolId();
    ReplicaMap replicas = dataset(dn).volumeMap;
    Assert.assertEquals(1, replicas.size(bpid));
    ReplicaInfo replica = replicas.replicas(bpid).iterator().next();
    Assert.assertEquals(ReplicaState.RWR, replica.getState());
    if (isCorrupt) {
      Assert.assertEquals((fileLen-1)/512*512, replica.getNumBytes());
    } else {
      Assert.assertEquals(fileLen, replica.getNumBytes());
    }
    dataset(dn).invalidate(bpid, new Block[]{replica});
  } finally {
    IOUtils.closeStream(out);
    if (fs.exists(src)) {
      fs.delete(src, false);
    }
    fs.close();
  }
}
Example 9
Source File: DDLPersistenceHDFSTest.java From gemfirexd-oss with Apache License 2.0
public void testInsertWithHDFSDown() throws Exception {
  int clusterPort = AvailablePortHelper.getRandomAvailableTCPPort();
  System.setProperty("test.build.data", HDFS_DIR);
  Configuration hconf = new HdfsConfiguration();
  // hconf.set("hadoop.log.dir", "/tmp/hdfs/logs");
  hconf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1);
  Builder builder = new MiniDFSCluster.Builder(hconf);
  builder.numDataNodes(2);
  builder.nameNodePort(clusterPort);
  MiniDFSCluster cluster = builder.build();

  Properties props = new Properties();
  int mcastPort = AvailablePort.getRandomAvailablePort(AvailablePort.JGROUPS);
  props.put("mcast-port", String.valueOf(mcastPort));
  Connection conn = TestUtil.getConnection(props);
  Statement st = conn.createStatement();
  st.execute("create schema emp");
  st.execute("set schema emp");

  addExpectedException(ConnectException.class);
  st.execute("create hdfsstore myhdfs namenode 'hdfs://localhost:" + clusterPort
      + "' homedir '" + HDFS_DIR + "' BATCHTIMEINTERVAL 1 milliseconds");
  GemFireCacheImpl.getInstance().getLogger().info("<ExpectedException action=add>"
      + expectedExceptions + "</ExpectedException>");
  st.execute("create table mytab (col1 int primary key) hdfsstore (myhdfs) eviction by criteria (col1 < 1000) evict incoming");

  st.execute("insert into mytab values (5)");

  // Wait for data to be flushed to hdfs
  Thread.sleep(5000);

  // query hdfs, which will open a reader
  st.execute("select * from mytab -- GEMFIREXD-PROPERTIES queryHDFS=true \n where col1=5");

  cluster.shutdownNameNodes();

  // try {
  //   st.execute("insert into mytab values (118)");
  //   fail("expected exception in connecting to unavailable HDFS store");
  // } catch (SQLException e) {
  //   if (!"X0Z30".equals(e.getSQLState())) {
  //     throw e;
  //   }
  //   if (!HDFSIOException.class.equals(e.getCause().getClass())) {
  //     throw e;
  //   }
  // }

  cluster.restartNameNode();
  cluster.restartDataNodes();

  // Wait for namenode to leave safe mode
  Thread.sleep(10000);

  st.execute("insert into mytab values (118)");

  // query hdfs to trigger scan
  st.execute("select * from mytab -- GEMFIREXD-PROPERTIES queryHDFS=true \n");

  GemFireCacheImpl.getInstance().getLogger().info("<ExpectedException action=remove>"
      + expectedExceptions + "</ExpectedException>");

  st.execute("drop table mytab");
  st.execute("drop hdfsstore myhdfs");
  cluster.shutdownDataNodes();
  cluster.shutdownNameNodes();
  TestUtil.shutDown();
}
Example 10
Source File: TestDatanodeRestart.java From RDFS with Apache License 2.0
private void testRbwReplicas(MiniDFSCluster cluster, boolean isCorrupt)
    throws IOException {
  FSDataOutputStream out = null;
  try {
    FileSystem fs = cluster.getFileSystem();
    NamespaceInfo nsInfo = cluster.getNameNode().versionRequest();
    final int fileLen = 515;
    // create some rbw replicas on disk
    byte[] writeBuf = new byte[fileLen];
    new Random().nextBytes(writeBuf);
    final Path src = new Path("/test.txt");
    out = fs.create(src);
    out.write(writeBuf);
    out.sync();
    DataNode dn = cluster.getDataNodes().get(0);
    // corrupt rbw replicas
    for (FSVolume volume : ((FSDataset) dn.data).volumes.getVolumes()) {
      File rbwDir = volume.getRbwDir(nsInfo.getNamespaceID());
      for (File file : rbwDir.listFiles()) {
        if (isCorrupt && Block.isBlockFilename(file.getName())) {
          new RandomAccessFile(file, "rw").setLength(fileLen - 1); // corrupt
        }
      }
    }
    cluster.restartDataNodes();
    cluster.waitActive();
    dn = cluster.getDataNodes().get(0);

    // check volumeMap: one rbw replica
    Map<Block, DatanodeBlockInfo> volumeMap =
        ((FSDataset) (dn.data)).volumeMap.getNamespaceMap(nsInfo.getNamespaceID());
    assertEquals(1, volumeMap.size());
    Block replica = volumeMap.keySet().iterator().next();
    if (isCorrupt) {
      assertEquals((fileLen - 1), replica.getNumBytes());
    } else {
      assertEquals(fileLen, replica.getNumBytes());
    }
    dn.data.invalidate(nsInfo.getNamespaceID(), new Block[] { replica });
    fs.delete(src, false);
  } finally {
    IOUtils.closeStream(out);
  }
}