Java Code Examples for org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory#getCurrentDir()
The following examples show how to use
org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory#getCurrentDir().
The examples are drawn from open source projects; the originating project and source file are noted above each example.
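Across all of these examples, getCurrentDir() resolves to the current/ subdirectory beneath a storage directory's root, which is where HDFS keeps its active metadata (fsimage files, edit logs, and the VERSION file). As a minimal sketch of the call itself (the storage root path below is hypothetical, chosen only for illustration):

import java.io.File;

import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;

public class CurrentDirDemo {
  public static void main(String[] args) {
    // Hypothetical storage root; in a real deployment this would come from
    // dfs.namenode.name.dir (NameNode) or dfs.datanode.data.dir (DataNode).
    StorageDirectory sd = new StorageDirectory(new File("/data/hdfs/name"));

    // getCurrentDir() appends the "current" component to the root,
    // yielding /data/hdfs/name/current here.
    File current = sd.getCurrentDir();
    System.out.println(current.getAbsolutePath());
  }
}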
Example 1
Source File: NNUpgradeUtil.java From hadoop with Apache License 2.0
/**
 * Rename the existing current dir to previous.tmp, and create a new empty
 * current dir.
 */
public static void renameCurToTmp(StorageDirectory sd) throws IOException {
  File curDir = sd.getCurrentDir();
  File prevDir = sd.getPreviousDir();
  final File tmpDir = sd.getPreviousTmp();

  Preconditions.checkState(curDir.exists(),
      "Current directory must exist for preupgrade.");
  Preconditions.checkState(!prevDir.exists(),
      "Previous directory must not exist for preupgrade.");
  Preconditions.checkState(!tmpDir.exists(),
      "Previous.tmp directory must not exist for preupgrade."
          + "Consider restarting for recovery.");

  // rename current to tmp
  NNStorage.rename(curDir, tmpDir);

  if (!curDir.mkdir()) {
    throw new IOException("Cannot create directory " + curDir);
  }
}
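For context, the preupgrade rename above amounts to a two-step transition of the on-disk layout. The following standalone sketch mimics those steps with plain java.nio.file calls against a scratch directory; the paths are made up for illustration, and this is not the Hadoop API itself:

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;

public class PreUpgradeLayoutDemo {
  public static void main(String[] args) throws IOException {
    // Scratch storage root standing in for a real NameNode name dir.
    Path root = Files.createTempDirectory("storage-demo");
    Path current = root.resolve("current");
    Path previousTmp = root.resolve("previous.tmp");
    Files.createDirectory(current);

    // Step 1: rename current/ to previous.tmp/ (NNStorage.rename above).
    Files.move(current, previousTmp);

    // Step 2: recreate an empty current/ (curDir.mkdir() above).
    Files.createDirectory(current);

    System.out.println("current exists:      " + Files.exists(current));
    System.out.println("previous.tmp exists: " + Files.exists(previousTmp));
  }
}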
Example 2
Source File: NNUpgradeUtil.java From hadoop with Apache License 2.0
/**
 * Perform any steps that must succeed across all storage dirs/JournalManagers
 * involved in an upgrade before proceeding onto the actual upgrade stage. If
 * a call to any JM's or local storage dir's doPreUpgrade method fails, then
 * doUpgrade will not be called for any JM. The existing current dir is
 * renamed to previous.tmp, and then a new, empty current dir is created.
 *
 * @param conf configuration for creating {@link EditLogFileOutputStream}
 * @param sd the storage directory to perform the pre-upgrade procedure.
 * @throws IOException in the event of error
 */
static void doPreUpgrade(Configuration conf, StorageDirectory sd)
    throws IOException {
  LOG.info("Starting upgrade of storage directory " + sd.getRoot());

  // rename current to tmp
  renameCurToTmp(sd);

  final File curDir = sd.getCurrentDir();
  final File tmpDir = sd.getPreviousTmp();
  List<String> fileNameList = IOUtils.listDirectory(tmpDir,
      new FilenameFilter() {
        @Override
        public boolean accept(File dir, String name) {
          return dir.equals(tmpDir)
              && name.startsWith(NNStorage.NameNodeFile.EDITS.getName());
        }
      });

  for (String s : fileNameList) {
    File prevFile = new File(tmpDir, s);
    File newFile = new File(curDir, prevFile.getName());
    Files.createLink(newFile.toPath(), prevFile.toPath());
  }
}
Example 3
Source File: NameSpaceSliceStorage.java From RDFS with Apache License 2.0
/**
 * Format a namespace slice storage.
 * @param nsSdir the namespace storage directory
 * @param nsInfo the namespace info
 * @throws IOException Signals that an I/O exception has occurred.
 */
private void format(StorageDirectory nsSdir, NamespaceInfo nsInfo)
    throws IOException {
  LOG.info("Formatting namespace " + namespaceID + " directory "
      + nsSdir.getCurrentDir());
  nsSdir.clearDirectory(); // create directory
  File rbwDir = new File(nsSdir.getCurrentDir(), STORAGE_DIR_RBW);
  File finalizedDir = new File(nsSdir.getCurrentDir(), STORAGE_DIR_FINALIZED);
  LOG.info("Creating Directories : " + rbwDir + ", " + finalizedDir);
  if (!rbwDir.mkdirs() || !finalizedDir.mkdirs()) {
    throw new IOException("Cannot create directories : " + rbwDir + ", "
        + finalizedDir);
  }
  this.layoutVersion = FSConstants.LAYOUT_VERSION;
  this.cTime = nsInfo.getCTime();
  this.namespaceID = nsInfo.getNamespaceID();
  this.storageType = NodeType.DATA_NODE;
  nsSdir.write();
}
Example 4
Source File: NNUpgradeUtil.java From big-c with Apache License 2.0
/**
 * Perform rollback of the storage dir to the previous state. The existing
 * current dir is removed, and the previous dir is renamed to current.
 *
 * @param sd the storage directory to roll back.
 * @throws IOException in the event of error
 */
static void doRollBack(StorageDirectory sd) throws IOException {
  File prevDir = sd.getPreviousDir();
  if (!prevDir.exists()) {
    return;
  }

  File tmpDir = sd.getRemovedTmp();
  Preconditions.checkState(!tmpDir.exists(),
      "removed.tmp directory must not exist for rollback."
          + "Consider restarting for recovery.");

  // rename current to tmp
  File curDir = sd.getCurrentDir();
  Preconditions.checkState(curDir.exists(),
      "Current directory must exist for rollback.");
  NNStorage.rename(curDir, tmpDir);

  // rename previous to current
  NNStorage.rename(prevDir, curDir);

  // delete tmp dir
  NNStorage.deleteDir(tmpDir);
  LOG.info("Rollback of " + sd.getRoot() + " is complete.");
}
Example 5
Source File: NNUpgradeUtil.java From big-c with Apache License 2.0
/**
 * Rename the existing current dir to previous.tmp, and create a new empty
 * current dir.
 */
public static void renameCurToTmp(StorageDirectory sd) throws IOException {
  File curDir = sd.getCurrentDir();
  File prevDir = sd.getPreviousDir();
  final File tmpDir = sd.getPreviousTmp();

  Preconditions.checkState(curDir.exists(),
      "Current directory must exist for preupgrade.");
  Preconditions.checkState(!prevDir.exists(),
      "Previous directory must not exist for preupgrade.");
  Preconditions.checkState(!tmpDir.exists(),
      "Previous.tmp directory must not exist for preupgrade."
          + "Consider restarting for recovery.");

  // rename current to tmp
  NNStorage.rename(curDir, tmpDir);

  if (!curDir.mkdir()) {
    throw new IOException("Cannot create directory " + curDir);
  }
}
Example 6
Source File: TestEditLog.java From big-c with Apache License 2.0
private void assertExistsInStorageDirs(MiniDFSCluster cluster,
    NameNodeDirType dirType,
    String filename) {
  NNStorage storage = cluster.getNamesystem().getFSImage().getStorage();
  for (StorageDirectory sd : storage.dirIterable(dirType)) {
    File f = new File(sd.getCurrentDir(), filename);
    assertTrue("Expect that " + f + " exists", f.exists());
  }
}
Example 7
Source File: TestEditLogRace.java From big-c with Apache License 2.0
private long verifyEditLogs(FSNamesystem namesystem, FSImage fsimage,
                            String logFileName, long startTxId)
    throws IOException {
  long numEdits = -1;

  // Verify that we can read in all the transactions that we have written.
  // If there were any corruptions, it is likely that the reading in
  // of these transactions will throw an exception.
  for (StorageDirectory sd :
      fsimage.getStorage().dirIterable(NameNodeDirType.EDITS)) {
    File editFile = new File(sd.getCurrentDir(), logFileName);

    System.out.println("Verifying file: " + editFile);
    FSEditLogLoader loader = new FSEditLogLoader(namesystem, startTxId);
    long numEditsThisLog = loader.loadFSEdits(
        new EditLogFileInputStream(editFile), startTxId);

    System.out.println("Number of edits: " + numEditsThisLog);
    assertTrue(numEdits == -1 || numEditsThisLog == numEdits);
    numEdits = numEditsThisLog;
  }

  assertTrue(numEdits != -1);
  return numEdits;
}
Example 8
Source File: SecondaryNameNode.java From big-c with Apache License 2.0
/**
 * Ensure that the current/ directory exists in all storage
 * directories
 */
void ensureCurrentDirExists() throws IOException {
  for (Iterator<StorageDirectory> it = storage.dirIterator(); it.hasNext();) {
    StorageDirectory sd = it.next();
    File curDir = sd.getCurrentDir();
    if (!curDir.exists() && !curDir.mkdirs()) {
      throw new IOException("Could not create directory " + curDir);
    }
  }
}
Example 9
Source File: FSImageTestUtil.java From hadoop with Apache License 2.0
public static void logStorageContents(Log LOG, NNStorage storage) {
  LOG.info("current storages and corresponding sizes:");
  for (StorageDirectory sd : storage.dirIterable(null)) {
    File curDir = sd.getCurrentDir();
    LOG.info("In directory " + curDir);
    File[] files = curDir.listFiles();
    Arrays.sort(files);
    for (File f : files) {
      LOG.info(" file " + f.getAbsolutePath() + "; len = " + f.length());
    }
  }
}
Example 10
Source File: FSImageTestUtil.java From hadoop with Apache License 2.0
/**
 * @return the latest edits log, finalized or otherwise, from the given
 * storage directory.
 */
public static EditLogFile findLatestEditsLog(StorageDirectory sd)
    throws IOException {
  File currentDir = sd.getCurrentDir();
  List<EditLogFile> foundEditLogs =
      Lists.newArrayList(FileJournalManager.matchEditLogs(currentDir));
  return Collections.max(foundEditLogs, EditLogFile.COMPARE_BY_START_TXID);
}
Example 11
Source File: TestEditLog.java From hadoop with Apache License 2.0
private void assertExistsInStorageDirs(MiniDFSCluster cluster,
    NameNodeDirType dirType,
    String filename) {
  NNStorage storage = cluster.getNamesystem().getFSImage().getStorage();
  for (StorageDirectory sd : storage.dirIterable(dirType)) {
    File f = new File(sd.getCurrentDir(), filename);
    assertTrue("Expect that " + f + " exists", f.exists());
  }
}
Example 12
Source File: FSImageTestUtil.java From big-c with Apache License 2.0
/**
 * @return the latest edits log, finalized or otherwise, from the given
 * storage directory.
 */
public static EditLogFile findLatestEditsLog(StorageDirectory sd)
    throws IOException {
  File currentDir = sd.getCurrentDir();
  List<EditLogFile> foundEditLogs =
      Lists.newArrayList(FileJournalManager.matchEditLogs(currentDir));
  return Collections.max(foundEditLogs, EditLogFile.COMPARE_BY_START_TXID);
}
Example 13
Source File: SecondaryNameNode.java From hadoop with Apache License 2.0
/**
 * Ensure that the current/ directory exists in all storage
 * directories
 */
void ensureCurrentDirExists() throws IOException {
  for (Iterator<StorageDirectory> it = storage.dirIterator(); it.hasNext();) {
    StorageDirectory sd = it.next();
    File curDir = sd.getCurrentDir();
    if (!curDir.exists() && !curDir.mkdirs()) {
      throw new IOException("Could not create directory " + curDir);
    }
  }
}
Example 14
Source File: FSImageTestUtil.java From big-c with Apache License 2.0
public static void logStorageContents(Log LOG, NNStorage storage) {
  LOG.info("current storages and corresponding sizes:");
  for (StorageDirectory sd : storage.dirIterable(null)) {
    File curDir = sd.getCurrentDir();
    LOG.info("In directory " + curDir);
    File[] files = curDir.listFiles();
    Arrays.sort(files);
    for (File f : files) {
      LOG.info(" file " + f.getAbsolutePath() + "; len = " + f.length());
    }
  }
}
Example 15
Source File: TestCheckpoint.java From hadoop with Apache License 2.0
/**
 * Test case where the NN is configured with a name-only and an edits-only
 * dir, with storage-restore turned on. In this case, if the name-only dir
 * disappears and comes back, a new checkpoint after it has been restored
 * should function correctly.
 * @throws Exception
 */
@Test
public void testCheckpointWithSeparateDirsAfterNameFails() throws Exception {
  MiniDFSCluster cluster = null;
  SecondaryNameNode secondary = null;
  File currentDir = null;

  Configuration conf = new HdfsConfiguration();

  File base_dir = new File(MiniDFSCluster.getBaseDirectory());
  conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_RESTORE_KEY, true);
  conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
      MiniDFSCluster.getBaseDirectory() + "/name-only");
  conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY,
      MiniDFSCluster.getBaseDirectory() + "/edits-only");
  conf.set(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_DIR_KEY,
      fileAsURI(new File(base_dir, "namesecondary1")).toString());

  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).format(true)
        .manageNameDfsDirs(false).build();

    secondary = startSecondaryNameNode(conf);

    // Checkpoint once
    secondary.doCheckpoint();

    // Now primary NN experiences failure of its only name dir -- fake by
    // setting its current dir to a-x permissions
    NamenodeProtocols nn = cluster.getNameNodeRpc();
    NNStorage storage = cluster.getNameNode().getFSImage().getStorage();
    StorageDirectory sd0 = storage.getStorageDir(0);
    assertEquals(NameNodeDirType.IMAGE, sd0.getStorageDirType());
    currentDir = sd0.getCurrentDir();
    assertEquals(0, FileUtil.chmod(currentDir.getAbsolutePath(), "000"));

    // Try to upload checkpoint -- this should fail since there are no
    // valid storage dirs
    try {
      secondary.doCheckpoint();
      fail("Did not fail to checkpoint when there are no valid storage dirs");
    } catch (IOException ioe) {
      GenericTestUtils.assertExceptionContains(
          "No targets in destination storage", ioe);
    }

    // Restore the good dir
    assertEquals(0, FileUtil.chmod(currentDir.getAbsolutePath(), "755"));
    nn.restoreFailedStorage("true");
    nn.rollEditLog();

    // Checkpoint again -- this should upload to the restored name dir
    secondary.doCheckpoint();

    assertNNHasCheckpoints(cluster, ImmutableList.of(8));
    assertParallelFilesInvariant(cluster, ImmutableList.of(secondary));
  } finally {
    if (currentDir != null) {
      FileUtil.chmod(currentDir.getAbsolutePath(), "755");
    }
    cleanup(secondary);
    secondary = null;
    cleanup(cluster);
    cluster = null;
  }
}
Example 16
Source File: TestCheckpoint.java From hadoop with Apache License 2.0
/**
 * Test that, if a storage directory is failed when a checkpoint occurs,
 * the non-failed storage directory receives the checkpoint.
 */
@Test
public void testCheckpointWithFailedStorageDir() throws Exception {
  MiniDFSCluster cluster = null;
  SecondaryNameNode secondary = null;
  File currentDir = null;

  Configuration conf = new HdfsConfiguration();

  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0)
        .format(true).build();

    secondary = startSecondaryNameNode(conf);

    // Checkpoint once
    secondary.doCheckpoint();

    // Now primary NN experiences failure of a volume -- fake by
    // setting its current dir to a-x permissions
    NamenodeProtocols nn = cluster.getNameNodeRpc();
    NNStorage storage = cluster.getNameNode().getFSImage().getStorage();
    StorageDirectory sd0 = storage.getStorageDir(0);
    StorageDirectory sd1 = storage.getStorageDir(1);

    currentDir = sd0.getCurrentDir();
    FileUtil.setExecutable(currentDir, false);

    // Upload checkpoint when NN has a bad storage dir. This should
    // succeed and create the checkpoint in the good dir.
    secondary.doCheckpoint();

    GenericTestUtils.assertExists(
        new File(sd1.getCurrentDir(), NNStorage.getImageFileName(2)));

    // Restore the good dir
    FileUtil.setExecutable(currentDir, true);
    nn.restoreFailedStorage("true");
    nn.rollEditLog();

    // Checkpoint again -- this should upload to both dirs
    secondary.doCheckpoint();

    assertNNHasCheckpoints(cluster, ImmutableList.of(8));
    assertParallelFilesInvariant(cluster, ImmutableList.of(secondary));
  } finally {
    if (currentDir != null) {
      FileUtil.setExecutable(currentDir, true);
    }
    cleanup(secondary);
    secondary = null;
    cleanup(cluster);
    cluster = null;
  }
}
Example 17
Source File: TestSaveNamespace.java From big-c with Apache License 2.0
@Test(timeout=20000)
public void testCancelSaveNamespace() throws Exception {
  Configuration conf = getConf();
  NameNode.initMetrics(conf, NamenodeRole.NAMENODE);
  DFSTestUtil.formatNameNode(conf);
  FSNamesystem fsn = FSNamesystem.loadFromDisk(conf);

  // Replace the FSImage with a spy
  final FSImage image = fsn.getFSImage();
  NNStorage storage = image.getStorage();
  storage.close(); // unlock any directories that FSNamesystem's initialization may have locked
  storage.setStorageDirectories(
      FSNamesystem.getNamespaceDirs(conf),
      FSNamesystem.getNamespaceEditsDirs(conf));

  FSNamesystem spyFsn = spy(fsn);
  final FSNamesystem finalFsn = spyFsn;
  DelayAnswer delayer = new GenericTestUtils.DelayAnswer(LOG);
  BlockIdManager bid = spy(spyFsn.getBlockIdManager());
  Whitebox.setInternalState(finalFsn, "blockIdManager", bid);
  doAnswer(delayer).when(bid).getGenerationStampV2();

  ExecutorService pool = Executors.newFixedThreadPool(2);

  try {
    doAnEdit(fsn, 1);
    final Canceler canceler = new Canceler();

    // Save namespace
    fsn.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
    try {
      Future<Void> saverFuture = pool.submit(new Callable<Void>() {
        @Override
        public Void call() throws Exception {
          image.saveNamespace(finalFsn, NameNodeFile.IMAGE, canceler);
          return null;
        }
      });

      // Wait until saveNamespace calls getGenerationStamp
      delayer.waitForCall();
      // then cancel the saveNamespace
      Future<Void> cancelFuture = pool.submit(new Callable<Void>() {
        @Override
        public Void call() throws Exception {
          canceler.cancel("cancelled");
          return null;
        }
      });
      // give the cancel call time to run
      Thread.sleep(500);

      // allow saveNamespace to proceed - it should check the cancel flag after
      // this point and throw an exception
      delayer.proceed();

      cancelFuture.get();
      saverFuture.get();
      fail("saveNamespace did not fail even though cancelled!");
    } catch (Throwable t) {
      GenericTestUtils.assertExceptionContains(
          "SaveNamespaceCancelledException", t);
    }
    LOG.info("Successfully cancelled a saveNamespace");

    // Check that we have only the original image and not any
    // cruft left over from half-finished images
    FSImageTestUtil.logStorageContents(LOG, storage);
    for (StorageDirectory sd : storage.dirIterable(null)) {
      File curDir = sd.getCurrentDir();
      GenericTestUtils.assertGlobEquals(curDir, "fsimage_.*",
          NNStorage.getImageFileName(0),
          NNStorage.getImageFileName(0) + MD5FileUtils.MD5_SUFFIX);
    }
  } finally {
    fsn.close();
  }
}
Example 18
Source File: TestSaveNamespace.java From hadoop with Apache License 2.0
@Test(timeout=20000)
public void testCancelSaveNamespace() throws Exception {
  Configuration conf = getConf();
  NameNode.initMetrics(conf, NamenodeRole.NAMENODE);
  DFSTestUtil.formatNameNode(conf);
  FSNamesystem fsn = FSNamesystem.loadFromDisk(conf);

  // Replace the FSImage with a spy
  final FSImage image = fsn.getFSImage();
  NNStorage storage = image.getStorage();
  storage.close(); // unlock any directories that FSNamesystem's initialization may have locked
  storage.setStorageDirectories(
      FSNamesystem.getNamespaceDirs(conf),
      FSNamesystem.getNamespaceEditsDirs(conf));

  FSNamesystem spyFsn = spy(fsn);
  final FSNamesystem finalFsn = spyFsn;
  DelayAnswer delayer = new GenericTestUtils.DelayAnswer(LOG);
  BlockIdManager bid = spy(spyFsn.getBlockIdManager());
  Whitebox.setInternalState(finalFsn, "blockIdManager", bid);
  doAnswer(delayer).when(bid).getGenerationStampV2();

  ExecutorService pool = Executors.newFixedThreadPool(2);

  try {
    doAnEdit(fsn, 1);
    final Canceler canceler = new Canceler();

    // Save namespace
    fsn.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
    try {
      Future<Void> saverFuture = pool.submit(new Callable<Void>() {
        @Override
        public Void call() throws Exception {
          image.saveNamespace(finalFsn, NameNodeFile.IMAGE, canceler);
          return null;
        }
      });

      // Wait until saveNamespace calls getGenerationStamp
      delayer.waitForCall();
      // then cancel the saveNamespace
      Future<Void> cancelFuture = pool.submit(new Callable<Void>() {
        @Override
        public Void call() throws Exception {
          canceler.cancel("cancelled");
          return null;
        }
      });
      // give the cancel call time to run
      Thread.sleep(500);

      // allow saveNamespace to proceed - it should check the cancel flag after
      // this point and throw an exception
      delayer.proceed();

      cancelFuture.get();
      saverFuture.get();
      fail("saveNamespace did not fail even though cancelled!");
    } catch (Throwable t) {
      GenericTestUtils.assertExceptionContains(
          "SaveNamespaceCancelledException", t);
    }
    LOG.info("Successfully cancelled a saveNamespace");

    // Check that we have only the original image and not any
    // cruft left over from half-finished images
    FSImageTestUtil.logStorageContents(LOG, storage);
    for (StorageDirectory sd : storage.dirIterable(null)) {
      File curDir = sd.getCurrentDir();
      GenericTestUtils.assertGlobEquals(curDir, "fsimage_.*",
          NNStorage.getImageFileName(0),
          NNStorage.getImageFileName(0) + MD5FileUtils.MD5_SUFFIX);
    }
  } finally {
    fsn.close();
  }
}
Example 19
Source File: TestCheckpoint.java From big-c with Apache License 2.0
/**
 * Test case where the NN is configured with a name-only and an edits-only
 * dir, with storage-restore turned on. In this case, if the name-only dir
 * disappears and comes back, a new checkpoint after it has been restored
 * should function correctly.
 * @throws Exception
 */
@Test
public void testCheckpointWithSeparateDirsAfterNameFails() throws Exception {
  MiniDFSCluster cluster = null;
  SecondaryNameNode secondary = null;
  File currentDir = null;

  Configuration conf = new HdfsConfiguration();

  File base_dir = new File(MiniDFSCluster.getBaseDirectory());
  conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_RESTORE_KEY, true);
  conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
      MiniDFSCluster.getBaseDirectory() + "/name-only");
  conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY,
      MiniDFSCluster.getBaseDirectory() + "/edits-only");
  conf.set(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_DIR_KEY,
      fileAsURI(new File(base_dir, "namesecondary1")).toString());

  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).format(true)
        .manageNameDfsDirs(false).build();

    secondary = startSecondaryNameNode(conf);

    // Checkpoint once
    secondary.doCheckpoint();

    // Now primary NN experiences failure of its only name dir -- fake by
    // setting its current dir to a-x permissions
    NamenodeProtocols nn = cluster.getNameNodeRpc();
    NNStorage storage = cluster.getNameNode().getFSImage().getStorage();
    StorageDirectory sd0 = storage.getStorageDir(0);
    assertEquals(NameNodeDirType.IMAGE, sd0.getStorageDirType());
    currentDir = sd0.getCurrentDir();
    assertEquals(0, FileUtil.chmod(currentDir.getAbsolutePath(), "000"));

    // Try to upload checkpoint -- this should fail since there are no
    // valid storage dirs
    try {
      secondary.doCheckpoint();
      fail("Did not fail to checkpoint when there are no valid storage dirs");
    } catch (IOException ioe) {
      GenericTestUtils.assertExceptionContains(
          "No targets in destination storage", ioe);
    }

    // Restore the good dir
    assertEquals(0, FileUtil.chmod(currentDir.getAbsolutePath(), "755"));
    nn.restoreFailedStorage("true");
    nn.rollEditLog();

    // Checkpoint again -- this should upload to the restored name dir
    secondary.doCheckpoint();

    assertNNHasCheckpoints(cluster, ImmutableList.of(8));
    assertParallelFilesInvariant(cluster, ImmutableList.of(secondary));
  } finally {
    if (currentDir != null) {
      FileUtil.chmod(currentDir.getAbsolutePath(), "755");
    }
    cleanup(secondary);
    secondary = null;
    cleanup(cluster);
    cluster = null;
  }
}
Example 20
Source File: NNUpgradeUtil.java From big-c with Apache License 2.0
/**
 * Perform any steps that must succeed across all storage dirs/JournalManagers
 * involved in an upgrade before proceeding onto the actual upgrade stage. If
 * a call to any JM's or local storage dir's doPreUpgrade method fails, then
 * doUpgrade will not be called for any JM. The existing current dir is
 * renamed to previous.tmp, and then a new, empty current dir is created.
 *
 * @param conf configuration for creating {@link EditLogFileOutputStream}
 * @param sd the storage directory to perform the pre-upgrade procedure.
 * @throws IOException in the event of error
 */
static void doPreUpgrade(Configuration conf, StorageDirectory sd)
    throws IOException {
  LOG.info("Starting upgrade of storage directory " + sd.getRoot());

  // rename current to tmp
  renameCurToTmp(sd);

  final File curDir = sd.getCurrentDir();
  final File tmpDir = sd.getPreviousTmp();
  List<String> fileNameList = IOUtils.listDirectory(tmpDir,
      new FilenameFilter() {
        @Override
        public boolean accept(File dir, String name) {
          return dir.equals(tmpDir)
              && name.startsWith(NNStorage.NameNodeFile.EDITS.getName());
        }
      });

  for (String s : fileNameList) {
    File prevFile = new File(tmpDir, s);
    File newFile = new File(curDir, prevFile.getName());
    Files.createLink(newFile.toPath(), prevFile.toPath());
  }
}