Java Code Examples for org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory#clearDirectory()
The following examples show how to use
org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory#clearDirectory().
You can go to the original project or source file by following the links above each example.
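Across all of the examples below, clearDirectory() serves the same purpose: it deletes whatever is in the storage directory's current/ subdirectory and recreates it empty, giving the caller a clean, formatted layout to write fresh storage properties into. The following minimal sketch shows that core call in isolation; the /tmp path and class name are placeholders of ours, not from the examples, and real code would obtain the storage root from the HDFS configuration rather than hard-coding it.

import java.io.File;
import java.io.IOException;

import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;

public class ClearDirectorySketch {
  public static void main(String[] args) throws IOException {
    // Placeholder storage root; real callers iterate the directories
    // configured for the namenode, datanode, or checkpoint storage.
    StorageDirectory sd = new StorageDirectory(new File("/tmp/storage-demo"));

    // Removes any existing current/ contents and recreates the directory,
    // leaving an empty, freshly formatted storage layout.
    sd.clearDirectory();

    System.out.println("Recreated " + sd.getCurrentDir());
  }
}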
Example 1
Source File: NameSpaceSliceStorage.java From RDFS with Apache License 2.0
/**
 * Format a namespace slice storage.
 * @param nsSdir the namespace storage directory
 * @param nsInfo the namespace info
 * @throws IOException Signals that an I/O exception has occurred.
 */
private void format(StorageDirectory nsSdir, NamespaceInfo nsInfo)
    throws IOException {
  LOG.info("Formatting namespace " + namespaceID + " directory "
      + nsSdir.getCurrentDir());
  nsSdir.clearDirectory(); // create directory
  File rbwDir = new File(nsSdir.getCurrentDir(), STORAGE_DIR_RBW);
  File finalizedDir = new File(nsSdir.getCurrentDir(), STORAGE_DIR_FINALIZED);
  LOG.info("Creating Directories : " + rbwDir + ", " + finalizedDir);
  if (!rbwDir.mkdirs() || !finalizedDir.mkdirs()) {
    throw new IOException("Cannot create directories : " + rbwDir + ", "
        + finalizedDir);
  }
  this.layoutVersion = FSConstants.LAYOUT_VERSION;
  this.cTime = nsInfo.getCTime();
  this.namespaceID = nsInfo.getNamespaceID();
  this.storageType = NodeType.DATA_NODE;
  nsSdir.write();
}
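Here the formatting happens unconditionally: the slice directory is cleared, the block-storage subdirectories (rbw/ and finalized/) are recreated under current/, and the new namespace properties are persisted with nsSdir.write().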
Example 2
Source File: BackupImage.java From hadoop with Apache License 2.0
/**
 * Analyze backup storage directories for consistency.<br>
 * Recover from incomplete checkpoints if required.<br>
 * Read VERSION and fstime files if exist.<br>
 * Do not load image or edits.
 *
 * @throws IOException if the node should shutdown.
 */
void recoverCreateRead() throws IOException {
  for (Iterator<StorageDirectory> it = storage.dirIterator(); it.hasNext();) {
    StorageDirectory sd = it.next();
    StorageState curState;
    try {
      curState = sd.analyzeStorage(HdfsServerConstants.StartupOption.REGULAR, storage);
      // sd is locked but not opened
      switch (curState) {
      case NON_EXISTENT:
        // fail if any of the configured storage dirs are inaccessible
        throw new InconsistentFSStateException(sd.getRoot(),
            "checkpoint directory does not exist or is not accessible.");
      case NOT_FORMATTED:
        // for backup node all directories may be unformatted initially
        LOG.info("Storage directory " + sd.getRoot() + " is not formatted.");
        LOG.info("Formatting ...");
        sd.clearDirectory(); // create empty current
        break;
      case NORMAL:
        break;
      default:  // recovery is possible
        sd.doRecover(curState);
      }
      if (curState != StorageState.NOT_FORMATTED) {
        // read and verify consistency with other directories
        storage.readProperties(sd);
      }
    } catch (IOException ioe) {
      sd.unlock();
      throw ioe;
    }
  }
}
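In this example clearDirectory() is reached only through the NOT_FORMATTED state: on a backup node every storage directory may start out unformatted, so each one is formatted in place, and the VERSION consistency check (storage.readProperties) is skipped for directories that were just created.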
Example 3
Source File: SecondaryNameNode.java From hadoop with Apache License 2.0
/**
 * Analyze checkpoint directories.
 * Create directories if they do not exist.
 * Recover from an unsuccessful checkpoint if necessary.
 *
 * @throws IOException
 */
void recoverCreate(boolean format) throws IOException {
  storage.attemptRestoreRemovedStorage();
  storage.unlockAll();

  for (Iterator<StorageDirectory> it = storage.dirIterator(); it.hasNext();) {
    StorageDirectory sd = it.next();
    boolean isAccessible = true;
    try { // create directories if don't exist yet
      if (!sd.getRoot().mkdirs()) {
        // do nothing, directory is already created
      }
    } catch (SecurityException se) {
      isAccessible = false;
    }
    if (!isAccessible)
      throw new InconsistentFSStateException(sd.getRoot(),
          "cannot access checkpoint directory.");

    if (format) {
      // Don't confirm, since this is just the secondary namenode.
      LOG.info("Formatting storage directory " + sd);
      sd.clearDirectory();
    }

    StorageState curState;
    try {
      curState = sd.analyzeStorage(HdfsServerConstants.StartupOption.REGULAR, storage);
      // sd is locked but not opened
      switch (curState) {
      case NON_EXISTENT:
        // fail if any of the configured checkpoint dirs are inaccessible
        throw new InconsistentFSStateException(sd.getRoot(),
            "checkpoint directory does not exist or is not accessible.");
      case NOT_FORMATTED:
        break;  // it's ok since initially there is no current and VERSION
      case NORMAL:
        // Read the VERSION file. This verifies that:
        // (a) the VERSION file for each of the directories is the same,
        // and (b) when we connect to a NN, we can verify that the remote
        // node matches the same namespace that we ran on previously.
        storage.readProperties(sd);
        break;
      default:  // recovery is possible
        sd.doRecover(curState);
      }
    } catch (IOException ioe) {
      sd.unlock();
      throw ioe;
    }
  }
}
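Unlike the backup-node variant above, recoverCreate() takes an explicit format flag and, when it is set, calls clearDirectory() before analyzing the directory state at all; as the inline comment notes, no confirmation is needed because these are only the secondary namenode's checkpoint directories.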