org.apache.hadoop.hdfs.server.common.Storage.StorageState Java Examples
The following examples show how to use
org.apache.hadoop.hdfs.server.common.Storage.StorageState.
You can vote up the examples you find useful or vote down those you don't.
You can visit the original project or source file by following the link above each example, and check related API usage on the sidebar.
Example #1
Source File: BackupImage.java From hadoop with Apache License 2.0 | 5 votes |
/** * Analyze backup storage directories for consistency.<br> * Recover from incomplete checkpoints if required.<br> * Read VERSION and fstime files if exist.<br> * Do not load image or edits. * * @throws IOException if the node should shutdown. */ void recoverCreateRead() throws IOException { for (Iterator<StorageDirectory> it = storage.dirIterator(); it.hasNext();) { StorageDirectory sd = it.next(); StorageState curState; try { curState = sd.analyzeStorage(HdfsServerConstants.StartupOption.REGULAR, storage); // sd is locked but not opened switch(curState) { case NON_EXISTENT: // fail if any of the configured storage dirs are inaccessible throw new InconsistentFSStateException(sd.getRoot(), "checkpoint directory does not exist or is not accessible."); case NOT_FORMATTED: // for backup node all directories may be unformatted initially LOG.info("Storage directory " + sd.getRoot() + " is not formatted."); LOG.info("Formatting ..."); sd.clearDirectory(); // create empty current break; case NORMAL: break; default: // recovery is possible sd.doRecover(curState); } if(curState != StorageState.NOT_FORMATTED) { // read and verify consistency with other directories storage.readProperties(sd); } } catch(IOException ioe) { sd.unlock(); throw ioe; } } }
Example #2
Source File: BackupImage.java From big-c with Apache License 2.0 | 5 votes |
/** * Analyze backup storage directories for consistency.<br> * Recover from incomplete checkpoints if required.<br> * Read VERSION and fstime files if exist.<br> * Do not load image or edits. * * @throws IOException if the node should shutdown. */ void recoverCreateRead() throws IOException { for (Iterator<StorageDirectory> it = storage.dirIterator(); it.hasNext();) { StorageDirectory sd = it.next(); StorageState curState; try { curState = sd.analyzeStorage(HdfsServerConstants.StartupOption.REGULAR, storage); // sd is locked but not opened switch(curState) { case NON_EXISTENT: // fail if any of the configured storage dirs are inaccessible throw new InconsistentFSStateException(sd.getRoot(), "checkpoint directory does not exist or is not accessible."); case NOT_FORMATTED: // for backup node all directories may be unformatted initially LOG.info("Storage directory " + sd.getRoot() + " is not formatted."); LOG.info("Formatting ..."); sd.clearDirectory(); // create empty current break; case NORMAL: break; default: // recovery is possible sd.doRecover(curState); } if(curState != StorageState.NOT_FORMATTED) { // read and verify consistency with other directories storage.readProperties(sd); } } catch(IOException ioe) { sd.unlock(); throw ioe; } } }
Example #3
Source File: FSImage.java From hadoop with Apache License 2.0 | 4 votes |
/** * For each storage directory, performs recovery of incomplete transitions * (eg. upgrade, rollback, checkpoint) and inserts the directory's storage * state into the dataDirStates map. * @param dataDirStates output of storage directory states * @return true if there is at least one valid formatted storage directory */ public static boolean recoverStorageDirs(StartupOption startOpt, NNStorage storage, Map<StorageDirectory, StorageState> dataDirStates) throws IOException { boolean isFormatted = false; // This loop needs to be over all storage dirs, even shared dirs, to make // sure that we properly examine their state, but we make sure we don't // mutate the shared dir below in the actual loop. for (Iterator<StorageDirectory> it = storage.dirIterator(); it.hasNext();) { StorageDirectory sd = it.next(); StorageState curState; if (startOpt == StartupOption.METADATAVERSION) { /* All we need is the layout version. */ storage.readProperties(sd); return true; } try { curState = sd.analyzeStorage(startOpt, storage); // sd is locked but not opened switch(curState) { case NON_EXISTENT: // name-node fails if any of the configured storage dirs are missing throw new InconsistentFSStateException(sd.getRoot(), "storage directory does not exist or is not accessible."); case NOT_FORMATTED: break; case NORMAL: break; default: // recovery is possible sd.doRecover(curState); } if (curState != StorageState.NOT_FORMATTED && startOpt != StartupOption.ROLLBACK) { // read and verify consistency with other directories storage.readProperties(sd, startOpt); isFormatted = true; } if (startOpt == StartupOption.IMPORT && isFormatted) // import of a checkpoint is allowed only into empty image directories throw new IOException("Cannot import image from a checkpoint. " + " NameNode already contains an image in " + sd.getRoot()); } catch (IOException ioe) { sd.unlock(); throw ioe; } dataDirStates.put(sd,curState); } return isFormatted; }
Example #4
Source File: SecondaryNameNode.java From hadoop with Apache License 2.0 | 4 votes |
/** * Analyze checkpoint directories. * Create directories if they do not exist. * Recover from an unsuccessful checkpoint if necessary. * * @throws IOException */ void recoverCreate(boolean format) throws IOException { storage.attemptRestoreRemovedStorage(); storage.unlockAll(); for (Iterator<StorageDirectory> it = storage.dirIterator(); it.hasNext();) { StorageDirectory sd = it.next(); boolean isAccessible = true; try { // create directories if don't exist yet if(!sd.getRoot().mkdirs()) { // do nothing, directory is already created } } catch(SecurityException se) { isAccessible = false; } if(!isAccessible) throw new InconsistentFSStateException(sd.getRoot(), "cannot access checkpoint directory."); if (format) { // Don't confirm, since this is just the secondary namenode. LOG.info("Formatting storage directory " + sd); sd.clearDirectory(); } StorageState curState; try { curState = sd.analyzeStorage(HdfsServerConstants.StartupOption.REGULAR, storage); // sd is locked but not opened switch(curState) { case NON_EXISTENT: // fail if any of the configured checkpoint dirs are inaccessible throw new InconsistentFSStateException(sd.getRoot(), "checkpoint directory does not exist or is not accessible."); case NOT_FORMATTED: break; // it's ok since initially there is no current and VERSION case NORMAL: // Read the VERSION file. This verifies that: // (a) the VERSION file for each of the directories is the same, // and (b) when we connect to a NN, we can verify that the remote // node matches the same namespace that we ran on previously. storage.readProperties(sd); break; default: // recovery is possible sd.doRecover(curState); } } catch (IOException ioe) { sd.unlock(); throw ioe; } } }
Example #5
Source File: FSImage.java From big-c with Apache License 2.0 | 4 votes |
/** * For each storage directory, performs recovery of incomplete transitions * (eg. upgrade, rollback, checkpoint) and inserts the directory's storage * state into the dataDirStates map. * @param dataDirStates output of storage directory states * @return true if there is at least one valid formatted storage directory */ public static boolean recoverStorageDirs(StartupOption startOpt, NNStorage storage, Map<StorageDirectory, StorageState> dataDirStates) throws IOException { boolean isFormatted = false; // This loop needs to be over all storage dirs, even shared dirs, to make // sure that we properly examine their state, but we make sure we don't // mutate the shared dir below in the actual loop. for (Iterator<StorageDirectory> it = storage.dirIterator(); it.hasNext();) { StorageDirectory sd = it.next(); StorageState curState; if (startOpt == StartupOption.METADATAVERSION) { /* All we need is the layout version. */ storage.readProperties(sd); return true; } try { curState = sd.analyzeStorage(startOpt, storage); // sd is locked but not opened switch(curState) { case NON_EXISTENT: // name-node fails if any of the configured storage dirs are missing throw new InconsistentFSStateException(sd.getRoot(), "storage directory does not exist or is not accessible."); case NOT_FORMATTED: break; case NORMAL: break; default: // recovery is possible sd.doRecover(curState); } if (curState != StorageState.NOT_FORMATTED && startOpt != StartupOption.ROLLBACK) { // read and verify consistency with other directories storage.readProperties(sd, startOpt); isFormatted = true; } if (startOpt == StartupOption.IMPORT && isFormatted) // import of a checkpoint is allowed only into empty image directories throw new IOException("Cannot import image from a checkpoint. " + " NameNode already contains an image in " + sd.getRoot()); } catch (IOException ioe) { sd.unlock(); throw ioe; } dataDirStates.put(sd,curState); } return isFormatted; }
Example #6
Source File: SecondaryNameNode.java From big-c with Apache License 2.0 | 4 votes |
/** * Analyze checkpoint directories. * Create directories if they do not exist. * Recover from an unsuccessful checkpoint if necessary. * * @throws IOException */ void recoverCreate(boolean format) throws IOException { storage.attemptRestoreRemovedStorage(); storage.unlockAll(); for (Iterator<StorageDirectory> it = storage.dirIterator(); it.hasNext();) { StorageDirectory sd = it.next(); boolean isAccessible = true; try { // create directories if don't exist yet if(!sd.getRoot().mkdirs()) { // do nothing, directory is already created } } catch(SecurityException se) { isAccessible = false; } if(!isAccessible) throw new InconsistentFSStateException(sd.getRoot(), "cannot access checkpoint directory."); if (format) { // Don't confirm, since this is just the secondary namenode. LOG.info("Formatting storage directory " + sd); sd.clearDirectory(); } StorageState curState; try { curState = sd.analyzeStorage(HdfsServerConstants.StartupOption.REGULAR, storage); // sd is locked but not opened switch(curState) { case NON_EXISTENT: // fail if any of the configured checkpoint dirs are inaccessible throw new InconsistentFSStateException(sd.getRoot(), "checkpoint directory does not exist or is not accessible."); case NOT_FORMATTED: break; // it's ok since initially there is no current and VERSION case NORMAL: // Read the VERSION file. This verifies that: // (a) the VERSION file for each of the directories is the same, // and (b) when we connect to a NN, we can verify that the remote // node matches the same namespace that we ran on previously. storage.readProperties(sd); break; default: // recovery is possible sd.doRecover(curState); } } catch (IOException ioe) { sd.unlock(); throw ioe; } } }
Example #7
Source File: DataStorage.java From RDFS with Apache License 2.0 | 4 votes |
/**
 * Analyze and prepare the given data directories for use as DataNode storage.
 * For each directory: analyzes its state, formats it if unformatted (and
 * empty), or runs recovery for any other inconsistent state. Directories
 * that do not exist are removed from {@code dataDirs}; directories that fail
 * analysis/recovery are skipped with a warning.
 *
 * NOTE(review): side effects — mutates the caller's {@code dataDirs}
 * collection (removes non-existent entries), initializes {@code storageID}
 * if null, and appends every successfully analyzed directory to this
 * storage's directory list via {@code addStorageDir}.
 *
 * @param nsInfo namespace info used when formatting an unformatted directory
 * @param dataDirs candidate data directories; non-existent ones are removed
 * @param startOpt startup option passed to {@code analyzeStorage}
 * @return the list of newly added, usable storage directories
 * @throws IOException if an unformatted directory is non-empty, or if none
 *         of the specified directories exist/are accessible
 */
ArrayList<StorageDirectory> analyzeStorageDirs(NamespaceInfo nsInfo,
    Collection<File> dataDirs, StartupOption startOpt ) throws IOException {
  // Ensure a non-null storage ID before any directory is registered.
  if (storageID == null)
    this.storageID = "";

  if (storageDirs == null) {
    this.storageDirs = new ArrayList<StorageDirectory>(dataDirs.size());
  } else {
    // Pre-size the existing list for the directories about to be added.
    ((ArrayList<StorageDirectory>) storageDirs)
        .ensureCapacity(storageDirs.size() + dataDirs.size());
  }

  ArrayList<StorageDirectory> newDirs = new ArrayList<StorageDirectory>(
      dataDirs.size());
  // Collected in lock-step with newDirs; one state per accepted directory.
  ArrayList<StorageState> dataDirStates = new ArrayList<StorageState>(dataDirs.size());
  // Explicit iterator is required: NON_EXISTENT dirs are removed in-loop.
  for(Iterator<File> it = dataDirs.iterator(); it.hasNext();) {
    File dataDir = it.next();
    StorageDirectory sd = new StorageDirectory(dataDir);
    StorageState curState;
    try {
      curState = sd.analyzeStorage(startOpt);
      // sd is locked but not opened
      switch(curState) {
      case NORMAL:
        break;
      case NON_EXISTENT:
        // ignore this storage
        LOG.info("Storage directory " + dataDir + " does not exist.");
        it.remove();
        continue;
      case NOT_FORMATTED: // format
        LOG.info("Storage directory " + dataDir + " is not formatted.");
        // Refuse to format a directory holding unknown data.
        if (!sd.isEmpty()) {
          LOG.error("Storage directory " + dataDir
            + " is not empty, and will not be formatted! Exiting.");
          throw new IOException(
            "Storage directory " + dataDir + " is not empty!");
        }
        LOG.info("Formatting ...");
        format(sd, nsInfo);
        break;
      default:  // recovery part is common
        sd.doRecover(curState);
      }
    } catch (IOException ioe) {
      // Best-effort unlock: this directory is being dropped, so an unlock
      // failure is logged but must not mask the original error.
      try {
        sd.unlock();
      }
      catch (IOException e) {
        LOG.warn("Exception when unlocking storage directory", e);
      }
      LOG.warn("Ignoring storage directory " + dataDir, ioe);
      //continue with other good dirs
      continue;
    }
    // add to the storage list
    addStorageDir(sd);
    newDirs.add(sd);
    dataDirStates.add(curState);
  }

  // dataDirs was pruned of non-existent entries above, so an empty
  // collection here means none of the configured directories exist.
  if (dataDirs.size() == 0)  // none of the data dirs exist
    throw new IOException(
        "All specified directories are not accessible or do not exist.");

  return newDirs;
}