Java Code Examples for org.apache.hadoop.io.IOUtils#listDirectory()
The following examples show how to use org.apache.hadoop.io.IOUtils#listDirectory().
Each example is taken from the open-source project and source file named in its header.
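IOUtils.listDirectory(File dir, FilenameFilter filter) returns the names of the entries in dir that the filter accepts and, as the throws clauses in the examples below show, reports failure with an IOException rather than returning null the way File#list() does. Before the project examples, here is a minimal standalone sketch of the call; the /tmp/edits path and the .log suffix are placeholders, not values from any of the examples:

import java.io.File;
import java.io.FilenameFilter;
import java.io.IOException;
import java.util.List;

import org.apache.hadoop.io.IOUtils;

public class ListDirectoryDemo {
  public static void main(String[] args) throws IOException {
    // Hypothetical directory; substitute a path that exists on your machine.
    File dir = new File("/tmp/edits");

    // Keep only entries whose names end in ".log"; the filter contract is
    // the standard java.io.FilenameFilter one.
    FilenameFilter logFilter = (d, name) -> name.endsWith(".log");

    // Throws IOException if the directory cannot be read.
    List<String> names = IOUtils.listDirectory(dir, logFilter);
    for (String name : names) {
      System.out.println(name);
    }
  }
}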
Example 1
Source File: NNUpgradeUtil.java From hadoop with Apache License 2.0
/**
 * Perform any steps that must succeed across all storage dirs/JournalManagers
 * involved in an upgrade before proceeding onto the actual upgrade stage. If
 * a call to any JM's or local storage dir's doPreUpgrade method fails, then
 * doUpgrade will not be called for any JM. The existing current dir is
 * renamed to previous.tmp, and then a new, empty current dir is created.
 *
 * @param conf configuration for creating {@link EditLogFileOutputStream}
 * @param sd the storage directory to perform the pre-upgrade procedure.
 * @throws IOException in the event of error
 */
static void doPreUpgrade(Configuration conf, StorageDirectory sd)
    throws IOException {
  LOG.info("Starting upgrade of storage directory " + sd.getRoot());

  // rename current to tmp
  renameCurToTmp(sd);

  final File curDir = sd.getCurrentDir();
  final File tmpDir = sd.getPreviousTmp();
  List<String> fileNameList = IOUtils.listDirectory(tmpDir,
      new FilenameFilter() {
        @Override
        public boolean accept(File dir, String name) {
          return dir.equals(tmpDir)
              && name.startsWith(NNStorage.NameNodeFile.EDITS.getName());
        }
      });

  for (String s : fileNameList) {
    File prevFile = new File(tmpDir, s);
    File newFile = new File(curDir, prevFile.getName());
    Files.createLink(newFile.toPath(), prevFile.toPath());
  }
}
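A note on the filter: java.io.FilenameFilter has a single abstract method, so on Java 8 and later the anonymous class above can be replaced with a lambda. A sketch of the equivalent call:

// Equivalent to the anonymous FilenameFilter above (Java 8+);
// tmpDir is final, so it can be captured by the lambda.
List<String> fileNameList = IOUtils.listDirectory(tmpDir,
    (dir, name) -> dir.equals(tmpDir)
        && name.startsWith(NNStorage.NameNodeFile.EDITS.getName()));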
Example 2
Source File: FsVolumeImpl.java From hadoop with Apache License 2.0
/**
 * Get the next subdirectory within the block pool slice.
 *
 * @return The next subdirectory within the block pool slice, or
 *         null if there are no more.
 */
private String getNextSubDir(String prev, File dir) throws IOException {
  List<String> children = IOUtils.listDirectory(dir, SubdirFilter.INSTANCE);
  cache = null;
  cacheMs = 0;
  if (children.size() == 0) {
    LOG.trace("getNextSubDir({}, {}): no subdirectories found in {}",
        storageID, bpid, dir.getAbsolutePath());
    return null;
  }
  Collections.sort(children);
  String nextSubDir = nextSorted(children, prev);
  if (nextSubDir == null) {
    LOG.trace("getNextSubDir({}, {}): no more subdirectories found in {}",
        storageID, bpid, dir.getAbsolutePath());
  } else {
    LOG.trace("getNextSubDir({}, {}): picking next subdirectory {} " +
        "within {}", storageID, bpid, nextSubDir, dir.getAbsolutePath());
  }
  return nextSubDir;
}
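This example relies on a private nextSorted helper in the same class. Its contract, implied by the call site, is: given a sorted list, return the first element strictly after prev (or the first element when prev is null), and null when the list is exhausted. An illustrative sketch of that contract, not the Hadoop source:

// Illustrative implementation of the nextSorted contract used above.
static String nextSorted(List<String> sorted, String prev) {
  int idx = 0;
  if (prev != null) {
    idx = Collections.binarySearch(sorted, prev);
    // If prev was found, step past it; otherwise binarySearch returned
    // (-(insertion point) - 1), so recover the insertion point.
    idx = (idx >= 0) ? idx + 1 : -(idx + 1);
  }
  return (idx < sorted.size()) ? sorted.get(idx) : null;
}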
Example 3
Source File: FsVolumeImpl.java From big-c with Apache License 2.0
/**
 * Get the next subdirectory within the block pool slice.
 *
 * @return The next subdirectory within the block pool slice, or
 *         null if there are no more.
 */
private String getNextSubDir(String prev, File dir) throws IOException {
  List<String> children = IOUtils.listDirectory(dir, SubdirFilter.INSTANCE);
  cache = null;
  cacheMs = 0;
  if (children.size() == 0) {
    LOG.trace("getNextSubDir({}, {}): no subdirectories found in {}",
        storageID, bpid, dir.getAbsolutePath());
    return null;
  }
  Collections.sort(children);
  String nextSubDir = nextSorted(children, prev);
  if (nextSubDir == null) {
    LOG.trace("getNextSubDir({}, {}): no more subdirectories found in {}",
        storageID, bpid, dir.getAbsolutePath());
  } else {
    LOG.trace("getNextSubDir({}, {}): picking next subdirectory {} " +
        "within {}", storageID, bpid, nextSubDir, dir.getAbsolutePath());
  }
  return nextSubDir;
}
Example 4
Source File: FsVolumeImpl.java From hadoop with Apache License 2.0
private List<String> getSubdirEntries() throws IOException {
  if (state.curFinalizedSubDir == null) {
    return null; // There are no entries in the null subdir.
  }
  long now = Time.monotonicNow();
  if (cache != null) {
    long delta = now - cacheMs;
    if (delta < maxStalenessMs) {
      return cache;
    } else {
      LOG.trace("getSubdirEntries({}, {}): purging entries cache for {} " +
          "after {} ms.", storageID, bpid, state.curFinalizedSubDir, delta);
      cache = null;
    }
  }
  File dir = Paths.get(bpidDir.getAbsolutePath(), "current", "finalized",
      state.curFinalizedDir, state.curFinalizedSubDir).toFile();
  List<String> entries = IOUtils.listDirectory(dir, BlockFileFilter.INSTANCE);
  if (entries.size() == 0) {
    entries = null;
  } else {
    Collections.sort(entries);
  }
  if (entries == null) {
    LOG.trace("getSubdirEntries({}, {}): no entries found in {}",
        storageID, bpid, dir.getAbsolutePath());
  } else {
    LOG.trace("getSubdirEntries({}, {}): listed {} entries in {}",
        storageID, bpid, entries.size(), dir.getAbsolutePath());
  }
  cache = entries;
  cacheMs = now;
  return cache;
}
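A note on the caching pattern: the example measures staleness with Time.monotonicNow() rather than System.currentTimeMillis(), so the cache lifetime is unaffected by wall-clock adjustments. A stripped-down sketch of the same pattern; the class name and the 2-second bound are placeholders, not Hadoop code:

import java.io.File;
import java.io.IOException;
import java.util.List;

import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.util.Time;

// Illustrative staleness cache following the pattern in Example 4.
class CachedLister {
  private List<String> cache;   // last listing, or null
  private long cacheMs;         // monotonic timestamp of that listing
  private static final long MAX_STALENESS_MS = 2000;

  List<String> listCached(File dir) throws IOException {
    long now = Time.monotonicNow();
    if (cache != null && now - cacheMs < MAX_STALENESS_MS) {
      return cache; // still fresh; skip the disk I/O
    }
    cache = IOUtils.listDirectory(dir, (d, name) -> true); // accept all entries
    cacheMs = now;
    return cache;
  }
}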
Example 5
Source File: FsVolumeImpl.java From big-c with Apache License 2.0
private List<String> getSubdirEntries() throws IOException {
  if (state.curFinalizedSubDir == null) {
    return null; // There are no entries in the null subdir.
  }
  long now = Time.monotonicNow();
  if (cache != null) {
    long delta = now - cacheMs;
    if (delta < maxStalenessMs) {
      return cache;
    } else {
      LOG.trace("getSubdirEntries({}, {}): purging entries cache for {} " +
          "after {} ms.", storageID, bpid, state.curFinalizedSubDir, delta);
      cache = null;
    }
  }
  File dir = Paths.get(bpidDir.getAbsolutePath(), "current", "finalized",
      state.curFinalizedDir, state.curFinalizedSubDir).toFile();
  List<String> entries = IOUtils.listDirectory(dir, BlockFileFilter.INSTANCE);
  if (entries.size() == 0) {
    entries = null;
  } else {
    Collections.sort(entries);
  }
  if (entries == null) {
    LOG.trace("getSubdirEntries({}, {}): no entries found in {}",
        storageID, bpid, dir.getAbsolutePath());
  } else {
    LOG.trace("getSubdirEntries({}, {}): listed {} entries in {}",
        storageID, bpid, entries.size(), dir.getAbsolutePath());
  }
  cache = entries;
  cacheMs = now;
  return cache;
}
Example 6
Source File: TestDFSUpgrade.java From hadoop with Apache License 2.0
@Test
public void testPreserveEditLogs() throws Exception {
  conf = new HdfsConfiguration();
  conf = UpgradeUtilities.initializeStorageStateConf(1, conf);
  String[] nameNodeDirs = conf.getStrings(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY);
  conf.setBoolean(DFSConfigKeys.DFS_DATANODE_DUPLICATE_REPLICA_DELETION, false);

  log("Normal NameNode upgrade", 1);
  File[] created =
      UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "current");
  for (final File createdDir : created) {
    List<String> fileNameList =
        IOUtils.listDirectory(createdDir, EditLogsFilter.INSTANCE);
    for (String fileName : fileNameList) {
      String tmpFileName = fileName + ".tmp";
      File existingFile = new File(createdDir, fileName);
      File tmpFile = new File(createdDir, tmpFileName);
      Files.move(existingFile.toPath(), tmpFile.toPath());
      File newFile = new File(createdDir, fileName);
      Preconditions.checkState(newFile.createNewFile(),
          "Cannot create new edits log file in " + createdDir);
      EditLogFileInputStream in = new EditLogFileInputStream(tmpFile,
          HdfsConstants.INVALID_TXID, HdfsConstants.INVALID_TXID, false);
      EditLogFileOutputStream out = new EditLogFileOutputStream(conf, newFile,
          (int) tmpFile.length());
      out.create(NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION + 1);
      FSEditLogOp logOp = in.readOp();
      while (logOp != null) {
        out.write(logOp);
        logOp = in.readOp();
      }
      out.setReadyToFlush();
      out.flushAndSync(true);
      out.close();
      Files.delete(tmpFile.toPath());
    }
  }

  cluster = createCluster();
  DFSInotifyEventInputStream ieis =
      cluster.getFileSystem().getInotifyEventStream(0);
  EventBatch batch = ieis.poll();
  Event[] events = batch.getEvents();
  assertTrue("Should be able to get transactions before the upgrade.",
      events.length > 0);
  assertEquals(events[0].getEventType(), Event.EventType.CREATE);
  assertEquals(((CreateEvent) events[0]).getPath(), "/TestUpgrade");
  cluster.shutdown();
  UpgradeUtilities.createEmptyDirs(nameNodeDirs);
}
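In effect, the loop above re-encodes every existing edit log segment at CURRENT_LAYOUT_VERSION + 1 before the upgrade runs: each log found by listDirectory() is moved aside to a .tmp file, replayed op by op from an EditLogFileInputStream into a fresh EditLogFileOutputStream at the original name, flushed, and the temporary copy deleted. The assertions then confirm that pre-upgrade transactions (the CREATE of /TestUpgrade) are still readable through the inotify event stream once the cluster comes up.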
Example 7
Source File: TestDFSUpgrade.java From big-c with Apache License 2.0
@Test
public void testPreserveEditLogs() throws Exception {
  conf = new HdfsConfiguration();
  conf = UpgradeUtilities.initializeStorageStateConf(1, conf);
  String[] nameNodeDirs = conf.getStrings(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY);
  conf.setBoolean(DFSConfigKeys.DFS_DATANODE_DUPLICATE_REPLICA_DELETION, false);

  log("Normal NameNode upgrade", 1);
  File[] created =
      UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "current");
  for (final File createdDir : created) {
    List<String> fileNameList =
        IOUtils.listDirectory(createdDir, EditLogsFilter.INSTANCE);
    for (String fileName : fileNameList) {
      String tmpFileName = fileName + ".tmp";
      File existingFile = new File(createdDir, fileName);
      File tmpFile = new File(createdDir, tmpFileName);
      Files.move(existingFile.toPath(), tmpFile.toPath());
      File newFile = new File(createdDir, fileName);
      Preconditions.checkState(newFile.createNewFile(),
          "Cannot create new edits log file in " + createdDir);
      EditLogFileInputStream in = new EditLogFileInputStream(tmpFile,
          HdfsConstants.INVALID_TXID, HdfsConstants.INVALID_TXID, false);
      EditLogFileOutputStream out = new EditLogFileOutputStream(conf, newFile,
          (int) tmpFile.length());
      out.create(NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION + 1);
      FSEditLogOp logOp = in.readOp();
      while (logOp != null) {
        out.write(logOp);
        logOp = in.readOp();
      }
      out.setReadyToFlush();
      out.flushAndSync(true);
      out.close();
      Files.delete(tmpFile.toPath());
    }
  }

  cluster = createCluster();
  DFSInotifyEventInputStream ieis =
      cluster.getFileSystem().getInotifyEventStream(0);
  EventBatch batch = ieis.poll();
  Event[] events = batch.getEvents();
  assertTrue("Should be able to get transactions before the upgrade.",
      events.length > 0);
  assertEquals(events[0].getEventType(), Event.EventType.CREATE);
  assertEquals(((CreateEvent) events[0]).getPath(), "/TestUpgrade");
  cluster.shutdown();
  UpgradeUtilities.createEmptyDirs(nameNodeDirs);
}
Example 8
Source File: NNUpgradeUtil.java From big-c with Apache License 2.0
/**
 * Perform any steps that must succeed across all storage dirs/JournalManagers
 * involved in an upgrade before proceeding onto the actual upgrade stage. If
 * a call to any JM's or local storage dir's doPreUpgrade method fails, then
 * doUpgrade will not be called for any JM. The existing current dir is
 * renamed to previous.tmp, and then a new, empty current dir is created.
 *
 * @param conf configuration for creating {@link EditLogFileOutputStream}
 * @param sd the storage directory to perform the pre-upgrade procedure.
 * @throws IOException in the event of error
 */
static void doPreUpgrade(Configuration conf, StorageDirectory sd)
    throws IOException {
  LOG.info("Starting upgrade of storage directory " + sd.getRoot());

  // rename current to tmp
  renameCurToTmp(sd);

  final File curDir = sd.getCurrentDir();
  final File tmpDir = sd.getPreviousTmp();
  List<String> fileNameList = IOUtils.listDirectory(tmpDir,
      new FilenameFilter() {
        @Override
        public boolean accept(File dir, String name) {
          return dir.equals(tmpDir)
              && name.startsWith(NNStorage.NameNodeFile.EDITS.getName());
        }
      });

  for (String s : fileNameList) {
    File prevFile = new File(tmpDir, s);
    File newFile = new File(curDir, prevFile.getName());
    Files.createLink(newFile.toPath(), prevFile.toPath());
  }
}