Java Code Examples for org.apache.hadoop.fs.LocalFileSystem#exists()
The following examples show how to use org.apache.hadoop.fs.LocalFileSystem#exists(). Each example notes its original source file, the project it comes from, and its license.
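Before the project examples, here is a minimal, self-contained sketch of the basic exists() pattern on the local filesystem. The class name and the /tmp/example-data path are placeholders chosen for illustration and are not part of any of the projects below.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.LocalFileSystem;
import org.apache.hadoop.fs.Path;

public class LocalExistsExample {
  public static void main(String[] args) throws Exception {
    // Obtain the local filesystem implementation backed by the given configuration.
    LocalFileSystem fs = FileSystem.getLocal(new Configuration());
    // Placeholder path; replace with a real local path.
    Path path = new Path("/tmp/example-data");
    if (fs.exists(path)) {
      System.out.println(path + " already exists");
    } else {
      // Create the directory (and any missing parents) if it is absent.
      fs.mkdirs(path);
    }
  }
}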
Example 1
Source File: TestBloomMapFile.java, from hadoop, Apache License 2.0
@Override
public void setUp() throws Exception {
  LocalFileSystem fs = FileSystem.getLocal(conf);
  // Delete any leftover test root from a previous run, then recreate it empty.
  if (fs.exists(TEST_ROOT) && !fs.delete(TEST_ROOT, true)) {
    Assert.fail("Can't clean up test root dir");
  }
  fs.mkdirs(TEST_ROOT);
}
Example 2
Source File: TestMapFile.java, from hadoop, Apache License 2.0
@Before
public void setup() throws Exception {
  LocalFileSystem fs = FileSystem.getLocal(conf);
  // Delete any leftover test directory from a previous run, then recreate it empty.
  if (fs.exists(TEST_DIR) && !fs.delete(TEST_DIR, true)) {
    Assert.fail("Can't clean up test root dir");
  }
  fs.mkdirs(TEST_DIR);
}
Example 3
Source File: TestBloomMapFile.java, from big-c, Apache License 2.0
@Override
public void setUp() throws Exception {
  LocalFileSystem fs = FileSystem.getLocal(conf);
  // Delete any leftover test root from a previous run, then recreate it empty.
  if (fs.exists(TEST_ROOT) && !fs.delete(TEST_ROOT, true)) {
    Assert.fail("Can't clean up test root dir");
  }
  fs.mkdirs(TEST_ROOT);
}
Example 4
Source File: TestMapFile.java, from big-c, Apache License 2.0
@Before
public void setup() throws Exception {
  LocalFileSystem fs = FileSystem.getLocal(conf);
  // Delete any leftover test directory from a previous run, then recreate it empty.
  if (fs.exists(TEST_DIR) && !fs.delete(TEST_DIR, true)) {
    Assert.fail("Can't clean up test root dir");
  }
  fs.mkdirs(TEST_DIR);
}
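Examples 1 through 4 all follow the same clean-then-create setup pattern: check exists(), recursively delete() any leftover directory, and mkdirs() a fresh one. The sketch below factors that pattern into a standalone helper; the class and method names (TestDirs, resetDir) are hypothetical and do not appear in the Hadoop test code.

import java.io.IOException;
import org.apache.hadoop.fs.LocalFileSystem;
import org.apache.hadoop.fs.Path;

public final class TestDirs {
  private TestDirs() {}

  // Hypothetical helper: delete 'dir' if it already exists, then recreate it empty.
  public static void resetDir(LocalFileSystem fs, Path dir) throws IOException {
    if (fs.exists(dir) && !fs.delete(dir, true)) {
      throw new IOException("Can't clean up test dir " + dir);
    }
    if (!fs.mkdirs(dir)) {
      throw new IOException("Can't create test dir " + dir);
    }
  }
}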
Example 5
Source File: UpgradeUtilities.java, from RDFS, Apache License 2.0
public static File[] createStorageDirs(NodeType nodeType, String[] parents,
    String dirName, File srcFile) throws Exception {
  File[] retVal = new File[parents.length];
  for (int i = 0; i < parents.length; i++) {
    File newDir = new File(parents[i], dirName);
    createEmptyDirs(new String[] {newDir.toString()});
    LocalFileSystem localFS = FileSystem.getLocal(new Configuration());
    switch (nodeType) {
    case NAME_NODE:
      localFS.copyToLocalFile(new Path(srcFile.toString(), "current"),
                              new Path(newDir.toString()),
                              false);
      // Copy the "image" directory alongside "current" only if it is not already there.
      Path newImgDir = new Path(newDir.getParent(), "image");
      if (!localFS.exists(newImgDir))
        localFS.copyToLocalFile(new Path(srcFile.toString(), "image"),
                                newImgDir,
                                false);
      break;
    case DATA_NODE:
      localFS.copyToLocalFile(new Path(srcFile.toString(), "current"),
                              new Path(newDir.toString()),
                              false);
      // Copy the "storage" file alongside "current" only if it is not already there.
      Path newStorageFile = new Path(newDir.getParent(), "storage");
      if (!localFS.exists(newStorageFile))
        localFS.copyToLocalFile(new Path(srcFile.toString(), "storage"),
                                newStorageFile,
                                false);
      break;
    }
    retVal[i] = newDir;
  }
  return retVal;
}
Example 6
Source File: UpgradeUtilities.java, from hadoop-gpu, Apache License 2.0
/**
 * Simulate the <code>dfs.name.dir</code> or <code>dfs.data.dir</code>
 * of a populated DFS filesystem.
 *
 * This method creates and populates the directory specified by
 * <code>parent/dirName</code>, for each parent directory.
 * The contents of the new directories will be appropriate for the
 * given node type.  If the directory does not exist, it will be
 * created.  If the directory already exists, it will first be deleted.
 *
 * By default, a singleton master populated storage directory is
 * created for a Namenode (contains edits, fsimage, version, and time
 * files) and a Datanode (contains version and block files).  These
 * directories are then copied by this method to create new storage
 * directories of the appropriate type (Namenode or Datanode).
 *
 * @return the array of created directories
 */
public static File[] createStorageDirs(NodeType nodeType, String[] parents,
    String dirName) throws Exception {
  File[] retVal = new File[parents.length];
  for (int i = 0; i < parents.length; i++) {
    File newDir = new File(parents[i], dirName);
    createEmptyDirs(new String[] {newDir.toString()});
    LocalFileSystem localFS = FileSystem.getLocal(new Configuration());
    switch (nodeType) {
    case NAME_NODE:
      localFS.copyToLocalFile(new Path(namenodeStorage.toString(), "current"),
                              new Path(newDir.toString()),
                              false);
      Path newImgDir = new Path(newDir.getParent(), "image");
      if (!localFS.exists(newImgDir))
        localFS.copyToLocalFile(new Path(namenodeStorage.toString(), "image"),
                                newImgDir,
                                false);
      break;
    case DATA_NODE:
      localFS.copyToLocalFile(new Path(datanodeStorage.toString(), "current"),
                              new Path(newDir.toString()),
                              false);
      Path newStorageFile = new Path(newDir.getParent(), "storage");
      if (!localFS.exists(newStorageFile))
        localFS.copyToLocalFile(new Path(datanodeStorage.toString(), "storage"),
                                newStorageFile,
                                false);
      break;
    }
    retVal[i] = newDir;
  }
  return retVal;
}