Java Code Examples for org.apache.hadoop.hdfs.DistributedFileSystem#rename()
The following examples show how to use
org.apache.hadoop.hdfs.DistributedFileSystem#rename().
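Before the examples, here is a minimal, self-contained sketch of the basic call: the boolean overload inherited from FileSystem reports failure by returning false rather than throwing. The NameNode URI and paths below are placeholders, not taken from any of the examples.

import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class RenameSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Placeholder NameNode address; adjust for your cluster. The cast is
    // safe for an hdfs:// URI, where FileSystem.get returns a
    // DistributedFileSystem.
    DistributedFileSystem dfs = (DistributedFileSystem) FileSystem
        .get(URI.create("hdfs://localhost:8020"), conf);
    try {
      Path src = new Path("/tmp/rename-src");
      Path dst = new Path("/tmp/rename-dst");
      dfs.mkdirs(src);
      // The boolean overload signals failure (e.g. missing source) by
      // returning false, not by throwing.
      if (!dfs.rename(src, dst)) {
        System.err.println("rename failed: " + src + " -> " + dst);
      }
    } finally {
      dfs.close();
    }
  }
}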
Example 1
Source File: TestSaveNamespace.java From hadoop with Apache License 2.0

/**
 * Test for save namespace should succeed when parent directory renamed with
 * open lease and destination directory exist.
 * This test is a regression for HDFS-2827
 */
@Test
public void testSaveNamespaceWithRenamedLease() throws Exception {
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(new Configuration())
      .numDataNodes(1).build();
  cluster.waitActive();
  DistributedFileSystem fs = (DistributedFileSystem) cluster.getFileSystem();
  OutputStream out = null;
  try {
    fs.mkdirs(new Path("/test-target"));
    out = fs.create(new Path("/test-source/foo")); // don't close
    fs.rename(new Path("/test-source/"), new Path("/test-target/"));

    fs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
    cluster.getNameNodeRpc().saveNamespace();
    fs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
  } finally {
    IOUtils.cleanup(LOG, out, fs);
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
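Note the destination used above: /test-target already exists as a directory, so the boolean rename moves /test-source under it (the open file ends up at /test-target/test-source/foo) rather than replacing it. A small sketch of that behavior, assuming an already-initialized DistributedFileSystem; the class and method names are illustrative only.

import java.io.IOException;

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

class RenameIntoDirectory {
  /**
   * Moves src under an existing directory dstDir, mirroring the test above:
   * renaming onto an existing directory places the source inside it,
   * e.g. /test-source -> /test-target/test-source.
   */
  static Path moveUnder(DistributedFileSystem dfs, Path src, Path dstDir)
      throws IOException {
    if (!dfs.rename(src, dstDir)) {
      throw new IOException("rename failed: " + src + " -> " + dstDir);
    }
    return new Path(dstDir, src.getName());
  }
}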
Example 2
Source File: TestStorageMover.java From hadoop with Apache License 2.0

/**
 * Move hot files to warm and cold, warm files to hot and cold,
 * and cold files to hot and warm.
 */
void moveAround(DistributedFileSystem dfs) throws Exception {
  for (Path srcDir : map.keySet()) {
    int i = 0;
    for (Path dstDir : map.keySet()) {
      if (!srcDir.equals(dstDir)) {
        final Path src = new Path(srcDir, "file" + i++);
        final Path dst = new Path(dstDir,
            srcDir.getName() + "2" + dstDir.getName());
        LOG.info("rename " + src + " to " + dst);
        dfs.rename(src, dst);
      }
    }
  }
}
Example 3
Source File: DistCpSync.java From hadoop with Apache License 2.0

/**
 * Finish the rename operations: move all the intermediate files/directories
 * from the tmp dir to the final targets.
 */
private static void moveToTarget(DiffInfo[] diffs,
    DistributedFileSystem targetFs) throws IOException {
  // sort the diffs based on their target paths to make sure the parent
  // directories are created first.
  Arrays.sort(diffs, DiffInfo.targetComparator);
  for (DiffInfo diff : diffs) {
    if (diff.target != null) {
      if (!targetFs.exists(diff.target.getParent())) {
        targetFs.mkdirs(diff.target.getParent());
      }
      targetFs.rename(diff.getTmp(), diff.target);
    }
  }
}
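The loop above only works because of the sort: a parent directory's rename must land before any rename targeting a path inside it. DiffInfo.targetComparator is internal to DistCp, but ordering target paths as plain strings already yields the required parent-before-child property, since an ancestor path is a prefix of all its descendants. A sketch of such an ordering; the class and field names here are illustrative, not the DistCp implementation.

import java.util.Arrays;
import java.util.Comparator;

import org.apache.hadoop.fs.Path;

class ParentFirstOrder {
  // Comparing paths as strings puts every ancestor before its descendants,
  // because "/dst/a" is a prefix of "/dst/a/b".
  static final Comparator<Path> PARENT_FIRST =
      Comparator.comparing(Path::toString);

  public static void main(String[] args) {
    Path[] targets = {
        new Path("/dst/a/b/file"), new Path("/dst/a"), new Path("/dst/a/b")
    };
    Arrays.sort(targets, PARENT_FIRST);
    // Prints [/dst/a, /dst/a/b, /dst/a/b/file]
    System.out.println(Arrays.toString(targets));
  }
}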
Example 4
Source File: TestINodeFile.java From hadoop with Apache License 2.0

/**
 * FSDirectory#unprotectedSetQuota creates a new INodeDirectoryWithQuota to
 * replace the original INodeDirectory. Before HDFS-4243, the parent field of
 * all the children INodes of the target INodeDirectory is not changed to
 * point to the new INodeDirectoryWithQuota. This testcase tests this
 * scenario.
 */
@Test
public void testGetFullPathNameAfterSetQuota() throws Exception {
  long fileLen = 1024;
  replication = 3;
  Configuration conf = new Configuration();
  MiniDFSCluster cluster = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf)
        .numDataNodes(replication).build();
    cluster.waitActive();
    FSNamesystem fsn = cluster.getNamesystem();
    FSDirectory fsdir = fsn.getFSDirectory();
    DistributedFileSystem dfs = cluster.getFileSystem();

    // Create a file for test
    final Path dir = new Path("/dir");
    final Path file = new Path(dir, "file");
    DFSTestUtil.createFile(dfs, file, fileLen, replication, 0L);

    // Check the full path name of the INode associating with the file
    INode fnode = fsdir.getINode(file.toString());
    assertEquals(file.toString(), fnode.getFullPathName());

    // Call FSDirectory#unprotectedSetQuota which calls
    // INodeDirectory#replaceChild
    dfs.setQuota(dir, Long.MAX_VALUE - 1, replication * fileLen * 10);
    INodeDirectory dirNode = getDir(fsdir, dir);
    assertEquals(dir.toString(), dirNode.getFullPathName());
    assertTrue(dirNode.isWithQuota());

    final Path newDir = new Path("/newdir");
    final Path newFile = new Path(newDir, "file");
    // Also rename dir
    dfs.rename(dir, newDir, Options.Rename.OVERWRITE);
    // /dir/file now should be renamed to /newdir/file
    fnode = fsdir.getINode(newFile.toString());
    // getFullPathName can return correct result only if the parent field of
    // child node is set correctly
    assertEquals(newFile.toString(), fnode.getFullPathName());
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
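Unlike the boolean overload in the earlier examples, the rename(Path, Path, Options.Rename...) variant used here throws an IOException describing the failure, and Options.Rename.OVERWRITE lets it atomically replace an existing destination. A minimal sketch under those assumptions; the class name and paths are placeholders.

import java.io.IOException;

import org.apache.hadoop.fs.Options;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

class OverwriteRename {
  /**
   * Replaces dst with src in a single namespace operation. With
   * Options.Rename.NONE instead, an existing destination would make the
   * call throw (e.g. a FileAlreadyExistsException) rather than return false.
   */
  static void replace(DistributedFileSystem dfs, Path src, Path dst)
      throws IOException {
    dfs.rename(src, dst, Options.Rename.OVERWRITE);
  }
}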
Example 5
Source File: TestOfflineImageViewer.java From hadoop with Apache License 2.0

@BeforeClass
public static void createOriginalFSImage() throws IOException {
  MiniDFSCluster cluster = null;
  try {
    Configuration conf = new Configuration();
    conf.setLong(
        DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_MAX_LIFETIME_KEY, 10000);
    conf.setLong(
        DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_RENEW_INTERVAL_KEY, 5000);
    conf.setBoolean(
        DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY, true);
    conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTH_TO_LOCAL,
        "RULE:[2:$1@$0](JobTracker@.*FOO.COM)s/@.*//" + "DEFAULT");
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    cluster.waitActive();
    DistributedFileSystem hdfs = cluster.getFileSystem();

    // Create a reasonable namespace
    for (int i = 0; i < NUM_DIRS; i++, dirCount++) {
      Path dir = new Path("/dir" + i);
      hdfs.mkdirs(dir);
      writtenFiles.put(dir.toString(), pathToFileEntry(hdfs, dir.toString()));
      for (int j = 0; j < FILES_PER_DIR; j++) {
        Path file = new Path(dir, "file" + j);
        FSDataOutputStream o = hdfs.create(file);
        o.write(23);
        o.close();
        writtenFiles.put(file.toString(),
            pathToFileEntry(hdfs, file.toString()));
      }
    }

    // Create an empty directory
    Path emptydir = new Path("/emptydir");
    hdfs.mkdirs(emptydir);
    dirCount++;
    writtenFiles.put(emptydir.toString(), hdfs.getFileStatus(emptydir));

    // Create a directory whose name should be escaped in XML
    Path invalidXMLDir = new Path("/dirContainingInvalidXMLChar\u0000here");
    hdfs.mkdirs(invalidXMLDir);
    dirCount++;

    // Get delegation tokens so we log the delegation token op
    Token<?>[] delegationTokens = hdfs.addDelegationTokens(TEST_RENEWER, null);
    for (Token<?> t : delegationTokens) {
      LOG.debug("got token " + t);
    }

    // Create INodeReference
    final Path src = new Path("/src");
    hdfs.mkdirs(src);
    dirCount++;
    writtenFiles.put(src.toString(), hdfs.getFileStatus(src));
    final Path orig = new Path("/src/orig");
    hdfs.mkdirs(orig);
    hdfs.allowSnapshot(src);
    hdfs.createSnapshot(src, "snapshot");
    final Path dst = new Path("/dst");
    hdfs.rename(orig, dst);
    dirCount++;
    writtenFiles.put(dst.toString(), hdfs.getFileStatus(dst));

    // Set XAttrs so the fsimage contains XAttr ops
    final Path xattr = new Path("/xattr");
    hdfs.mkdirs(xattr);
    dirCount++;
    hdfs.setXAttr(xattr, "user.a1", new byte[]{ 0x31, 0x32, 0x33 });
    hdfs.setXAttr(xattr, "user.a2", new byte[]{ 0x37, 0x38, 0x39 });
    // OIV should be able to handle empty value XAttrs
    hdfs.setXAttr(xattr, "user.a3", null);
    writtenFiles.put(xattr.toString(), hdfs.getFileStatus(xattr));

    // Write results to the fsimage file
    hdfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER, false);
    hdfs.saveNamespace();

    // Determine location of fsimage file
    originalFsimage = FSImageTestUtil.findLatestImageFile(FSImageTestUtil
        .getFSImage(cluster.getNameNode()).getStorage().getStorageDir(0));
    if (originalFsimage == null) {
      throw new RuntimeException("Didn't generate or can't find fsimage");
    }
    LOG.debug("original FS image file is " + originalFsimage);
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
Example 6
Source File: OfflineEditsViewerHelper.java From RDFS with Apache License 2.0

/**
 * Run file operations to create edits for all op codes
 * to be tested.
 */
private void runOperations() throws IOException {
  LOG.info("Creating edits by performing fs operations");
  // no check, if it's not it throws an exception which is what we want
  DistributedFileSystem dfs = (DistributedFileSystem) cluster.getFileSystem();

  // OP_ADD 0, OP_SET_GENSTAMP 10
  Path pathFileCreate = new Path("/file_create");
  FSDataOutputStream s = dfs.create(pathFileCreate);
  // OP_CLOSE 9
  s.close();
  // OP_RENAME 1
  Path pathFileMoved = new Path("/file_moved");
  dfs.rename(pathFileCreate, pathFileMoved);
  // OP_DELETE 2
  dfs.delete(pathFileMoved, false);
  // OP_MKDIR 3
  Path pathDirectoryMkdir = new Path("/directory_mkdir");
  dfs.mkdirs(pathDirectoryMkdir);
  // OP_SET_REPLICATION 4
  s = dfs.create(pathFileCreate);
  s.close();
  dfs.setReplication(pathFileCreate, (short) 1);
  // OP_SET_PERMISSIONS 7
  Short permission = 0777;
  dfs.setPermission(pathFileCreate, new FsPermission(permission));
  // OP_SET_OWNER 8
  dfs.setOwner(pathFileCreate, new String("newOwner"), null);
  // OP_CLOSE 9 see above
  // OP_SET_GENSTAMP 10 see above
  // OP_SET_NS_QUOTA 11 obsolete
  // OP_CLEAR_NS_QUOTA 12 obsolete
  // OP_TIMES 13
  long mtime = 1285195527000L; // Wed, 22 Sep 2010 22:45:27 GMT
  long atime = mtime;
  dfs.setTimes(pathFileCreate, mtime, atime);
  // OP_SET_QUOTA 14
  dfs.setQuota(pathDirectoryMkdir, 1000L, FSConstants.QUOTA_DONT_SET);
  // OP_CONCAT_DELETE 16
  Path pathConcatTarget = new Path("/file_concat_target");
  Path[] pathConcatFiles = new Path[2];
  pathConcatFiles[0] = new Path("/file_concat_0");
  pathConcatFiles[1] = new Path("/file_concat_1");
  long length = blockSize * 3; // multiple of blocksize for concat
  short replication = 1;
  long seed = 1;
  DFSTestUtil.createFile(dfs, pathConcatTarget, length, replication, seed);
  DFSTestUtil.createFile(dfs, pathConcatFiles[0], length, replication, seed);
  DFSTestUtil.createFile(dfs, pathConcatFiles[1], length, replication, seed);
  dfs.concat(pathConcatTarget, pathConcatFiles, false);

  // sync to disk, otherwise we parse partial edits
  cluster.getNameNode().getFSImage().getEditLog().logSync();
  dfs.close();
}