Java Code Examples for org.apache.hadoop.hdfs.DistributedFileSystem#mkdirs()
The following examples show how to use org.apache.hadoop.hdfs.DistributedFileSystem#mkdirs().
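Before the project-specific examples, here is a minimal, self-contained sketch of the call itself. It is illustrative only: the NameNode URI, paths, and permission string below are placeholder values rather than settings taken from any of the examples, and it assumes an HDFS cluster is reachable at that URI.

import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class MkdirsSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Placeholder NameNode address; substitute your cluster's fs.defaultFS.
    URI hdfsUri = URI.create("hdfs://localhost:8020");

    try (FileSystem fs = FileSystem.get(hdfsUri, conf)) {
      // For an hdfs:// URI this is normally a DistributedFileSystem instance.
      DistributedFileSystem dfs = (DistributedFileSystem) fs;

      // mkdirs creates the directory and any missing parents,
      // returning true on success.
      boolean created = dfs.mkdirs(new Path("/tmp/example/nested"));
      System.out.println("created: " + created);

      // An overload also takes an FsPermission for the new directories.
      dfs.mkdirs(new Path("/tmp/example/permissioned"), new FsPermission("755"));
    }
  }
}

As in several of the examples below, the cast to DistributedFileSystem is what exposes HDFS-specific calls such as setQuota or allowSnapshot alongside mkdirs().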
Example 1
Source File: TestHDFSFileContextMainOperations.java From big-c with Apache License 2.0
/**
 * Perform operations such as setting quota, deletion of files, rename and
 * ensure system can apply edits log during startup.
 */
@Test
public void testEditsLogRename() throws Exception {
  DistributedFileSystem fs = cluster.getFileSystem();
  Path src1 = getTestRootPath(fc, "testEditsLogRename/srcdir/src1");
  Path dst1 = getTestRootPath(fc, "testEditsLogRename/dstdir/dst1");
  createFile(src1);
  fs.mkdirs(dst1.getParent());
  createFile(dst1);

  // Set quota so that dst1 parent cannot allow under it new files/directories
  fs.setQuota(dst1.getParent(), 2, HdfsConstants.QUOTA_DONT_SET);
  // Free up quota for a subsequent rename
  fs.delete(dst1, true);
  rename(src1, dst1, true, true, false, Rename.OVERWRITE);

  // Restart the cluster and ensure the above operations can be
  // loaded from the edits log
  restartCluster();
  fs = cluster.getFileSystem();
  src1 = getTestRootPath(fc, "testEditsLogRename/srcdir/src1");
  dst1 = getTestRootPath(fc, "testEditsLogRename/dstdir/dst1");
  Assert.assertFalse(fs.exists(src1));   // ensure src1 is already renamed
  Assert.assertTrue(fs.exists(dst1));    // ensure rename dst exists
}
Example 2
Source File: TestBootstrapStandbyWithQJM.java From hadoop with Apache License 2.0
@Before
public void setup() throws Exception {
  Configuration conf = new Configuration();
  // Turn off IPC client caching, so that the suite can handle
  // the restart of the daemons between test cases.
  conf.setInt(
      CommonConfigurationKeysPublic.IPC_CLIENT_CONNECTION_MAXIDLETIME_KEY, 0);

  MiniQJMHACluster miniQjmHaCluster = new MiniQJMHACluster.Builder(conf).build();
  cluster = miniQjmHaCluster.getDfsCluster();
  jCluster = miniQjmHaCluster.getJournalCluster();

  // make nn0 active
  cluster.transitionToActive(0);
  // do sth to generate in-progress edit log data
  DistributedFileSystem dfs = (DistributedFileSystem) HATestUtil
      .configureFailoverFs(cluster, conf);
  dfs.mkdirs(new Path("/test2"));
  dfs.close();
}
Example 3
Source File: DistCpSync.java From hadoop with Apache License 2.0
/**
 * Finish the rename operations: move all the intermediate files/directories
 * from the tmp dir to the final targets.
 */
private static void moveToTarget(DiffInfo[] diffs,
    DistributedFileSystem targetFs) throws IOException {
  // sort the diffs based on their target paths to make sure the parent
  // directories are created first.
  Arrays.sort(diffs, DiffInfo.targetComparator);
  for (DiffInfo diff : diffs) {
    if (diff.target != null) {
      if (!targetFs.exists(diff.target.getParent())) {
        targetFs.mkdirs(diff.target.getParent());
      }
      targetFs.rename(diff.getTmp(), diff.target);
    }
  }
}
Example 4
Source File: DistCpSync.java From hadoop with Apache License 2.0
private static Path createTargetTmpDir(DistributedFileSystem targetFs,
    Path targetDir) throws IOException {
  final Path tmp = new Path(targetDir,
      DistCpConstants.HDFS_DISTCP_DIFF_DIRECTORY_NAME + DistCp.rand.nextInt());
  if (!targetFs.mkdirs(tmp)) {
    throw new IOException("The tmp directory " + tmp + " already exists");
  }
  return tmp;
}
Example 5
Source File: SharkQueryServiceTest.java From searchanalytics-bigdata with MIT License
@Before
public void prepareHdfs() {
  DistributedFileSystem fs = hadoopClusterService.getFileSystem();
  Path path = new Path("/searchevents");
  try {
    fs.delete(path, true);
    fs.mkdirs(path);
  } catch (IOException e) {
    e.printStackTrace();
    fail();
  }
  generateSearchAnalyticsDataService
      .generateAndPushSearchEvents(searchEventsCount);
}
Example 6
Source File: TestWebHDFS.java From big-c with Apache License 2.0
/**
 * Test snapshot creation through WebHdfs
 */
@Test
public void testWebHdfsCreateSnapshot() throws Exception {
  MiniDFSCluster cluster = null;
  final Configuration conf = WebHdfsTestUtil.createConf();
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
    cluster.waitActive();
    final DistributedFileSystem dfs = cluster.getFileSystem();
    final FileSystem webHdfs = WebHdfsTestUtil.getWebHdfsFileSystem(conf,
        WebHdfsFileSystem.SCHEME);

    final Path foo = new Path("/foo");
    dfs.mkdirs(foo);

    try {
      webHdfs.createSnapshot(foo);
      fail("Cannot create snapshot on a non-snapshottable directory");
    } catch (Exception e) {
      GenericTestUtils.assertExceptionContains(
          "Directory is not a snapshottable directory", e);
    }

    // allow snapshots on /foo
    dfs.allowSnapshot(foo);
    // create snapshots on foo using WebHdfs
    webHdfs.createSnapshot(foo, "s1");
    // create snapshot without specifying name
    final Path spath = webHdfs.createSnapshot(foo, null);
    Assert.assertTrue(webHdfs.exists(spath));
    final Path s1path = SnapshotTestHelper.getSnapshotRoot(foo, "s1");
    Assert.assertTrue(webHdfs.exists(s1path));
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
Example 7
Source File: SecureKnoxShellTest.java From knox with Apache License 2.0
/**
 * Do the heavy lifting here.
 */
private void webhdfsPutGet() throws Exception {
  DistributedFileSystem fileSystem = miniDFSCluster.getFileSystem();
  Path dir = new Path("/user/guest/example");
  fileSystem.delete(dir, true);
  fileSystem.mkdirs(dir, new FsPermission("777"));
  fileSystem.setOwner(dir, "guest", "users");

  final File jaasFile = setupJaasConf(baseDir, keytab, hdfsPrincipal);

  final Binding binding = new Binding();
  binding.setProperty("jaasConf", jaasFile.getAbsolutePath());
  binding.setProperty("krb5conf", krb5conf);
  binding.setProperty("gateway", driver.getClusterUrl());

  URL readme = driver.getResourceUrl("README");
  File file = new File(readme.toURI());
  binding.setProperty("file", file.getAbsolutePath());

  final GroovyShell shell = new GroovyShell(binding);
  shell.evaluate(getResourceUrl(SCRIPT).toURI());

  String status = (String) binding.getProperty("status");
  assertNotNull(status);

  String fetchedFile = (String) binding.getProperty("fetchedFile");
  assertNotNull(fetchedFile);
  assertTrue(fetchedFile.contains("README"));
}
Example 8
Source File: TestInputPathHandler.java From hudi with Apache License 2.0
static List<Path> generatePartitions(DistributedFileSystem dfs, String basePath)
    throws IOException {
  List<Path> paths = new ArrayList<>();
  paths.add(new Path(basePath + Path.SEPARATOR + "2019/05/21"));
  paths.add(new Path(basePath + Path.SEPARATOR + "2019/05/22"));
  paths.add(new Path(basePath + Path.SEPARATOR + "2019/05/23"));
  paths.add(new Path(basePath + Path.SEPARATOR + "2019/05/24"));
  paths.add(new Path(basePath + Path.SEPARATOR + "2019/05/25"));
  for (Path path : paths) {
    dfs.mkdirs(path);
  }
  return paths;
}
Example 9
Source File: TestWebHDFS.java From hadoop with Apache License 2.0
/**
 * Test snapshot rename through WebHdfs
 */
@Test
public void testWebHdfsRenameSnapshot() throws Exception {
  MiniDFSCluster cluster = null;
  final Configuration conf = WebHdfsTestUtil.createConf();
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
    cluster.waitActive();
    final DistributedFileSystem dfs = cluster.getFileSystem();
    final FileSystem webHdfs = WebHdfsTestUtil.getWebHdfsFileSystem(conf,
        WebHdfsFileSystem.SCHEME);

    final Path foo = new Path("/foo");
    dfs.mkdirs(foo);
    dfs.allowSnapshot(foo);

    webHdfs.createSnapshot(foo, "s1");
    final Path s1path = SnapshotTestHelper.getSnapshotRoot(foo, "s1");
    Assert.assertTrue(webHdfs.exists(s1path));

    // rename s1 to s2
    webHdfs.renameSnapshot(foo, "s1", "s2");
    Assert.assertFalse(webHdfs.exists(s1path));
    final Path s2path = SnapshotTestHelper.getSnapshotRoot(foo, "s2");
    Assert.assertTrue(webHdfs.exists(s2path));

    webHdfs.deleteSnapshot(foo, "s2");
    Assert.assertFalse(webHdfs.exists(s2path));
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
Example 10
Source File: TestWebHDFS.java From hadoop with Apache License 2.0
/**
 * Test snapshot deletion through WebHdfs
 */
@Test
public void testWebHdfsDeleteSnapshot() throws Exception {
  MiniDFSCluster cluster = null;
  final Configuration conf = WebHdfsTestUtil.createConf();
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
    cluster.waitActive();
    final DistributedFileSystem dfs = cluster.getFileSystem();
    final FileSystem webHdfs = WebHdfsTestUtil.getWebHdfsFileSystem(conf,
        WebHdfsFileSystem.SCHEME);

    final Path foo = new Path("/foo");
    dfs.mkdirs(foo);
    dfs.allowSnapshot(foo);

    webHdfs.createSnapshot(foo, "s1");
    final Path spath = webHdfs.createSnapshot(foo, null);
    Assert.assertTrue(webHdfs.exists(spath));
    final Path s1path = SnapshotTestHelper.getSnapshotRoot(foo, "s1");
    Assert.assertTrue(webHdfs.exists(s1path));

    // delete the two snapshots
    webHdfs.deleteSnapshot(foo, "s1");
    Assert.assertFalse(webHdfs.exists(s1path));
    webHdfs.deleteSnapshot(foo, spath.getName());
    Assert.assertFalse(webHdfs.exists(spath));
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
Example 11
Source File: TestWebHDFS.java From hadoop with Apache License 2.0
/**
 * Test snapshot creation through WebHdfs
 */
@Test
public void testWebHdfsCreateSnapshot() throws Exception {
  MiniDFSCluster cluster = null;
  final Configuration conf = WebHdfsTestUtil.createConf();
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
    cluster.waitActive();
    final DistributedFileSystem dfs = cluster.getFileSystem();
    final FileSystem webHdfs = WebHdfsTestUtil.getWebHdfsFileSystem(conf,
        WebHdfsFileSystem.SCHEME);

    final Path foo = new Path("/foo");
    dfs.mkdirs(foo);

    try {
      webHdfs.createSnapshot(foo);
      fail("Cannot create snapshot on a non-snapshottable directory");
    } catch (Exception e) {
      GenericTestUtils.assertExceptionContains(
          "Directory is not a snapshottable directory", e);
    }

    // allow snapshots on /foo
    dfs.allowSnapshot(foo);
    // create snapshots on foo using WebHdfs
    webHdfs.createSnapshot(foo, "s1");
    // create snapshot without specifying name
    final Path spath = webHdfs.createSnapshot(foo, null);
    Assert.assertTrue(webHdfs.exists(spath));
    final Path s1path = SnapshotTestHelper.getSnapshotRoot(foo, "s1");
    Assert.assertTrue(webHdfs.exists(s1path));
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
Example 12
Source File: DistCpSync.java From big-c with Apache License 2.0
private static Path createTargetTmpDir(DistributedFileSystem targetFs,
    Path targetDir) throws IOException {
  final Path tmp = new Path(targetDir,
      DistCpConstants.HDFS_DISTCP_DIFF_DIRECTORY_NAME + DistCp.rand.nextInt());
  if (!targetFs.mkdirs(tmp)) {
    throw new IOException("The tmp directory " + tmp + " already exists");
  }
  return tmp;
}
Example 13
Source File: TestOfflineImageViewer.java From big-c with Apache License 2.0
@BeforeClass
public static void createOriginalFSImage() throws IOException {
  MiniDFSCluster cluster = null;
  try {
    Configuration conf = new Configuration();
    conf.setLong(
        DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_MAX_LIFETIME_KEY, 10000);
    conf.setLong(
        DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_RENEW_INTERVAL_KEY, 5000);
    conf.setBoolean(
        DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY, true);
    conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTH_TO_LOCAL,
        "RULE:[2:$1@$0](JobTracker@.*FOO.COM)s/@.*//" + "DEFAULT");
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    cluster.waitActive();
    DistributedFileSystem hdfs = cluster.getFileSystem();

    // Create a reasonable namespace
    for (int i = 0; i < NUM_DIRS; i++) {
      Path dir = new Path("/dir" + i);
      hdfs.mkdirs(dir);
      writtenFiles.put(dir.toString(), pathToFileEntry(hdfs, dir.toString()));
      for (int j = 0; j < FILES_PER_DIR; j++) {
        Path file = new Path(dir, "file" + j);
        FSDataOutputStream o = hdfs.create(file);
        o.write(23);
        o.close();
        writtenFiles.put(file.toString(),
            pathToFileEntry(hdfs, file.toString()));
      }
    }

    // Create an empty directory
    Path emptydir = new Path("/emptydir");
    hdfs.mkdirs(emptydir);
    writtenFiles.put(emptydir.toString(), hdfs.getFileStatus(emptydir));

    // Create a directory whose name should be escaped in XML
    Path invalidXMLDir = new Path("/dirContainingInvalidXMLChar\u0000here");
    hdfs.mkdirs(invalidXMLDir);

    // Get delegation tokens so we log the delegation token op
    Token<?>[] delegationTokens = hdfs.addDelegationTokens(TEST_RENEWER, null);
    for (Token<?> t : delegationTokens) {
      LOG.debug("got token " + t);
    }

    final Path snapshot = new Path("/snapshot");
    hdfs.mkdirs(snapshot);
    hdfs.allowSnapshot(snapshot);
    hdfs.mkdirs(new Path("/snapshot/1"));
    hdfs.delete(snapshot, true);

    // Set XAttrs so the fsimage contains XAttr ops
    final Path xattr = new Path("/xattr");
    hdfs.mkdirs(xattr);
    hdfs.setXAttr(xattr, "user.a1", new byte[]{ 0x31, 0x32, 0x33 });
    hdfs.setXAttr(xattr, "user.a2", new byte[]{ 0x37, 0x38, 0x39 });
    // OIV should be able to handle empty value XAttrs
    hdfs.setXAttr(xattr, "user.a3", null);
    writtenFiles.put(xattr.toString(), hdfs.getFileStatus(xattr));

    // Write results to the fsimage file
    hdfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER, false);
    hdfs.saveNamespace();

    // Determine location of fsimage file
    originalFsimage = FSImageTestUtil.findLatestImageFile(FSImageTestUtil
        .getFSImage(cluster.getNameNode()).getStorage().getStorageDir(0));
    if (originalFsimage == null) {
      throw new RuntimeException("Didn't generate or can't find fsimage");
    }
    LOG.debug("original FS image file is " + originalFsimage);
  } finally {
    if (cluster != null)
      cluster.shutdown();
  }
}
Example 14
Source File: TestFSImageWithAcl.java From hadoop with Apache License 2.0
private void testAcl(boolean persistNamespace) throws IOException {
  Path p = new Path("/p");
  DistributedFileSystem fs = cluster.getFileSystem();
  fs.create(p).close();
  fs.mkdirs(new Path("/23"));

  AclEntry e = new AclEntry.Builder().setName("foo")
      .setPermission(READ_EXECUTE).setScope(ACCESS).setType(USER).build();
  fs.modifyAclEntries(p, Lists.newArrayList(e));

  restart(fs, persistNamespace);

  AclStatus s = cluster.getNamesystem().getAclStatus(p.toString());
  AclEntry[] returned = Lists.newArrayList(s.getEntries()).toArray(
      new AclEntry[0]);
  Assert.assertArrayEquals(new AclEntry[] {
      aclEntry(ACCESS, USER, "foo", READ_EXECUTE),
      aclEntry(ACCESS, GROUP, READ) }, returned);

  fs.removeAcl(p);

  if (persistNamespace) {
    fs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
    fs.saveNamespace();
    fs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
  }

  cluster.restartNameNode();
  cluster.waitActive();

  s = cluster.getNamesystem().getAclStatus(p.toString());
  returned = Lists.newArrayList(s.getEntries()).toArray(new AclEntry[0]);
  Assert.assertArrayEquals(new AclEntry[] { }, returned);

  fs.modifyAclEntries(p, Lists.newArrayList(e));
  s = cluster.getNamesystem().getAclStatus(p.toString());
  returned = Lists.newArrayList(s.getEntries()).toArray(new AclEntry[0]);
  Assert.assertArrayEquals(new AclEntry[] {
      aclEntry(ACCESS, USER, "foo", READ_EXECUTE),
      aclEntry(ACCESS, GROUP, READ) }, returned);
}
Example 15
Source File: TestFSImageWithAcl.java From big-c with Apache License 2.0
private void doTestDefaultAclNewChildren(boolean persistNamespace)
    throws IOException {
  Path dirPath = new Path("/dir");
  Path filePath = new Path(dirPath, "file1");
  Path subdirPath = new Path(dirPath, "subdir1");
  DistributedFileSystem fs = cluster.getFileSystem();
  fs.mkdirs(dirPath);
  List<AclEntry> aclSpec = Lists.newArrayList(
      aclEntry(DEFAULT, USER, "foo", ALL));
  fs.setAcl(dirPath, aclSpec);
  fs.create(filePath).close();
  fs.mkdirs(subdirPath);

  AclEntry[] fileExpected = new AclEntry[] {
      aclEntry(ACCESS, USER, "foo", ALL),
      aclEntry(ACCESS, GROUP, READ_EXECUTE) };
  AclEntry[] subdirExpected = new AclEntry[] {
      aclEntry(ACCESS, USER, "foo", ALL),
      aclEntry(ACCESS, GROUP, READ_EXECUTE),
      aclEntry(DEFAULT, USER, ALL),
      aclEntry(DEFAULT, USER, "foo", ALL),
      aclEntry(DEFAULT, GROUP, READ_EXECUTE),
      aclEntry(DEFAULT, MASK, ALL),
      aclEntry(DEFAULT, OTHER, READ_EXECUTE) };

  AclEntry[] fileReturned = fs.getAclStatus(filePath).getEntries()
      .toArray(new AclEntry[0]);
  Assert.assertArrayEquals(fileExpected, fileReturned);
  AclEntry[] subdirReturned = fs.getAclStatus(subdirPath).getEntries()
      .toArray(new AclEntry[0]);
  Assert.assertArrayEquals(subdirExpected, subdirReturned);
  assertPermission(fs, subdirPath, (short)010755);

  restart(fs, persistNamespace);

  fileReturned = fs.getAclStatus(filePath).getEntries()
      .toArray(new AclEntry[0]);
  Assert.assertArrayEquals(fileExpected, fileReturned);
  subdirReturned = fs.getAclStatus(subdirPath).getEntries()
      .toArray(new AclEntry[0]);
  Assert.assertArrayEquals(subdirExpected, subdirReturned);
  assertPermission(fs, subdirPath, (short)010755);

  aclSpec = Lists.newArrayList(aclEntry(DEFAULT, USER, "foo", READ_WRITE));
  fs.modifyAclEntries(dirPath, aclSpec);

  fileReturned = fs.getAclStatus(filePath).getEntries()
      .toArray(new AclEntry[0]);
  Assert.assertArrayEquals(fileExpected, fileReturned);
  subdirReturned = fs.getAclStatus(subdirPath).getEntries()
      .toArray(new AclEntry[0]);
  Assert.assertArrayEquals(subdirExpected, subdirReturned);
  assertPermission(fs, subdirPath, (short)010755);

  restart(fs, persistNamespace);

  fileReturned = fs.getAclStatus(filePath).getEntries()
      .toArray(new AclEntry[0]);
  Assert.assertArrayEquals(fileExpected, fileReturned);
  subdirReturned = fs.getAclStatus(subdirPath).getEntries()
      .toArray(new AclEntry[0]);
  Assert.assertArrayEquals(subdirExpected, subdirReturned);
  assertPermission(fs, subdirPath, (short)010755);

  fs.removeAcl(dirPath);

  fileReturned = fs.getAclStatus(filePath).getEntries()
      .toArray(new AclEntry[0]);
  Assert.assertArrayEquals(fileExpected, fileReturned);
  subdirReturned = fs.getAclStatus(subdirPath).getEntries()
      .toArray(new AclEntry[0]);
  Assert.assertArrayEquals(subdirExpected, subdirReturned);
  assertPermission(fs, subdirPath, (short)010755);

  restart(fs, persistNamespace);

  fileReturned = fs.getAclStatus(filePath).getEntries()
      .toArray(new AclEntry[0]);
  Assert.assertArrayEquals(fileExpected, fileReturned);
  subdirReturned = fs.getAclStatus(subdirPath).getEntries()
      .toArray(new AclEntry[0]);
  Assert.assertArrayEquals(subdirExpected, subdirReturned);
  assertPermission(fs, subdirPath, (short)010755);
}
Example 16
Source File: TestFSImage.java From big-c with Apache License 2.0
/**
 * Ensure mtime and atime can be loaded from fsimage.
 */
@Test(timeout=60000)
public void testLoadMtimeAtime() throws Exception {
  Configuration conf = new Configuration();
  MiniDFSCluster cluster = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    cluster.waitActive();
    DistributedFileSystem hdfs = cluster.getFileSystem();
    String userDir = hdfs.getHomeDirectory().toUri().getPath().toString();

    Path file = new Path(userDir, "file");
    Path dir = new Path(userDir, "/dir");
    Path link = new Path(userDir, "/link");
    hdfs.createNewFile(file);
    hdfs.mkdirs(dir);
    hdfs.createSymlink(file, link, false);

    long mtimeFile = hdfs.getFileStatus(file).getModificationTime();
    long atimeFile = hdfs.getFileStatus(file).getAccessTime();
    long mtimeDir = hdfs.getFileStatus(dir).getModificationTime();
    long mtimeLink = hdfs.getFileLinkStatus(link).getModificationTime();
    long atimeLink = hdfs.getFileLinkStatus(link).getAccessTime();

    // save namespace and restart cluster
    hdfs.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_ENTER);
    hdfs.saveNamespace();
    hdfs.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_LEAVE);
    cluster.shutdown();
    cluster = new MiniDFSCluster.Builder(conf).format(false)
        .numDataNodes(1).build();
    cluster.waitActive();
    hdfs = cluster.getFileSystem();

    assertEquals(mtimeFile, hdfs.getFileStatus(file).getModificationTime());
    assertEquals(atimeFile, hdfs.getFileStatus(file).getAccessTime());
    assertEquals(mtimeDir, hdfs.getFileStatus(dir).getModificationTime());
    assertEquals(mtimeLink, hdfs.getFileLinkStatus(link).getModificationTime());
    assertEquals(atimeLink, hdfs.getFileLinkStatus(link).getAccessTime());
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
Example 17
Source File: TestSnapshotStatsMXBean.java From big-c with Apache License 2.0
/**
 * Test getting SnapshotStatsMXBean information
 */
@Test
public void testSnapshotStatsMXBeanInfo() throws Exception {
  Configuration conf = new Configuration();
  MiniDFSCluster cluster = null;
  String pathName = "/snapshot";
  Path path = new Path(pathName);

  try {
    cluster = new MiniDFSCluster.Builder(conf).build();
    cluster.waitActive();

    SnapshotManager sm = cluster.getNamesystem().getSnapshotManager();
    DistributedFileSystem dfs = (DistributedFileSystem) cluster.getFileSystem();
    dfs.mkdirs(path);
    dfs.allowSnapshot(path);
    dfs.createSnapshot(path);

    MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
    ObjectName mxbeanName = new ObjectName(
        "Hadoop:service=NameNode,name=SnapshotInfo");

    CompositeData[] directories = (CompositeData[]) mbs.getAttribute(
        mxbeanName, "SnapshottableDirectories");
    int numDirectories = Array.getLength(directories);
    assertEquals(sm.getNumSnapshottableDirs(), numDirectories);
    CompositeData[] snapshots = (CompositeData[]) mbs.getAttribute(
        mxbeanName, "Snapshots");
    int numSnapshots = Array.getLength(snapshots);
    assertEquals(sm.getNumSnapshots(), numSnapshots);

    CompositeData d = (CompositeData) Array.get(directories, 0);
    CompositeData s = (CompositeData) Array.get(snapshots, 0);
    assertTrue(((String) d.get("path")).contains(pathName));
    assertTrue(((String) s.get("snapshotDirectory")).contains(pathName));
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
Example 18
Source File: TestINodeFile.java From big-c with Apache License 2.0
@Test
public void testFilesInGetListingOps() throws Exception {
  final Configuration conf = new Configuration();
  MiniDFSCluster cluster = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    cluster.waitActive();
    final DistributedFileSystem hdfs = cluster.getFileSystem();
    final FSDirectory fsdir = cluster.getNamesystem().getFSDirectory();

    hdfs.mkdirs(new Path("/tmp"));
    DFSTestUtil.createFile(hdfs, new Path("/tmp/f1"), 0, (short) 1, 0);
    DFSTestUtil.createFile(hdfs, new Path("/tmp/f2"), 0, (short) 1, 0);
    DFSTestUtil.createFile(hdfs, new Path("/tmp/f3"), 0, (short) 1, 0);

    DirectoryListing dl = cluster.getNameNodeRpc().getListing("/tmp",
        HdfsFileStatus.EMPTY_NAME, false);
    assertTrue(dl.getPartialListing().length == 3);

    String f2 = new String("f2");
    dl = cluster.getNameNodeRpc().getListing("/tmp", f2.getBytes(), false);
    assertTrue(dl.getPartialListing().length == 1);

    INode f2INode = fsdir.getINode("/tmp/f2");
    String f2InodePath = "/.reserved/.inodes/" + f2INode.getId();
    dl = cluster.getNameNodeRpc().getListing("/tmp", f2InodePath.getBytes(),
        false);
    assertTrue(dl.getPartialListing().length == 1);

    // Test the deleted startAfter file
    hdfs.delete(new Path("/tmp/f2"), false);
    try {
      dl = cluster.getNameNodeRpc().getListing("/tmp",
          f2InodePath.getBytes(), false);
      fail("Didn't get exception for the deleted startAfter token.");
    } catch (IOException e) {
      assertTrue(e instanceof DirectoryListingStartAfterNotFoundException);
    }
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
Example 19
Source File: TestBootstrapStandbyWithBKJM.java From hadoop with Apache License 2.0
/**
 * While bootstrapping, in_progress transaction entries should be skipped.
 * Bootstrap usage for BKJM : "-force", "-nonInteractive", "-skipSharedEditsCheck"
 */
@Test
public void testBootstrapStandbyWithActiveNN() throws Exception {
  // make nn0 active
  cluster.transitionToActive(0);

  // do ops and generate in-progress edit log data
  Configuration confNN1 = cluster.getConfiguration(1);
  DistributedFileSystem dfs = (DistributedFileSystem) HATestUtil
      .configureFailoverFs(cluster, confNN1);
  for (int i = 1; i <= 10; i++) {
    dfs.mkdirs(new Path("/test" + i));
  }
  dfs.close();

  // shutdown nn1 and delete its edit log files
  cluster.shutdownNameNode(1);
  deleteEditLogIfExists(confNN1);
  cluster.getNameNodeRpc(0).setSafeMode(SafeModeAction.SAFEMODE_ENTER, true);
  cluster.getNameNodeRpc(0).saveNamespace();
  cluster.getNameNodeRpc(0).setSafeMode(SafeModeAction.SAFEMODE_LEAVE, true);

  // check without -skipSharedEditsCheck, Bootstrap should fail for BKJM
  // immediately after saveNamespace
  int rc = BootstrapStandby.run(new String[] { "-force", "-nonInteractive" },
      confNN1);
  Assert.assertEquals("Mismatches return code", 6, rc);

  // check with -skipSharedEditsCheck
  rc = BootstrapStandby.run(new String[] { "-force", "-nonInteractive",
      "-skipSharedEditsCheck" }, confNN1);
  Assert.assertEquals("Mismatches return code", 0, rc);

  // Checkpoint as fast as we can, in a tight loop.
  confNN1.setInt(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_PERIOD_KEY, 1);
  cluster.restartNameNode(1);
  cluster.transitionToStandby(1);

  NameNode nn0 = cluster.getNameNode(0);
  HATestUtil.waitForStandbyToCatchUp(nn0, cluster.getNameNode(1));
  long expectedCheckpointTxId = NameNodeAdapter.getNamesystem(nn0)
      .getFSImage().getMostRecentCheckpointTxId();
  HATestUtil.waitForCheckpoint(cluster, 1,
      ImmutableList.of((int) expectedCheckpointTxId));

  // Should have copied over the namespace
  FSImageTestUtil.assertNNHasCheckpoints(cluster, 1,
      ImmutableList.of((int) expectedCheckpointTxId));
  FSImageTestUtil.assertNNFilesMatch(cluster);
}
Example 20
Source File: TestWithMiniClusterBase.java From NNAnalytics with Apache License 2.0
protected void addFiles(int numOfFiles, long sleepBetweenMs) throws Exception {
  DistributedFileSystem fileSystem = (DistributedFileSystem) FileSystem.get(CONF);
  for (int i = 0; i < numOfFiles; i++) {
    int dirNumber1 = RANDOM.nextInt(10);
    Path dirPath = new Path("/dir" + dirNumber1);
    int dirNumber2 = RANDOM.nextInt(10);
    dirPath = dirPath.suffix("/dir" + dirNumber2);
    int dirNumber3 = RANDOM.nextInt(10);
    dirPath = dirPath.suffix("/dir" + dirNumber3);
    fileSystem.mkdirs(dirPath);
    Path filePath = dirPath.suffix("/file" + i);
    int fileType = RANDOM.nextInt(7);
    switch (fileType) {
      case 0:
        filePath = filePath.suffix(".zip");
        break;
      case 1:
        filePath = filePath.suffix(".avro");
        break;
      case 2:
        filePath = filePath.suffix(".orc");
        break;
      case 3:
        filePath = filePath.suffix(".txt");
        break;
      case 4:
        filePath = filePath.suffix(".json");
        break;
      case 5:
        filePath = dirPath.suffix("/part-r-" + i);
        break;
      case 6:
        filePath = filePath.suffix("_45454");
      default:
        break;
    }
    int fileSize = RANDOM.nextInt(4);
    switch (fileSize) {
      case 1:
        DFSTestUtil.writeFile(fileSystem, filePath, new String(TINY_FILE_BYTES));
        break;
      case 2:
        DFSTestUtil.writeFile(fileSystem, filePath, new String(SMALL_FILE_BYTES));
        break;
      case 3:
        DFSTestUtil.writeFile(fileSystem, filePath, new String(MEDIUM_FILE_BYTES));
        break;
      case 0:
      default:
        DFSTestUtil.writeFile(fileSystem, filePath, "");
        break;
    }
    if (dirNumber1 == 1) {
      fileSystem.setQuota(filePath.getParent(), 100L, 100000000000L);
    }
    int user = RANDOM.nextInt(3);
    switch (user) {
      case 1:
        fileSystem.setOwner(filePath, USERS[0], USERS[0]);
        break;
      case 2:
        fileSystem.setOwner(filePath, USERS[1], USERS[1]);
        break;
      case 0:
      default:
        break;
    }
    short repFactor = (short) RANDOM.nextInt(4);
    if (repFactor != 0) {
      fileSystem.setReplication(filePath, repFactor);
    }
    int weeksAgo = RANDOM.nextInt(60);
    long timeStamp = System.currentTimeMillis() - TimeUnit.DAYS.toMillis(weeksAgo * 7);
    if (weeksAgo != 0) {
      fileSystem.setTimes(filePath, timeStamp, timeStamp);
    }
    if (sleepBetweenMs != 0L) {
      Thread.sleep(sleepBetweenMs);
    }
  }
}