Java Code Examples for org.apache.hadoop.fs.FileContext#getLocalFSFileContext()
The following examples show how to use
org.apache.hadoop.fs.FileContext#getLocalFSFileContext().
You can vote up the examples you like or vote down the ones you don't like,
and you can go to the original project or source file by following the links above each example. You may check out the related API usage in the sidebar.
Example 1
Source File: TestDefaultContainerExecutor.java From big-c with Apache License 2.0 | 6 votes |
byte[] createTmpFile(Path dst, Random r, int len) throws IOException { // use unmodified local context FileContext lfs = FileContext.getLocalFSFileContext(); dst = lfs.makeQualified(dst); lfs.mkdir(dst.getParent(), null, true); byte[] bytes = new byte[len]; FSDataOutputStream out = null; try { out = lfs.create(dst, EnumSet.of(CREATE, OVERWRITE)); r.nextBytes(bytes); out.write(bytes); } finally { if (out != null) out.close(); } return bytes; }
Example 2
Source File: TestTimelineDataManager.java From big-c with Apache License 2.0 | 6 votes |
@Before
public void setup() throws Exception {
  // Per-test scratch directory under target/, deleted before each run so
  // leveldb state from a previous test cannot leak in.
  fsPath = new File("target", this.getClass().getSimpleName() +
      "-tmpDir").getAbsoluteFile();
  fsContext = FileContext.getLocalFSFileContext();
  fsContext.delete(new Path(fsPath.getAbsolutePath()), true);
  Configuration conf = new YarnConfiguration();
  conf.set(YarnConfiguration.TIMELINE_SERVICE_LEVELDB_PATH,
      fsPath.getAbsolutePath());
  // TTL disabled so the entities loaded below cannot expire mid-test.
  conf.setBoolean(YarnConfiguration.TIMELINE_SERVICE_TTL_ENABLE, false);
  store = new LeveldbTimelineStore();
  store.init(conf);
  store.start();
  // Seed the store before the data manager is constructed.
  loadTestEntityData();
  loadVerificationEntityData();
  loadTestDomainData();
  // ACLs are toggled: off while building the data manager under test...
  conf.setBoolean(YarnConfiguration.YARN_ACL_ENABLE, false);
  aclsManager = new TimelineACLsManager(conf);
  // NOTE(review): "dataManaer" is a misspelled field declared elsewhere in
  // this class; it cannot be renamed here without touching its declaration.
  dataManaer = new TimelineDataManager(store, aclsManager);
  // ...then on, with an admin user, for the AdminACLsManager.
  conf.setBoolean(YarnConfiguration.YARN_ACL_ENABLE, true);
  conf.set(YarnConfiguration.YARN_ADMIN_ACL, "admin");
  adminACLsManager = new AdminACLsManager(conf);
}
Example 3
Source File: TestContainerLocalizer.java From big-c with Apache License 2.0 | 6 votes |
@Test @SuppressWarnings("unchecked") // mocked generics public void testContainerLocalizerClosesFilesystems() throws Exception { // verify filesystems are closed when localizer doesn't fail FileContext fs = FileContext.getLocalFSFileContext(); spylfs = spy(fs.getDefaultFileSystem()); ContainerLocalizer localizer = setupContainerLocalizerForTest(); doNothing().when(localizer).localizeFiles(any(LocalizationProtocol.class), any(CompletionService.class), any(UserGroupInformation.class)); verify(localizer, never()).closeFileSystems( any(UserGroupInformation.class)); localizer.runLocalization(nmAddr); verify(localizer).closeFileSystems(any(UserGroupInformation.class)); spylfs = spy(fs.getDefaultFileSystem()); // verify filesystems are closed when localizer fails localizer = setupContainerLocalizerForTest(); doThrow(new YarnRuntimeException("Forced Failure")).when(localizer).localizeFiles( any(LocalizationProtocol.class), any(CompletionService.class), any(UserGroupInformation.class)); verify(localizer, never()).closeFileSystems( any(UserGroupInformation.class)); localizer.runLocalization(nmAddr); verify(localizer).closeFileSystems(any(UserGroupInformation.class)); }
Example 4
Source File: TestNodeManagerShutdown.java From hadoop with Apache License 2.0 | 5 votes |
@Before
public void setup() throws UnsupportedFileSystemException {
  localFS = FileContext.getLocalFSFileContext();
  // The directory fields are plain java.io.File instances declared
  // elsewhere in this class; create them all before each test.
  tmpDir.mkdirs();
  logsDir.mkdirs();
  remoteLogsDir.mkdirs();
  nmLocalDir.mkdirs();
  // Construct the Container-id
  cId = createContainerId();
}
Example 5
Source File: TestCreateEditsLog.java From hadoop with Apache License 2.0 | 5 votes |
/**
 * Tests that an edits log created using CreateEditsLog is valid and can be
 * loaded successfully by a namenode.
 */
@Test(timeout=60000)
public void testCanLoadCreatedEditsLog() throws Exception {
  // Format namenode.
  HdfsConfiguration conf = new HdfsConfiguration();
  File nameDir = new File(HDFS_DIR, "name");
  conf.set(DFS_NAMENODE_NAME_DIR_KEY, Util.fileAsURI(nameDir).toString());
  DFSTestUtil.formatNameNode(conf);

  // Call CreateEditsLog and move the resulting edits to the name dir.
  CreateEditsLog.main(new String[] { "-f", "1000", "0", "1", "-d",
      TEST_DIR.getAbsolutePath() });
  Path editsWildcard = new Path(TEST_DIR.getAbsolutePath(), "*");
  FileContext localFc = FileContext.getLocalFSFileContext();
  for (FileStatus edits: localFc.util().globStatus(editsWildcard)) {
    Path src = edits.getPath();
    // Edits files are only picked up from the name dir's "current" subdir.
    Path dst = new Path(new File(nameDir, "current").getAbsolutePath(),
        src.getName());
    localFc.rename(src, dst);
  }

  // Start a namenode to try to load the edits.
  cluster = new MiniDFSCluster.Builder(conf)
      .format(false)
      .manageNameDfsDirs(false)
      .waitSafeMode(false)
      .build();
  cluster.waitClusterUp();

  // Test successful, because no exception thrown.
}
Example 6
Source File: TestDirectoryCollection.java From hadoop with Apache License 2.0 | 5 votes |
@Before
public void setupForTests() throws IOException {
  conf = new Configuration();
  localFs = FileContext.getLocalFSFileContext(conf);
  // testDir/testFile are fields declared elsewhere; ensure both exist
  // before each test runs.
  testDir.mkdirs();
  testFile.createNewFile();
}
Example 7
Source File: TestLinuxContainerExecutor.java From big-c with Apache License 2.0 | 5 votes |
@Before
public void setup() throws Exception {
  files = FileContext.getLocalFSFileContext();
  Path workSpacePath = new Path(workSpace.getAbsolutePath());
  files.mkdir(workSpacePath, null, true);
  // World-writable so a container user different from the test user can
  // read and write under the workspace.
  FileUtil.chmod(workSpace.getAbsolutePath(), "777");
  File localDir = new File(workSpace.getAbsoluteFile(), "localDir");
  files.mkdir(new Path(localDir.getAbsolutePath()),
      new FsPermission("777"), false);
  File logDir = new File(workSpace.getAbsoluteFile(), "logDir");
  files.mkdir(new Path(logDir.getAbsolutePath()),
      new FsPermission("777"), false);
  // The native container-executor binary location must be supplied via a
  // system property; without it the executor-specific setup is skipped
  // entirely (tests presumably no-op in that case — see test bodies).
  String exec_path = System.getProperty("container-executor.path");
  if (exec_path != null && !exec_path.isEmpty()) {
    conf = new Configuration(false);
    conf.setClass("fs.AbstractFileSystem.file.impl",
        org.apache.hadoop.fs.local.LocalFs.class,
        org.apache.hadoop.fs.AbstractFileSystem.class);
    conf.set(YarnConfiguration.NM_NONSECURE_MODE_LOCAL_USER_KEY, "xuan");
    LOG.info("Setting "+YarnConfiguration.NM_LINUX_CONTAINER_EXECUTOR_PATH
        +"="+exec_path);
    conf.set(YarnConfiguration.NM_LINUX_CONTAINER_EXECUTOR_PATH, exec_path);
    exec = new LinuxContainerExecutor();
    exec.setConf(conf);
    conf.set(YarnConfiguration.NM_LOCAL_DIRS, localDir.getAbsolutePath());
    conf.set(YarnConfiguration.NM_LOG_DIRS, logDir.getAbsolutePath());
    dirsHandler = new LocalDirsHandlerService();
    dirsHandler.init(conf);
  }
  // Fall back to "nobody" when no submitter is configured.
  appSubmitter = System.getProperty("application.submitter");
  if (appSubmitter == null || appSubmitter.isEmpty()) {
    appSubmitter = "nobody";
  }
}
Example 8
Source File: TestNodeStatusUpdater.java From hadoop with Apache License 2.0 | 5 votes |
@Test(timeout = 200000)
public void testNodeStatusUpdaterRetryAndNMShutdown() throws Exception {
  // NOTE(review): despite the "Secs" suffix, this value is passed to a
  // *_MS configuration key below, i.e. it is used as milliseconds.
  final long connectionWaitSecs = 1000;
  final long connectionRetryIntervalMs = 1000;
  YarnConfiguration conf = createNMConfig();
  conf.setLong(YarnConfiguration.RESOURCEMANAGER_CONNECT_MAX_WAIT_MS,
      connectionWaitSecs);
  conf.setLong(YarnConfiguration
      .RESOURCEMANAGER_CONNECT_RETRY_INTERVAL_MS,
      connectionRetryIntervalMs);
  conf.setLong(YarnConfiguration.NM_SLEEP_DELAY_BEFORE_SIGKILL_MS, 5000);
  conf.setLong(YarnConfiguration.NM_LOG_RETAIN_SECONDS, 1);
  // Two parties: this test thread and MyNodeManager2's internal thread.
  CyclicBarrier syncBarrier = new CyclicBarrier(2);
  nm = new MyNodeManager2(syncBarrier, conf);
  nm.init(conf);
  nm.start();
  // start a container
  ContainerId cId = TestNodeManagerShutdown.createContainerId();
  FileContext localFS = FileContext.getLocalFSFileContext();
  TestNodeManagerShutdown.startContainer(nm, cId, localFS, nmLocalDir,
      new File("start_file.txt"));

  try {
    syncBarrier.await(10000, TimeUnit.MILLISECONDS);
  } catch (Exception e) {
    // Deliberately swallowed: a barrier timeout is tolerated here and the
    // assertions below still validate the final state.
  }
  Assert.assertFalse("Containers not cleaned up when NM stopped",
      assertionFailedInThread.get());
  Assert.assertTrue(((MyNodeManager2) nm).isStopped);
  Assert.assertTrue("calculate heartBeatCount based on" +
      " connectionWaitSecs and RetryIntervalSecs", heartBeatID == 2);
}
Example 9
Source File: LocalDistributedCacheManager.java From big-c with Apache License 2.0 | 5 votes |
/**
 * Cleans up everything the local job run left on disk: first the symlinks
 * this runner created, then every localized archive and file, each removed
 * recursively through the local FileContext.
 *
 * @throws IOException if deleting a localized archive or file fails
 */
public void close() throws IOException {
  // Symlink removal is best-effort: a failure is logged, not thrown.
  for (File link : symlinksCreated) {
    if (!link.delete()) {
      LOG.warn("Failed to delete symlink created by the local job runner: " +
          link);
    }
  }
  FileContext lfs = FileContext.getLocalFSFileContext();
  for (String archive : localArchives) {
    lfs.delete(new Path(archive), true);
  }
  for (String file : localFiles) {
    lfs.delete(new Path(file), true);
  }
}
Example 10
Source File: TestMRAppMaster.java From big-c with Apache License 2.0 | 5 votes |
@BeforeClass
public static void setup() throws AccessControlException,
    FileNotFoundException, IllegalArgumentException, IOException {
  //Do not error out if metrics are inited multiple times
  DefaultMetricsSystem.setMiniClusterMode(true);
  // Normalize stagingDir to an absolute path before any test uses it.
  File dir = new File(stagingDir);
  stagingDir = dir.getAbsolutePath();
  localFS = FileContext.getLocalFSFileContext();
  // Wipe and recreate the shared test directory once for the whole class.
  localFS.delete(new Path(testDir.getAbsolutePath()), true);
  testDir.mkdir();
}
Example 11
Source File: TestDefaultContainerExecutor.java From big-c with Apache License 2.0 | 5 votes |
/** Best-effort removal of the shared temp tree once all tests have run. */
@AfterClass
public static void deleteTmpFiles() throws IOException {
  FileContext localFs = FileContext.getLocalFSFileContext();
  try {
    localFs.delete(BASE_TMP_PATH, true);
  } catch (FileNotFoundException ignored) {
    // Nothing was ever created under BASE_TMP_PATH; nothing to clean up.
  }
}
Example 12
Source File: ContainersLauncher.java From big-c with Apache License 2.0 | 5 votes |
@Override
protected void serviceInit(Configuration conf) throws Exception {
  try {
    //TODO Is this required?
    // Called only for its side effect: it fails fast (below) if the local
    // file system is unsupported. The returned FileContext is discarded.
    FileContext.getLocalFSFileContext(conf);
  } catch (UnsupportedFileSystemException e) {
    throw new YarnRuntimeException("Failed to start ContainersLauncher", e);
  }
  super.serviceInit(conf);
}
Example 13
Source File: ViewFsTestSetup.java From big-c with Apache License 2.0 | 5 votes |
/**
 * Builds a viewfs FileContext backed by the local file system: creates the
 * test root, then links the test dir, home dir, and working dir into a
 * viewfs mount table, and returns a FileContext over that viewfs.
 *
 * @param helper supplies the per-test root path on the target fs
 * @return a FileContext for the configured viewfs mount table
 */
static public FileContext setupForViewFsLocalFs(FileContextTestHelper helper) throws Exception {
  /**
   * create the test root on local_fs - the mount table will point here
   */
  FileContext fsTarget = FileContext.getLocalFSFileContext();
  Path targetOfTests = helper.getTestRootPath(fsTarget);
  // In case previous test was killed before cleanup
  fsTarget.delete(targetOfTests, true);

  fsTarget.mkdir(targetOfTests, FileContext.DEFAULT_PERM, true);
  Configuration conf = new Configuration();

  // Set up viewfs link for test dir as described above
  String testDir = helper.getTestRootPath(fsTarget).toUri()
      .getPath();
  linkUpFirstComponents(conf, testDir, fsTarget, "test dir");

  // Set up viewfs link for home dir as described above
  setUpHomeDir(conf, fsTarget);

  // the test path may be relative to working dir - we need to make that work:
  // Set up viewfs link for wd as described above
  String wdDir = fsTarget.getWorkingDirectory().toUri().getPath();
  linkUpFirstComponents(conf, wdDir, fsTarget, "working dir");

  FileContext fc = FileContext.getFileContext(FsConstants.VIEWFS_URI, conf);
  fc.setWorkingDirectory(new Path(wdDir)); // in case testdir relative to wd.
  Log.info("Working dir is: " + fc.getWorkingDirectory());
  //System.out.println("SRCOfTests = "+ getTestRootPath(fc, "test"));
  //System.out.println("TargetOfTests = "+ targetOfTests.toUri());
  return fc;
}
Example 14
Source File: TestNodeManagerShutdown.java From big-c with Apache License 2.0 | 5 votes |
@Before
public void setup() throws UnsupportedFileSystemException {
  localFS = FileContext.getLocalFSFileContext();
  // The directory fields are plain java.io.File instances declared
  // elsewhere in this class; create them all before each test.
  tmpDir.mkdirs();
  logsDir.mkdirs();
  remoteLogsDir.mkdirs();
  nmLocalDir.mkdirs();
  // Construct the Container-id
  cId = createContainerId();
}
Example 15
Source File: TestViewFsLocalFs.java From big-c with Apache License 2.0 | 5 votes |
@Override
@Before
public void setUp() throws Exception {
  // create the test root on local_fs
  // fcTarget must be assigned before super.setUp(), which presumably uses
  // it to build the viewfs mount — TODO confirm against the base class.
  fcTarget = FileContext.getLocalFSFileContext();
  super.setUp();
}
Example 16
Source File: TestLogalyzer.java From hadoop with Apache License 2.0 | 5 votes |
/**
 * Create simple log files.
 *
 * Resets the workspace, creates a "log" directory inside it, and writes two
 * small log files ("logfile.log" and "logfile1.log") into that directory.
 *
 * @return the directory containing the generated log files
 * @throws IOException if the workspace cannot be reset or a file written
 */
private Path createLogFile() throws IOException {
  FileContext files = FileContext.getLocalFSFileContext();
  Path ws = new Path(workSpace.getAbsoluteFile().getAbsolutePath());
  // Start from a clean workspace so stale files cannot affect the test.
  files.delete(ws, true);
  Path workSpacePath = new Path(workSpace.getAbsolutePath(), "log");
  files.mkdir(workSpacePath, null, true);

  LOG.info("create logfile.log");
  writeLogFile(files, new Path(workSpacePath, "logfile.log"),
      "4 3" + EL + "1 3" + EL + "4 44" + EL,
      "2 3" + EL + "1 3" + EL + "0 45" + EL,
      "4 3" + EL + "1 3" + EL + "1 44" + EL);

  LOG.info("create logfile1.log");
  writeLogFile(files, new Path(workSpacePath, "logfile1.log"),
      "4 3" + EL + "1 3" + EL + "3 44" + EL,
      "2 3" + EL + "1 3" + EL + "0 45" + EL,
      "4 3" + EL + "1 3" + EL + "1 44" + EL);

  return workSpacePath;
}

/**
 * Writes the given chunks to {@code file} in order. try-with-resources
 * guarantees the stream is closed (and thereby flushed) even on failure.
 */
private void writeLogFile(FileContext files, Path file, String... chunks)
    throws IOException {
  try (FSDataOutputStream os = files.create(file,
      EnumSet.of(CreateFlag.CREATE))) {
    for (String chunk : chunks) {
      os.writeBytes(chunk);
    }
  }
}
Example 17
Source File: TestDirectoryCollection.java From big-c with Apache License 2.0 | 5 votes |
@Before
public void setupForTests() throws IOException {
  conf = new Configuration();
  localFs = FileContext.getLocalFSFileContext(conf);
  // testDir/testFile are fields declared elsewhere; ensure both exist
  // before each test runs.
  testDir.mkdirs();
  testFile.createNewFile();
}
Example 18
Source File: TestDockerContainerExecutor.java From hadoop with Apache License 2.0 | 5 votes |
@Before
public void setup() {
  try {
    lfs = FileContext.getLocalFSFileContext();
    // Unique per-run workspace under /tmp, keyed by wall-clock time.
    workDir = new Path("/tmp/temp-" + System.currentTimeMillis());
    workSpace = new File(workDir.toUri().getPath());
    lfs.mkdir(workDir, FsPermission.getDirDefault(), true);
  } catch (IOException e) {
    throw new RuntimeException(e);
  }
  Configuration conf = new Configuration();
  yarnImage = "yarnImage";
  long time = System.currentTimeMillis();
  conf.set(YarnConfiguration.NM_LOCAL_DIRS, "/tmp/nm-local-dir" + time);
  conf.set(YarnConfiguration.NM_LOG_DIRS, "/tmp/userlogs" + time);
  // Docker-dependent setup is skipped entirely unless a docker endpoint
  // is supplied via the docker-service-url system property.
  dockerUrl = System.getProperty("docker-service-url");
  LOG.info("dockerUrl: " + dockerUrl);
  if (Strings.isNullOrEmpty(dockerUrl)) {
    return;
  }
  dockerUrl = " -H " + dockerUrl;
  dockerExec = "docker " + dockerUrl;
  conf.set(
      YarnConfiguration.NM_DOCKER_CONTAINER_EXECUTOR_IMAGE_NAME, yarnImage);
  conf.set(
      YarnConfiguration.NM_DOCKER_CONTAINER_EXECUTOR_EXEC_NAME, dockerExec);
  exec = new DockerContainerExecutor();
  dirsHandler = new LocalDirsHandlerService();
  dirsHandler.init(conf);
  exec.setConf(conf);
  // Fall back to "nobody" when no submitter is configured.
  appSubmitter = System.getProperty("application.submitter");
  if (appSubmitter == null || appSubmitter.isEmpty()) {
    appSubmitter = "nobody";
  }
  // Pull the test image once so individual tests don't pay the cost.
  shellExec(dockerExec + " pull " + testImage);
}
Example 19
Source File: TestLocalDirsHandlerService.java From hadoop with Apache License 2.0 | 4 votes |
/**
 * Verifies LocalDirsHandlerService's disk-utilization cutoff: with a 0%
 * cap every usable dir is reported "full"; with a 100% cap only the
 * permission-denied dirs stay bad. Also checks the NodeManager metrics for
 * bad-dir counts and good-dir utilization.
 */
@Test
public void testGetFullDirs() throws Exception {
  Configuration conf = new YarnConfiguration();
  conf.set(CommonConfigurationKeys.FS_PERMISSIONS_UMASK_KEY, "077");
  FileContext localFs = FileContext.getLocalFSFileContext(conf);

  String localDir1 = new File(testDir, "localDir1").getPath();
  String localDir2 = new File(testDir, "localDir2").getPath();
  String logDir1 = new File(testDir, "logDir1").getPath();
  String logDir2 = new File(testDir, "logDir2").getPath();
  Path localDir1Path = new Path(localDir1);
  Path logDir1Path = new Path(logDir1);
  // 0410 denies write access, so localDir1/logDir1 are flagged as bad dirs
  // regardless of the utilization cutoff.
  FsPermission dirPermissions = new FsPermission((short) 0410);
  localFs.mkdir(localDir1Path, dirPermissions, true);
  localFs.mkdir(logDir1Path, dirPermissions, true);

  conf.set(YarnConfiguration.NM_LOCAL_DIRS, localDir1 + "," + localDir2);
  conf.set(YarnConfiguration.NM_LOG_DIRS, logDir1 + "," + logDir2);
  // With a 0% utilization cap, the remaining (usable) dirs count as full.
  conf.setFloat(YarnConfiguration.NM_MAX_PER_DISK_UTILIZATION_PERCENTAGE,
      0.0f);
  NodeManagerMetrics nm = NodeManagerMetrics.create();
  LocalDirsHandlerService dirSvc = new LocalDirsHandlerService(nm);
  dirSvc.init(conf);
  Assert.assertEquals(0, dirSvc.getLocalDirs().size());
  Assert.assertEquals(0, dirSvc.getLogDirs().size());
  Assert.assertEquals(1, dirSvc.getDiskFullLocalDirs().size());
  Assert.assertEquals(1, dirSvc.getDiskFullLogDirs().size());
  // check the metrics
  Assert.assertEquals(2, nm.getBadLocalDirs());
  Assert.assertEquals(2, nm.getBadLogDirs());
  Assert.assertEquals(0, nm.getGoodLocalDirsDiskUtilizationPerc());
  Assert.assertEquals(0, nm.getGoodLogDirsDiskUtilizationPerc());

  // Raise the cap to 100%: only the permission-denied dirs remain bad.
  conf.setFloat(YarnConfiguration.NM_MAX_PER_DISK_UTILIZATION_PERCENTAGE,
      100.0f);
  nm = NodeManagerMetrics.create();
  dirSvc = new LocalDirsHandlerService(nm);
  dirSvc.init(conf);
  Assert.assertEquals(1, dirSvc.getLocalDirs().size());
  Assert.assertEquals(1, dirSvc.getLogDirs().size());
  Assert.assertEquals(0, dirSvc.getDiskFullLocalDirs().size());
  Assert.assertEquals(0, dirSvc.getDiskFullLogDirs().size());
  // check the metrics
  File dir = new File(localDir1);
  int utilizationPerc =
      (int) ((dir.getTotalSpace() - dir.getUsableSpace()) * 100
          / dir.getTotalSpace());
  Assert.assertEquals(1, nm.getBadLocalDirs());
  Assert.assertEquals(1, nm.getBadLogDirs());
  Assert.assertEquals(utilizationPerc,
      nm.getGoodLocalDirsDiskUtilizationPerc());
  Assert.assertEquals(utilizationPerc,
      nm.getGoodLogDirsDiskUtilizationPerc());

  FileUtils.deleteDirectory(new File(localDir1));
  FileUtils.deleteDirectory(new File(localDir2));
  FileUtils.deleteDirectory(new File(logDir1));
  // BUG FIX: the original deleted logDir1 twice and never removed logDir2,
  // leaving a stale directory behind for later tests.
  FileUtils.deleteDirectory(new File(logDir2));
  dirSvc.close();
}
Example 20
Source File: TestFSDownload.java From big-c with Apache License 2.0 | 4 votes |
/** Recursively removes this test class's directory under target/ after the suite. */
@AfterClass
public static void deleteTestDir() throws IOException {
  FileContext localFs = FileContext.getLocalFSFileContext();
  Path testRoot = new Path("target", TestFSDownload.class.getSimpleName());
  localFs.delete(testRoot, true);
}