Java Code Examples for org.apache.hadoop.fs.FileUtil#fullyDelete()
The following examples show how to use org.apache.hadoop.fs.FileUtil#fullyDelete().
Each example is taken from an open-source project; the source file, project, and license are noted in the header above the code.
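Before the individual examples, here is a minimal, self-contained sketch of the pattern that recurs throughout them: recursively deleting a scratch directory and recreating it before a test runs. The directory path and class name are illustrative assumptions, not taken from any of the projects listed below; the guard on the boolean return value mirrors Examples 3 and 10.

import java.io.File;
import java.io.IOException;

import org.apache.hadoop.fs.FileUtil;

public class FullyDeleteSketch {
  public static void main(String[] args) throws IOException {
    // Hypothetical scratch directory used only for this illustration.
    File scratchDir = new File("target/scratch").getAbsoluteFile();

    // fullyDelete(File) recursively removes the directory and everything
    // under it, returning true only if the delete fully succeeded.
    if (scratchDir.exists() && !FileUtil.fullyDelete(scratchDir)) {
      throw new IOException("Could not delete scratch directory " + scratchDir);
    }

    // Recreate an empty directory for the test to use.
    if (!scratchDir.mkdirs()) {
      throw new IOException("Could not create scratch directory " + scratchDir);
    }
  }
}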
Example 1
Source File: ContinuousFileProcessingCheckpointITCase.java From flink with Apache License 2.0 | 6 votes |
@Before
public void createHDFS() {
  if (failoverStrategy.equals(FailoverStrategy.RestartPipelinedRegionStrategy)) {
    // TODO the 'NO_OF_RETRIES' is useless for current RestartPipelinedRegionStrategy,
    // for this ContinuousFileProcessingCheckpointITCase, using RestartPipelinedRegionStrategy would result in endless running.
    throw new AssumptionViolatedException("ignored ContinuousFileProcessingCheckpointITCase when using RestartPipelinedRegionStrategy");
  }

  try {
    baseDir = new File("./target/localfs/fs_tests").getAbsoluteFile();
    FileUtil.fullyDelete(baseDir);

    org.apache.hadoop.conf.Configuration hdConf = new org.apache.hadoop.conf.Configuration();

    localFsURI = "file:///" + baseDir + "/";
    localFs = new org.apache.hadoop.fs.Path(localFsURI).getFileSystem(hdConf);
  } catch (Throwable e) {
    e.printStackTrace();
    Assert.fail("Test failed " + e.getMessage());
  }
}
Example 2
Source File: TestMapReduce.java From hadoop with Apache License 2.0 | 6 votes |
/**
 * Launches all the tasks in order.
 */
public static void main(String[] argv) throws Exception {
  if (argv.length < 2) {
    System.err.println("Usage: TestMapReduce <range> <counts>");
    System.err.println();
    System.err.println("Note: a good test will have a <counts> value" +
        " that is substantially larger than the <range>");
    return;
  }

  int i = 0;
  range = Integer.parseInt(argv[i++]);
  counts = Integer.parseInt(argv[i++]);
  try {
    launch();
  } finally {
    FileUtil.fullyDelete(TEST_DIR);
  }
}
Example 3
Source File: TestStartup.java From RDFS with Apache License 2.0 | 6 votes |
protected void setUp() throws Exception {
  config = new Configuration();
  String baseDir = System.getProperty("test.build.data", "/tmp");

  hdfsDir = new File(baseDir, "dfs");
  if ( hdfsDir.exists() && !FileUtil.fullyDelete(hdfsDir) ) {
    throw new IOException("Could not delete hdfs directory '" + hdfsDir + "'");
  }
  LOG.info("--hdfsdir is " + hdfsDir.getAbsolutePath());
  config.set("dfs.name.dir", new File(hdfsDir, "name").getPath());
  config.set("dfs.data.dir", new File(hdfsDir, "data").getPath());
  config.set("fs.checkpoint.dir",new File(hdfsDir, "secondary").getPath());
  config.setInt("dfs.secondary.info.port", 0);
  //config.set("fs.default.name", "hdfs://"+ NAME_NODE_HOST + "0");
  FileSystem.setDefaultUri(config, "hdfs://"+NAME_NODE_HOST + "0");
}
Example 4
Source File: TestAggregatedLogsBlock.java From hadoop with Apache License 2.0 | 6 votes |
/**
 * Bad user. User 'owner' is trying to read logs without access
 */
@Test
public void testAccessDenied() throws Exception {
  FileUtil.fullyDelete(new File("target/logs"));
  Configuration configuration = getConfiguration();

  writeLogs("target/logs/logs/application_0_0001/container_0_0001_01_000001");

  writeLog(configuration, "owner");

  AggregatedLogsBlockForTest aggregatedBlock = getAggregatedLogsBlockForTest(
      configuration, "owner", "container_0_0001_01_000001");
  ByteArrayOutputStream data = new ByteArrayOutputStream();
  PrintWriter printWriter = new PrintWriter(data);
  HtmlBlock html = new HtmlBlockForTest();
  HtmlBlock.Block block = new BlockForTest(html, printWriter, 10, false);
  aggregatedBlock.render(block);
  block.getWriter().flush();
  String out = data.toString();
  assertTrue(out
      .contains("User [owner] is not authorized to view the logs for entity"));
}
Example 5
Source File: ContinuousFileProcessingITCase.java From Flink-CEPplus with Apache License 2.0 | 5 votes |
@After
public void destroyHDFS() {
  try {
    FileUtil.fullyDelete(baseDir);
    hdfsCluster.shutdown();
  } catch (Throwable t) {
    throw new RuntimeException(t);
  }
}
Example 6
Source File: TestUnconsumedInput.java From big-c with Apache License 2.0 | 5 votes |
@Test
public void testUnconsumedInput() throws Exception {
  String outFileName = "part-00000";
  File outFile = null;
  try {
    try {
      FileUtil.fullyDelete(OUTPUT_DIR.getAbsoluteFile());
    } catch (Exception e) {
    }

    createInput();

    // setup config to ignore unconsumed input
    Configuration conf = new Configuration();
    conf.set("stream.minRecWrittenToEnableSkip_", "0");

    job = new StreamJob();
    job.setConf(conf);
    int exitCode = job.run(genArgs());
    assertEquals("Job failed", 0, exitCode);
    outFile = new File(OUTPUT_DIR, outFileName).getAbsoluteFile();
    String output = StreamUtil.slurp(outFile);
    assertEquals("Output was truncated", EXPECTED_OUTPUT_SIZE,
        StringUtils.countMatches(output, "\t"));
  } finally {
    INPUT_FILE.delete();
    FileUtil.fullyDelete(OUTPUT_DIR.getAbsoluteFile());
  }
}
Example 7
Source File: ColumnCardinalityJobTest.java From Kylin with Apache License 2.0 | 5 votes |
@Test @Ignore("not maintaining") public void testJob() throws Exception { final String input = "src/test/resources/data/test_cal_dt/"; final String output = "target/test-output/column-cardinality/"; FileUtil.fullyDelete(new File(output)); String[] args = { "-input", input, "-output", output, "-cols", "1,2,3,4,5,6,9,0" }; assertEquals("Job failed", 0, ToolRunner.run(new HiveColumnCardinalityJob(), args)); }
Example 8
Source File: TestNameNodeRespectsBindHostKeys.java From big-c with Apache License 2.0 | 5 votes |
private static void setupSsl() throws Exception {
  Configuration conf = new Configuration();
  conf.setBoolean(DFSConfigKeys.DFS_WEBHDFS_ENABLED_KEY, true);
  conf.set(DFSConfigKeys.DFS_HTTP_POLICY_KEY, HttpConfig.Policy.HTTPS_ONLY.name());
  conf.set(DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_KEY, "localhost:0");
  conf.set(DFSConfigKeys.DFS_DATANODE_HTTPS_ADDRESS_KEY, "localhost:0");

  File base = new File(BASEDIR);
  FileUtil.fullyDelete(base);
  assertTrue(base.mkdirs());
  final String keystoresDir = new File(BASEDIR).getAbsolutePath();
  final String sslConfDir = KeyStoreTestUtil.getClasspathDir(TestNameNodeRespectsBindHostKeys.class);

  KeyStoreTestUtil.setupSSLConfig(keystoresDir, sslConfDir, conf, false);
}
Example 9
Source File: TestFileStore.java From jsr203-hadoop with Apache License 2.0 | 5 votes |
private static MiniDFSCluster startMini(String testName) throws IOException {
  File baseDir = new File("./target/hdfs/" + testName).getAbsoluteFile();
  FileUtil.fullyDelete(baseDir);
  Configuration conf = new Configuration();
  conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, baseDir.getAbsolutePath());
  MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(conf);
  MiniDFSCluster hdfsCluster = builder.clusterId(testName).build();
  hdfsCluster.waitActive();
  return hdfsCluster;
}
Example 10
Source File: TestAllowFormat.java From big-c with Apache License 2.0 | 5 votes |
@BeforeClass
public static void setUp() throws Exception {
  config = new Configuration();
  if ( DFS_BASE_DIR.exists() && !FileUtil.fullyDelete(DFS_BASE_DIR) ) {
    throw new IOException("Could not delete hdfs directory '" + DFS_BASE_DIR + "'");
  }

  // Test has multiple name directories.
  // Format should not really prompt us if one of the directories exist,
  // but is empty. So in case the test hangs on an input, it means something
  // could be wrong in the format prompting code. (HDFS-1636)
  LOG.info("hdfsdir is " + DFS_BASE_DIR.getAbsolutePath());
  File nameDir1 = new File(DFS_BASE_DIR, "name1");
  File nameDir2 = new File(DFS_BASE_DIR, "name2");

  // To test multiple directory handling, we pre-create one of the name directories.
  nameDir1.mkdirs();

  // Set multiple name directories.
  config.set(DFS_NAMENODE_NAME_DIR_KEY, nameDir1.getPath() + "," + nameDir2.getPath());
  config.set(DFS_DATANODE_DATA_DIR_KEY, new File(DFS_BASE_DIR, "data").getPath());
  config.set(DFS_NAMENODE_CHECKPOINT_DIR_KEY,new File(DFS_BASE_DIR, "secondary").getPath());

  FileSystem.setDefaultUri(config, "hdfs://"+NAME_NODE_HOST + "0");
}
Example 11
Source File: TestSSLHttpServer.java From hadoop with Apache License 2.0 | 5 votes |
@AfterClass
public static void cleanup() throws Exception {
  server.stop();
  FileUtil.fullyDelete(new File(BASEDIR));
  KeyStoreTestUtil.cleanupSSLConfig(keystoresDir, sslConfDir);
  clientSslFactory.destroy();
}
Example 12
Source File: MiniAvatarCluster.java From RDFS with Apache License 2.0 | 5 votes |
public void cleanupAvatarDirs() throws IOException {
  String[] files = new String[] { fsimagelocal0Dir, fsimagelocal1Dir,
                                  fsimage0Dir, fsimage1Dir,
                                  fseditslocal0Dir, fseditslocal1Dir,
                                  fsedits0Dir, fsedits1Dir };
  for (String filename : files) {
    FileUtil.fullyDelete(new File(filename));
  }
}
Example 13
Source File: TestLeveldbRMStateStore.java From hadoop with Apache License 2.0 | 5 votes |
@After
public void cleanup() throws IOException {
  if (stateStore != null) {
    stateStore.close();
  }
  FileUtil.fullyDelete(TEST_DIR);
}
Example 14
Source File: TestRawBytesStreaming.java From big-c with Apache License 2.0 | 5 votes |
@Test
public void testCommandLine() throws Exception {
  try {
    try {
      FileUtil.fullyDelete(OUTPUT_DIR.getAbsoluteFile());
    } catch (Exception e) {
    }

    createInput();
    OUTPUT_DIR.delete();

    // During tests, the default Configuration will use a local mapred
    // So don't specify -config or -cluster
    StreamJob job = new StreamJob();
    job.setConf(new Configuration());
    job.run(genArgs());
    File outFile = new File(OUTPUT_DIR, "part-00000").getAbsoluteFile();
    String output = StreamUtil.slurp(outFile);
    outFile.delete();
    System.out.println(" map=" + map);
    System.out.println("reduce=" + reduce);
    System.err.println("outEx1=" + outputExpect);
    System.err.println(" out1=" + output);
    assertEquals(outputExpect, output);
  } finally {
    INPUT_FILE.delete();
    FileUtil.fullyDelete(OUTPUT_DIR.getAbsoluteFile());
  }
}
Example 15
Source File: TestNMLeveldbStateStoreService.java From big-c with Apache License 2.0 | 5 votes |
@Before
public void setup() throws IOException {
  FileUtil.fullyDelete(TMP_DIR);
  conf = new YarnConfiguration();
  conf.setBoolean(YarnConfiguration.NM_RECOVERY_ENABLED, true);
  conf.set(YarnConfiguration.NM_RECOVERY_DIR, TMP_DIR.toString());
  restartStateStore();
}
Example 16
Source File: TestQueue.java From big-c with Apache License 2.0 | 4 votes |
@After
public void cleanup() {
  FileUtil.fullyDelete(testDir);
}
Example 17
Source File: TestMRAsyncDiskService.java From hadoop with Apache License 2.0 | 4 votes |
@Override
protected void setUp() {
  FileUtil.fullyDelete(new File(TEST_ROOT_DIR));
}
Example 18
Source File: TestFileOutputCommitter.java From hadoop with Apache License 2.0 | 4 votes |
private void testRecoveryInternal(int commitVersion, int recoveryVersion)
    throws Exception {
  Job job = Job.getInstance();
  FileOutputFormat.setOutputPath(job, outDir);
  Configuration conf = job.getConfiguration();
  conf.set(MRJobConfig.TASK_ATTEMPT_ID, attempt);
  conf.setInt(MRJobConfig.APPLICATION_ATTEMPT_ID, 1);
  conf.setInt(FileOutputCommitter.FILEOUTPUTCOMMITTER_ALGORITHM_VERSION,
      commitVersion);
  JobContext jContext = new JobContextImpl(conf, taskID.getJobID());
  TaskAttemptContext tContext = new TaskAttemptContextImpl(conf, taskID);
  FileOutputCommitter committer = new FileOutputCommitter(outDir, tContext);

  // setup
  committer.setupJob(jContext);
  committer.setupTask(tContext);

  // write output
  TextOutputFormat theOutputFormat = new TextOutputFormat();
  RecordWriter theRecordWriter = theOutputFormat.getRecordWriter(tContext);
  writeOutput(theRecordWriter, tContext);

  // do commit
  committer.commitTask(tContext);
  Path jobTempDir1 = committer.getCommittedTaskPath(tContext);
  File jtd = new File(jobTempDir1.toUri().getPath());
  if (commitVersion == 1) {
    assertTrue("Version 1 commits to temporary dir " + jtd, jtd.exists());
    validateContent(jtd);
  } else {
    assertFalse("Version 2 commits to output dir " + jtd, jtd.exists());
  }

  //now while running the second app attempt,
  //recover the task output from first attempt
  Configuration conf2 = job.getConfiguration();
  conf2.set(MRJobConfig.TASK_ATTEMPT_ID, attempt);
  conf2.setInt(MRJobConfig.APPLICATION_ATTEMPT_ID, 2);
  conf2.setInt(FileOutputCommitter.FILEOUTPUTCOMMITTER_ALGORITHM_VERSION,
      recoveryVersion);
  JobContext jContext2 = new JobContextImpl(conf2, taskID.getJobID());
  TaskAttemptContext tContext2 = new TaskAttemptContextImpl(conf2, taskID);
  FileOutputCommitter committer2 = new FileOutputCommitter(outDir, tContext2);
  committer2.setupJob(tContext2);
  Path jobTempDir2 = committer2.getCommittedTaskPath(tContext2);
  File jtd2 = new File(jobTempDir2.toUri().getPath());

  committer2.recoverTask(tContext2);
  if (recoveryVersion == 1) {
    assertTrue("Version 1 recovers to " + jtd2, jtd2.exists());
    validateContent(jtd2);
  } else {
    assertFalse("Version 2 commits to output dir " + jtd2, jtd2.exists());
    if (commitVersion == 1) {
      assertTrue("Version 2 recovery moves to output dir from " + jtd,
          jtd.list().length == 0);
    }
  }
  committer2.commitJob(jContext2);
  validateContent(outDir);
  FileUtil.fullyDelete(new File(outDir.toString()));
}
Example 19
Source File: TestTaskCommit.java From hadoop with Apache License 2.0 | 4 votes |
@Override
public void tearDown() throws Exception {
  super.tearDown();
  FileUtil.fullyDelete(new File(rootDir.toString()));
}
Example 20
Source File: Storage.java From hadoop-gpu with Apache License 2.0 | 3 votes |
/**
 * Clear and re-create storage directory.
 * <p>
 * Removes contents of the current directory and creates an empty directory.
 *
 * This does not fully format storage directory.
 * It cannot write the version file since it should be written last after
 * all other storage type dependent files are written.
 * Derived storage is responsible for setting specific storage values and
 * writing the version file to disk.
 *
 * @throws IOException
 */
public void clearDirectory() throws IOException {
  File curDir = this.getCurrentDir();
  if (curDir.exists())
    if (!(FileUtil.fullyDelete(curDir)))
      throw new IOException("Cannot remove current directory: " + curDir);
  if (!curDir.mkdirs())
    throw new IOException("Cannot create directory " + curDir);
}