Java Code Examples for org.apache.hadoop.fs.LocalFileSystem#delete()
The following examples show how to use org.apache.hadoop.fs.LocalFileSystem#delete(). You can go to the original project or source file by following the links above each example.
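Before the project-specific examples, here is a minimal, self-contained sketch of the call itself. LocalFileSystem#delete(Path, boolean) removes a file or directory on the local filesystem; passing true for the second argument deletes a directory recursively, and the method returns true if the path existed and was removed. The class name LocalFsDeleteDemo and the scratch path /tmp/lfs-delete-demo are illustrative assumptions, not taken from any of the projects below.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.LocalFileSystem;
import org.apache.hadoop.fs.Path;

public class LocalFsDeleteDemo {
  public static void main(String[] args) throws Exception {
    // Obtain the local filesystem implementation (file:// scheme).
    LocalFileSystem localFs = FileSystem.getLocal(new Configuration());

    // Hypothetical scratch directory used only for this sketch.
    Path dir = new Path("/tmp/lfs-delete-demo");
    localFs.mkdirs(dir);

    // delete(path, recursive): recursive = true removes the directory
    // and its contents; the call returns true on successful deletion.
    boolean deleted = localFs.delete(dir, true);
    System.out.println("deleted: " + deleted);
  }
}

As the examples below show, a common pattern in test code is to call delete(path, true) before mkdirs(path) to guarantee a clean directory, and again in teardown to remove test output.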
Example 1
Source File: TestFSInputChecker.java From hadoop with Apache License 2.0
private void testFileCorruption(LocalFileSystem fileSys) throws IOException {
  // create a file and verify that checksum corruption results in
  // a checksum exception on LocalFS
  String dir = PathUtils.getTestDirName(getClass());
  Path file = new Path(dir + "/corruption-test.dat");
  Path crcFile = new Path(dir + "/.corruption-test.dat.crc");

  writeFile(fileSys, file);

  int fileLen = (int) fileSys.getFileStatus(file).getLen();

  byte[] buf = new byte[fileLen];

  InputStream in = fileSys.open(file);
  IOUtils.readFully(in, buf, 0, buf.length);
  in.close();

  // check .crc corruption
  checkFileCorruption(fileSys, file, crcFile);
  fileSys.delete(file, true);

  writeFile(fileSys, file);

  // check data corruption
  checkFileCorruption(fileSys, file, file);
  fileSys.delete(file, true);
}
Example 2
Source File: TestHadoopArchives.java From hadoop with Apache License 2.0
@Test
/*
 * Tests copying from archive file system to a local file system
 */
public void testCopyToLocal() throws Exception {
  final String fullHarPathStr = makeArchive();

  // make path to copy the file to:
  final String tmpDir =
      System.getProperty("test.build.data","build/test/data")
          + "/work-dir/har-fs-tmp";
  final Path tmpPath = new Path(tmpDir);
  final LocalFileSystem localFs = FileSystem.getLocal(new Configuration());
  localFs.delete(tmpPath, true);
  localFs.mkdirs(tmpPath);
  assertTrue(localFs.exists(tmpPath));

  // Create fresh HarFs:
  final HarFileSystem harFileSystem = new HarFileSystem(fs);
  try {
    final URI harUri = new URI(fullHarPathStr);
    harFileSystem.initialize(harUri, fs.getConf());

    final Path sourcePath = new Path(fullHarPathStr + Path.SEPARATOR + "a");
    final Path targetPath = new Path(tmpPath, "straus");

    // copy the Har file to a local file system:
    harFileSystem.copyToLocalFile(false, sourcePath, targetPath);
    FileStatus straus = localFs.getFileStatus(targetPath);
    // the file should contain just 1 character:
    assertEquals(1, straus.getLen());
  } finally {
    harFileSystem.close();
    localFs.delete(tmpPath, true);
  }
}
Example 3
Source File: TestBloomMapFile.java From hadoop with Apache License 2.0
@Override
public void setUp() throws Exception {
  LocalFileSystem fs = FileSystem.getLocal(conf);
  if (fs.exists(TEST_ROOT) && !fs.delete(TEST_ROOT, true)) {
    Assert.fail("Can't clean up test root dir");
  }
  fs.mkdirs(TEST_ROOT);
}
Example 4
Source File: TestMapFile.java From hadoop with Apache License 2.0
@Before
public void setup() throws Exception {
  LocalFileSystem fs = FileSystem.getLocal(conf);
  if (fs.exists(TEST_DIR) && !fs.delete(TEST_DIR, true)) {
    Assert.fail("Can't clean up test root dir");
  }
  fs.mkdirs(TEST_DIR);
}
Example 5
Source File: TestFSInputChecker.java From big-c with Apache License 2.0
private void testFileCorruption(LocalFileSystem fileSys) throws IOException {
  // create a file and verify that checksum corruption results in
  // a checksum exception on LocalFS
  String dir = PathUtils.getTestDirName(getClass());
  Path file = new Path(dir + "/corruption-test.dat");
  Path crcFile = new Path(dir + "/.corruption-test.dat.crc");

  writeFile(fileSys, file);

  int fileLen = (int) fileSys.getFileStatus(file).getLen();

  byte[] buf = new byte[fileLen];

  InputStream in = fileSys.open(file);
  IOUtils.readFully(in, buf, 0, buf.length);
  in.close();

  // check .crc corruption
  checkFileCorruption(fileSys, file, crcFile);
  fileSys.delete(file, true);

  writeFile(fileSys, file);

  // check data corruption
  checkFileCorruption(fileSys, file, file);
  fileSys.delete(file, true);
}
Example 6
Source File: TestHadoopArchives.java From big-c with Apache License 2.0
@Test
/*
 * Tests copying from archive file system to a local file system
 */
public void testCopyToLocal() throws Exception {
  final String fullHarPathStr = makeArchive();

  // make path to copy the file to:
  final String tmpDir =
      System.getProperty("test.build.data","build/test/data")
          + "/work-dir/har-fs-tmp";
  final Path tmpPath = new Path(tmpDir);
  final LocalFileSystem localFs = FileSystem.getLocal(new Configuration());
  localFs.delete(tmpPath, true);
  localFs.mkdirs(tmpPath);
  assertTrue(localFs.exists(tmpPath));

  // Create fresh HarFs:
  final HarFileSystem harFileSystem = new HarFileSystem(fs);
  try {
    final URI harUri = new URI(fullHarPathStr);
    harFileSystem.initialize(harUri, fs.getConf());

    final Path sourcePath = new Path(fullHarPathStr + Path.SEPARATOR + "a");
    final Path targetPath = new Path(tmpPath, "straus");

    // copy the Har file to a local file system:
    harFileSystem.copyToLocalFile(false, sourcePath, targetPath);
    FileStatus straus = localFs.getFileStatus(targetPath);
    // the file should contain just 1 character:
    assertEquals(1, straus.getLen());
  } finally {
    harFileSystem.close();
    localFs.delete(tmpPath, true);
  }
}
Example 7
Source File: TestBloomMapFile.java From big-c with Apache License 2.0
@Override
public void setUp() throws Exception {
  LocalFileSystem fs = FileSystem.getLocal(conf);
  if (fs.exists(TEST_ROOT) && !fs.delete(TEST_ROOT, true)) {
    Assert.fail("Can't clean up test root dir");
  }
  fs.mkdirs(TEST_ROOT);
}
Example 8
Source File: TestMapFile.java From big-c with Apache License 2.0
@Before
public void setup() throws Exception {
  LocalFileSystem fs = FileSystem.getLocal(conf);
  if (fs.exists(TEST_DIR) && !fs.delete(TEST_DIR, true)) {
    Assert.fail("Can't clean up test root dir");
  }
  fs.mkdirs(TEST_DIR);
}
Example 9
Source File: MapReduceTest.java From vespa with Apache License 2.0
@AfterClass
public static void tearDown() throws IOException {
  Path testDir = new Path(hdfsBaseDir.getParent());
  hdfs.delete(testDir, true);
  cluster.shutdown();
  LocalFileSystem localFileSystem = FileSystem.getLocal(conf);
  localFileSystem.delete(testDir, true);
}
Example 10
Source File: TestFSInputChecker.java From RDFS with Apache License 2.0
private void testFileCorruption(LocalFileSystem fileSys) throws IOException {
  // create a file and verify that checksum corruption results in
  // a checksum exception on LocalFS
  String dir = System.getProperty("test.build.data", ".");
  Path file = new Path(dir + "/corruption-test.dat");
  Path crcFile = new Path(dir + "/.corruption-test.dat.crc");

  writeFile(fileSys, file);

  int fileLen = (int) fileSys.getFileStatus(file).getLen();

  byte[] buf = new byte[fileLen];

  InputStream in = fileSys.open(file);
  IOUtils.readFully(in, buf, 0, buf.length);
  in.close();

  // check .crc corruption
  checkFileCorruption(fileSys, file, crcFile);
  fileSys.delete(file, true);

  writeFile(fileSys, file);

  // check data corruption
  checkFileCorruption(fileSys, file, file);
  fileSys.delete(file, true);
}
Example 11
Source File: ClusterWithCapacityScheduler.java From RDFS with Apache License 2.0
private void cleanUpSchedulerConfigFile() throws IOException {
  LocalFileSystem fs = FileSystem.getLocal(new Configuration());

  String myResourcePath = System.getProperty("test.build.data");
  Path schedulerConfigFilePath =
      new Path(myResourcePath, CapacitySchedulerConf.SCHEDULER_CONF_FILE);
  fs.delete(schedulerConfigFilePath, false);
}
Example 12
Source File: TestFSInputChecker.java From hadoop-gpu with Apache License 2.0
private void testFileCorruption(LocalFileSystem fileSys) throws IOException {
  // create a file and verify that checksum corruption results in
  // a checksum exception on LocalFS
  String dir = System.getProperty("test.build.data", ".");
  Path file = new Path(dir + "/corruption-test.dat");
  Path crcFile = new Path(dir + "/.corruption-test.dat.crc");

  writeFile(fileSys, file);

  int fileLen = (int) fileSys.getFileStatus(file).getLen();

  byte[] buf = new byte[fileLen];

  InputStream in = fileSys.open(file);
  IOUtils.readFully(in, buf, 0, buf.length);
  in.close();

  // check .crc corruption
  checkFileCorruption(fileSys, file, crcFile);
  fileSys.delete(file, true);

  writeFile(fileSys, file);

  // check data corruption
  checkFileCorruption(fileSys, file, file);
  fileSys.delete(file, true);
}
Example 13
Source File: ClusterWithCapacityScheduler.java From hadoop-gpu with Apache License 2.0
private void cleanUpSchedulerConfigFile() throws IOException {
  LocalFileSystem fs = FileSystem.getLocal(new Configuration());

  String myResourcePath = System.getProperty("test.build.data");
  Path schedulerConfigFilePath =
      new Path(myResourcePath, CapacitySchedulerConf.SCHEDULER_CONF_FILE);
  fs.delete(schedulerConfigFilePath, false);
}
Example 14
Source File: S3AFileSystem.java From hadoop with Apache License 2.0
/**
 * The src file is on the local disk. Add it to FS at
 * the given dst name.
 *
 * This version doesn't need to create a temporary file to calculate the md5.
 * Sadly this doesn't seem to be used by the shell cp :(
 *
 * delSrc indicates if the source should be removed
 * @param delSrc whether to delete the src
 * @param overwrite whether to overwrite an existing file
 * @param src path
 * @param dst path
 */
@Override
public void copyFromLocalFile(boolean delSrc, boolean overwrite,
    Path src, Path dst) throws IOException {
  String key = pathToKey(dst);

  if (!overwrite && exists(dst)) {
    throw new IOException(dst + " already exists");
  }
  if (LOG.isDebugEnabled()) {
    LOG.debug("Copying local file from " + src + " to " + dst);
  }

  // Since we have a local file, we don't need to stream into a temporary file
  LocalFileSystem local = getLocal(getConf());
  File srcfile = local.pathToFile(src);

  final ObjectMetadata om = new ObjectMetadata();
  if (StringUtils.isNotBlank(serverSideEncryptionAlgorithm)) {
    om.setServerSideEncryption(serverSideEncryptionAlgorithm);
  }
  PutObjectRequest putObjectRequest =
      new PutObjectRequest(bucket, key, srcfile);
  putObjectRequest.setCannedAcl(cannedACL);
  putObjectRequest.setMetadata(om);

  ProgressListener progressListener = new ProgressListener() {
    public void progressChanged(ProgressEvent progressEvent) {
      switch (progressEvent.getEventCode()) {
        case ProgressEvent.PART_COMPLETED_EVENT_CODE:
          statistics.incrementWriteOps(1);
          break;
        default:
          break;
      }
    }
  };

  Upload up = transfers.upload(putObjectRequest);
  up.addProgressListener(progressListener);
  try {
    up.waitForUploadResult();
    statistics.incrementWriteOps(1);
  } catch (InterruptedException e) {
    throw new IOException("Got interrupted, cancelling");
  }

  // This will delete unnecessary fake parent directories
  finishedWrite(key);

  if (delSrc) {
    local.delete(src, false);
  }
}
Example 15
Source File: S3AFileSystem.java From big-c with Apache License 2.0
/**
 * The src file is on the local disk. Add it to FS at
 * the given dst name.
 *
 * This version doesn't need to create a temporary file to calculate the md5.
 * Sadly this doesn't seem to be used by the shell cp :(
 *
 * delSrc indicates if the source should be removed
 * @param delSrc whether to delete the src
 * @param overwrite whether to overwrite an existing file
 * @param src path
 * @param dst path
 */
@Override
public void copyFromLocalFile(boolean delSrc, boolean overwrite,
    Path src, Path dst) throws IOException {
  String key = pathToKey(dst);

  if (!overwrite && exists(dst)) {
    throw new IOException(dst + " already exists");
  }
  if (LOG.isDebugEnabled()) {
    LOG.debug("Copying local file from " + src + " to " + dst);
  }

  // Since we have a local file, we don't need to stream into a temporary file
  LocalFileSystem local = getLocal(getConf());
  File srcfile = local.pathToFile(src);

  final ObjectMetadata om = new ObjectMetadata();
  if (StringUtils.isNotBlank(serverSideEncryptionAlgorithm)) {
    om.setServerSideEncryption(serverSideEncryptionAlgorithm);
  }
  PutObjectRequest putObjectRequest =
      new PutObjectRequest(bucket, key, srcfile);
  putObjectRequest.setCannedAcl(cannedACL);
  putObjectRequest.setMetadata(om);

  ProgressListener progressListener = new ProgressListener() {
    public void progressChanged(ProgressEvent progressEvent) {
      switch (progressEvent.getEventCode()) {
        case ProgressEvent.PART_COMPLETED_EVENT_CODE:
          statistics.incrementWriteOps(1);
          break;
        default:
          break;
      }
    }
  };

  Upload up = transfers.upload(putObjectRequest);
  up.addProgressListener(progressListener);
  try {
    up.waitForUploadResult();
    statistics.incrementWriteOps(1);
  } catch (InterruptedException e) {
    throw new IOException("Got interrupted, cancelling");
  }

  // This will delete unnecessary fake parent directories
  finishedWrite(key);

  if (delSrc) {
    local.delete(src, false);
  }
}