Java Code Examples for org.apache.hadoop.fs.FileSystem#truncate()
The following examples show how to use org.apache.hadoop.fs.FileSystem#truncate().
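Before the examples, a minimal sketch of the call itself may help. The path and target length below are hypothetical and not taken from any of the projects listed; the point is only the contract of truncate(Path, long): it returns true when the file is immediately at the new length, and false when a background adjustment of the last block has started, in which case the caller should wait before writing to the file again.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class TruncateSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);

    Path file = new Path("/tmp/data.bin");  // hypothetical file
    long newLength = 1024L;                 // hypothetical target length; must not exceed the current length

    boolean isReady = fs.truncate(file, newLength);
    if (!isReady) {
      // A background process is adjusting the length of the last block;
      // wait for it to finish before appending or truncating again.
      System.out.println("Waiting for last-block recovery to complete...");
    }
    fs.close();
  }
}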
Example 1
Source File: BaseTestHttpFSWith.java From hadoop with Apache License 2.0
private void testTruncate() throws Exception {
  if (!isLocalFS()) {
    final short repl = 3;
    final int blockSize = 1024;
    final int numOfBlocks = 2;
    FileSystem fs = FileSystem.get(getProxiedFSConf());
    fs.mkdirs(getProxiedFSTestDir());
    Path file = new Path(getProxiedFSTestDir(), "foo.txt");
    final byte[] data = FileSystemTestHelper.getFileData(
        numOfBlocks, blockSize);
    FileSystemTestHelper.createFile(fs, file, data, blockSize, repl);

    final int newLength = blockSize;

    // Truncating exactly at a block boundary completes immediately,
    // so no block recovery is expected and truncate() returns true.
    boolean isReady = fs.truncate(file, newLength);
    Assert.assertTrue("Recovery is not expected.", isReady);

    FileStatus fileStatus = fs.getFileStatus(file);
    Assert.assertEquals(fileStatus.getLen(), newLength);
    AppendTestUtil.checkFullFile(fs, file, newLength, data,
        file.toString());
    fs.close();
  }
}
Example 2
Source File: TestDagManifestFileScanner.java From tez with Apache License 2.0
private void corruptFiles() throws IOException {
  int op = 0;
  Configuration conf = manifestLogger.getConfig();
  Path base = new Path(
      conf.get(TezConfiguration.TEZ_HISTORY_LOGGING_PROTO_BASE_DIR) + "/dag_meta");
  FileSystem fs = base.getFileSystem(conf);
  for (FileStatus status : fs.listStatus(base)) {
    if (status.isDirectory()) {
      for (FileStatus file : fs.listStatus(status.getPath())) {
        if (!file.getPath().getName().startsWith("application_")) {
          continue;
        }
        // Damage the manifest files in different ways: chop 20 bytes off the
        // first file, truncate the second to zero length, leave the third
        // untouched, and remember the fourth so it can be deleted later.
        switch (op) {
          case 0:
          case 1:
            fs.truncate(file.getPath(), op == 1 ? 0 : file.getLen() - 20);
            break;
          case 3:
            deleteFilePath = file.getPath();
            break;
        }
        op++;
      }
    }
  }
}
Example 3
Source File: TestHAAppend.java From hadoop with Apache License 2.0
/**
 * Test to verify the processing of PendingDataNodeMessageQueue in case of
 * append. One block will be marked as corrupt if the OP_ADD and OP_UPDATE_BLOCKS
 * edits come in one edit log segment and the OP_CLOSE edit comes in the next
 * log segment, which is loaded during failover. Regression test for HDFS-3605.
 */
@Test
public void testMultipleAppendsDuringCatchupTailing() throws Exception {
  Configuration conf = new Configuration();

  // Set a lengthy edits tailing period, and explicit rolling, so we can
  // control the ingest of edits by the standby for this test.
  conf.set(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, "5000");
  conf.setInt(DFSConfigKeys.DFS_HA_LOGROLL_PERIOD_KEY, -1);
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
      .nnTopology(MiniDFSNNTopology.simpleHATopology())
      .numDataNodes(3).build();
  FileSystem fs = null;
  try {
    cluster.transitionToActive(0);
    fs = HATestUtil.configureFailoverFs(cluster, conf);
    Path fileToAppend = new Path("/FileToAppend");
    Path fileToTruncate = new Path("/FileToTruncate");

    final byte[] data = new byte[1 << 16];
    DFSUtil.getRandom().nextBytes(data);
    final int[] appendPos = AppendTestUtil.randomFilePartition(
        data.length, COUNT);
    final int[] truncatePos = AppendTestUtil.randomFilePartition(
        data.length, 1);

    // Create file, write some data, and hflush so that the first
    // block is in the edit log prior to roll.
    FSDataOutputStream out = createAndHflush(
        fs, fileToAppend, data, appendPos[0]);

    FSDataOutputStream out4Truncate = createAndHflush(
        fs, fileToTruncate, data, data.length);

    // Let the StandbyNode catch the creation of the file.
    cluster.getNameNode(0).getRpcServer().rollEditLog();
    cluster.getNameNode(1).getNamesystem().getEditLogTailer().doTailEdits();
    out.close();
    out4Truncate.close();

    // Append and re-close a few times, so that many block entries are queued.
    for (int i = 0; i < COUNT; i++) {
      int end = i < COUNT - 1 ? appendPos[i + 1] : data.length;
      out = fs.append(fileToAppend);
      out.write(data, appendPos[i], end - appendPos[i]);
      out.close();
    }

    boolean isTruncateReady = fs.truncate(fileToTruncate, truncatePos[0]);

    // Ensure that blocks have been reported to the SBN ahead of the edits
    // arriving.
    cluster.triggerBlockReports();

    // Failover the current standby to active.
    cluster.shutdownNameNode(0);
    cluster.transitionToActive(1);

    // Check the FSCK doesn't detect any bad blocks on the SBN.
    int rc = ToolRunner.run(new DFSck(cluster.getConfiguration(1)),
        new String[] { "/", "-files", "-blocks" });
    assertEquals(0, rc);

    assertEquals("CorruptBlocks should be empty.", 0, cluster.getNameNode(1)
        .getNamesystem().getCorruptReplicaBlocks());

    AppendTestUtil.checkFullFile(fs, fileToAppend, data.length, data,
        fileToAppend.toString());

    if (!isTruncateReady) {
      TestFileTruncate.checkBlockRecovery(fileToTruncate,
          cluster.getFileSystem(1));
    }
    AppendTestUtil.checkFullFile(fs, fileToTruncate, truncatePos[0], data,
        fileToTruncate.toString());
  } finally {
    if (null != cluster) {
      cluster.shutdown();
    }
    if (null != fs) {
      fs.close();
    }
  }
}
Example 4
Source File: FSOperations.java From hadoop with Apache License 2.0
/**
 * Executes the filesystem operation.
 *
 * @param fs filesystem instance to use.
 *
 * @return <code>true</code> if the file has been truncated to the desired
 *         <code>newLength</code>, <code>false</code> if a background process
 *         of adjusting the length of the last block has been started, and
 *         clients should wait for it to complete before proceeding with
 *         further file updates.
 *
 * @throws IOException thrown if an IO error occurred.
 */
@Override
public JSONObject execute(FileSystem fs) throws IOException {
  boolean result = fs.truncate(path, newLength);
  return toJSON(
      StringUtils.toLowerCase(HttpFSFileSystem.TRUNCATE_JSON), result);
}
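The javadoc above is explicit that a false result means clients should wait for the last-block adjustment to finish before making further file updates. The HDFS tests do this by polling block state (see TestFileTruncate.checkBlockRecovery in Example 3). As a rough client-side illustration only, and assuming that the length reported by getFileStatus() settles at the requested newLength once recovery completes, a caller could poll like this (the helper name, poll interval, and timeout are all arbitrary choices, not part of any Hadoop API):

import java.io.IOException;

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public final class TruncateWait {
  // Illustrative helper: poll until the reported file length matches the
  // requested length, on the assumption that it converges once the
  // last-block recovery started by truncate() has finished.
  public static void waitForTruncate(FileSystem fs, Path path, long newLength)
      throws IOException, InterruptedException {
    long deadline = System.currentTimeMillis() + 30_000L;  // arbitrary 30 s timeout
    while (fs.getFileStatus(path).getLen() != newLength) {
      if (System.currentTimeMillis() > deadline) {
        throw new IOException("Timed out waiting for truncate recovery on " + path);
      }
      Thread.sleep(300L);  // arbitrary poll interval
    }
  }
}

A caller would pair it with the operation above along the lines of: if (!fs.truncate(path, newLength)) { TruncateWait.waitForTruncate(fs, path, newLength); }.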