Java Code Examples for org.apache.hadoop.hdfs.DistributedFileSystem#setStoragePolicy()
The following examples show how to use org.apache.hadoop.hdfs.DistributedFileSystem#setStoragePolicy(). Each snippet is taken from the Apache Hadoop source tree; the originating source file and its license are noted above each example.
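Before the project examples, here is a minimal, self-contained sketch of the call. It is not taken from the examples below; the namenode URI and the /data/cold path are illustrative placeholders, and "COLD" is one of the built-in HDFS policies used throughout this page.

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class SetStoragePolicyExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Placeholder URI; in a real deployment the filesystem usually comes
    // from fs.defaultFS in core-site.xml via FileSystem.get(conf).
    try (FileSystem fs = FileSystem.get(URI.create("hdfs://namenode:8020"), conf)) {
      DistributedFileSystem dfs = (DistributedFileSystem) fs;
      // Tag a directory with the built-in COLD policy. New blocks written
      // under it follow the policy; existing blocks are only migrated when
      // the Mover tool runs (see the Mover example below).
      dfs.setStoragePolicy(new Path("/data/cold"), "COLD");
    }
  }
}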
Example 1
Source File: StoragePolicyAdmin.java (from Apache Hadoop, Apache License 2.0)
@Override
public int run(Configuration conf, List<String> args) throws IOException {
  final String path = StringUtils.popOptionWithArgument("-path", args);
  if (path == null) {
    System.err.println("Please specify the path for setting the storage "
        + "policy.\nUsage: " + getLongUsage());
    return 1;
  }
  final String policyName = StringUtils.popOptionWithArgument("-policy", args);
  if (policyName == null) {
    System.err.println("Please specify the policy name.\nUsage: "
        + getLongUsage());
    return 1;
  }
  final DistributedFileSystem dfs = AdminHelper.getDFS(conf);
  try {
    dfs.setStoragePolicy(new Path(path), policyName);
    System.out.println("Set storage policy " + policyName + " on " + path);
  } catch (Exception e) {
    System.err.println(AdminHelper.prettifyException(e));
    return 2;
  }
  return 0;
}
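For reference, this run() method is the programmatic counterpart of the hdfs storagepolicies -setStoragePolicy -path <path> -policy <policy> shell command, which is implemented by this same StoragePolicyAdmin tool; the "-path" and "-policy" options popped above are exactly the arguments that subcommand accepts.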
Example 2
Source File: TestMover.java (from Apache Hadoop, Apache License 2.0)
@Test
public void testMoverFailedRetry() throws Exception {
  // HDFS-8147
  final Configuration conf = new HdfsConfiguration();
  conf.set(DFSConfigKeys.DFS_MOVER_RETRY_MAX_ATTEMPTS_KEY, "2");
  final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
      .numDataNodes(3)
      .storageTypes(new StorageType[][] {
          {StorageType.DISK, StorageType.ARCHIVE},
          {StorageType.DISK, StorageType.ARCHIVE},
          {StorageType.DISK, StorageType.ARCHIVE}})
      .build();
  try {
    cluster.waitActive();
    final DistributedFileSystem dfs = cluster.getFileSystem();
    final String file = "/testMoverFailedRetry";
    // Write to DISK
    final FSDataOutputStream out = dfs.create(new Path(file), (short) 2);
    out.writeChars("testMoverFailedRetry");
    out.close();
    // Delete the block file so the block move fails with FileNotFoundException
    LocatedBlock lb = dfs.getClient().getLocatedBlocks(file, 0).get(0);
    cluster.corruptBlockOnDataNodesByDeletingBlockFile(lb.getBlock());
    // Move to ARCHIVE
    dfs.setStoragePolicy(new Path(file), "COLD");
    int rc = ToolRunner.run(conf, new Mover.Cli(),
        new String[] {"-p", file});
    Assert.assertEquals("Movement should fail after some retries",
        ExitStatus.IO_EXCEPTION.getExitCode(), rc);
  } finally {
    cluster.shutdown();
  }
}
Example 3
Source File: TestFsck.java (from Apache Hadoop, Apache License 2.0)
private void writeFile(final DistributedFileSystem dfs, String dirName,
    String fileName, String storagePolicy) throws IOException {
  Path dirPath = new Path(dirName);
  dfs.mkdirs(dirPath);
  dfs.setStoragePolicy(dirPath, storagePolicy);
  writeFile(dfs, dirPath, fileName);
}
Example 4
Source File: TestFsck.java (from Apache Hadoop, Apache License 2.0)
/** Test storage policy display. */
@Test
public void testStoragePoliciesCK() throws Exception {
  final Configuration conf = new HdfsConfiguration();
  final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
      .numDataNodes(3)
      .storageTypes(new StorageType[] {StorageType.DISK, StorageType.ARCHIVE})
      .build();
  try {
    cluster.waitActive();
    final DistributedFileSystem dfs = cluster.getFileSystem();
    writeFile(dfs, "/testhot", "file", "HOT");
    writeFile(dfs, "/testwarm", "file", "WARM");
    writeFile(dfs, "/testcold", "file", "COLD");
    String outStr = runFsck(conf, 0, true, "/", "-storagepolicies");
    assertTrue(outStr.contains("DISK:3(HOT)"));
    assertTrue(outStr.contains("DISK:1,ARCHIVE:2(WARM)"));
    assertTrue(outStr.contains("ARCHIVE:3(COLD)"));
    assertTrue(outStr.contains(
        "All blocks satisfy specified storage policy."));
    dfs.setStoragePolicy(new Path("/testhot"), "COLD");
    dfs.setStoragePolicy(new Path("/testwarm"), "COLD");
    outStr = runFsck(conf, 0, true, "/", "-storagepolicies");
    assertTrue(outStr.contains("DISK:3(HOT)"));
    assertTrue(outStr.contains("DISK:1,ARCHIVE:2(WARM)"));
    assertTrue(outStr.contains("ARCHIVE:3(COLD)"));
    assertFalse(outStr.contains(
        "All blocks satisfy specified storage policy."));
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
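The assertions illustrate an important point about setStoragePolicy(): it only retags the path. Immediately after /testhot and /testwarm are switched to COLD, fsck (the runFsck helper drives the same check as the hdfs fsck / -storagepolicies command) still reports the old block placements, so the "All blocks satisfy specified storage policy." line disappears until the Mover actually migrates the replicas.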
Example 5
Source File: TestStorageMover.java (from Apache Hadoop, Apache License 2.0)
/** Set storage policies according to the corresponding scheme. */
void setStoragePolicy(DistributedFileSystem dfs) throws Exception {
  for (Map.Entry<Path, BlockStoragePolicy> entry : policyMap.entrySet()) {
    dfs.setStoragePolicy(entry.getKey(), entry.getValue().getName());
  }
}
Example 6
Source File: TestBalancer.java (from Apache Hadoop, Apache License 2.0)
/**
 * Test a special case: two replicas of the same block must not end up on
 * the same node.
 * We have 2 nodes and a block with replicas on (DN0,SSD) and (DN1,DISK).
 * The replica on (DN0,SSD) should not be moved to (DN1,SSD); otherwise
 * DN1 would hold 2 replicas of the block.
 */
@Test(timeout=100000)
public void testTwoReplicaShouldNotInSameDN() throws Exception {
  final Configuration conf = new HdfsConfiguration();
  int blockSize = 5 * 1024 * 1024;
  conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
  conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1L);
  conf.setLong(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, 1L);
  int numOfDatanodes = 2;
  final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
      .numDataNodes(2)
      .racks(new String[]{"/default/rack0", "/default/rack0"})
      .storagesPerDatanode(2)
      .storageTypes(new StorageType[][]{
          {StorageType.SSD, StorageType.DISK},
          {StorageType.SSD, StorageType.DISK}})
      .storageCapacities(new long[][]{
          {100 * blockSize, 20 * blockSize},
          {20 * blockSize, 100 * blockSize}})
      .build();
  try {
    cluster.waitActive();
    // Set the "/bar" directory to the ONE_SSD storage policy.
    DistributedFileSystem fs = cluster.getFileSystem();
    Path barDir = new Path("/bar");
    fs.mkdir(barDir, new FsPermission((short) 777));
    fs.setStoragePolicy(barDir, HdfsConstants.ONESSD_STORAGE_POLICY_NAME);
    // Insert 30 blocks, so (DN0,SSD) and (DN1,DISK) are about half full,
    // and (DN0,DISK) and (DN1,SSD) are about 15% full.
    long fileLen = 30 * blockSize;
    // fooFile has the ONE_SSD policy, so (DN0,SSD) and (DN1,DISK) hold the
    // two replicas of one block, and (DN0,DISK) and (DN1,SSD) hold the two
    // replicas of another.
    Path fooFile = new Path(barDir, "foo");
    createFile(cluster, fooFile, fileLen, (short) numOfDatanodes, 0);
    // Update space info.
    cluster.triggerHeartbeats();

    Balancer.Parameters p = Balancer.Parameters.DEFAULT;
    Collection<URI> namenodes = DFSUtil.getNsServiceRpcUris(conf);
    final int r = Balancer.run(namenodes, p, conf);

    // The replica on (DN0,SSD) was not moved to (DN1,SSD) because (DN1,DISK)
    // already holds a replica of that block; otherwise DN1 would hold 2
    // replicas. For the same reason, no replicas were moved at all.
    assertEquals(ExitStatus.NO_MOVE_PROGRESS.getExitCode(), r);
  } finally {
    cluster.shutdown();
  }
}