Java Code Examples for org.apache.hadoop.fs.BlockLocation#getNames()
The following examples show how to use
org.apache.hadoop.fs.BlockLocation#getNames().
You can vote up the examples you like or vote down the ones you don't,
and go to the original project or source file by following the links above each example. You may check out the related API usage in the sidebar.
Example 1
Source File: TestFavoredNodesEndToEnd.java From hadoop with Apache License 2.0 | 6 votes |
@Test(timeout=180000) public void testFavoredNodesEndToEnd() throws Exception { //create 10 files with random preferred nodes for (int i = 0; i < NUM_FILES; i++) { Random rand = new Random(System.currentTimeMillis() + i); //pass a new created rand so as to get a uniform distribution each time //without too much collisions (look at the do-while loop in getDatanodes) InetSocketAddress datanode[] = getDatanodes(rand); Path p = new Path("/filename"+i); FSDataOutputStream out = dfs.create(p, FsPermission.getDefault(), true, 4096, (short)3, 4096L, null, datanode); out.write(SOME_BYTES); out.close(); BlockLocation[] locations = getBlockLocations(p); //verify the files got created in the right nodes for (BlockLocation loc : locations) { String[] hosts = loc.getNames(); String[] hosts1 = getStringForInetSocketAddrs(datanode); assertTrue(compareNodes(hosts, hosts1)); } } }
Example 2
Source File: TestFavoredNodesEndToEnd.java From hadoop with Apache License 2.0 | 6 votes |
@Test(timeout = 180000) public void testFavoredNodesEndToEndForAppend() throws Exception { // create 10 files with random preferred nodes for (int i = 0; i < NUM_FILES; i++) { Random rand = new Random(System.currentTimeMillis() + i); // pass a new created rand so as to get a uniform distribution each time // without too much collisions (look at the do-while loop in getDatanodes) InetSocketAddress datanode[] = getDatanodes(rand); Path p = new Path("/filename" + i); // create and close the file. dfs.create(p, FsPermission.getDefault(), true, 4096, (short) 3, 4096L, null, null).close(); // re-open for append FSDataOutputStream out = dfs.append(p, EnumSet.of(CreateFlag.APPEND), 4096, null, datanode); out.write(SOME_BYTES); out.close(); BlockLocation[] locations = getBlockLocations(p); // verify the files got created in the right nodes for (BlockLocation loc : locations) { String[] hosts = loc.getNames(); String[] hosts1 = getStringForInetSocketAddrs(datanode); assertTrue(compareNodes(hosts, hosts1)); } } }
Example 3
Source File: TestFavoredNodesEndToEnd.java From big-c with Apache License 2.0 | 6 votes |
@Test(timeout=180000) public void testFavoredNodesEndToEnd() throws Exception { //create 10 files with random preferred nodes for (int i = 0; i < NUM_FILES; i++) { Random rand = new Random(System.currentTimeMillis() + i); //pass a new created rand so as to get a uniform distribution each time //without too much collisions (look at the do-while loop in getDatanodes) InetSocketAddress datanode[] = getDatanodes(rand); Path p = new Path("/filename"+i); FSDataOutputStream out = dfs.create(p, FsPermission.getDefault(), true, 4096, (short)3, 4096L, null, datanode); out.write(SOME_BYTES); out.close(); BlockLocation[] locations = getBlockLocations(p); //verify the files got created in the right nodes for (BlockLocation loc : locations) { String[] hosts = loc.getNames(); String[] hosts1 = getStringForInetSocketAddrs(datanode); assertTrue(compareNodes(hosts, hosts1)); } } }
Example 4
Source File: TestFavoredNodesEndToEnd.java From big-c with Apache License 2.0 | 6 votes |
@Test(timeout = 180000) public void testFavoredNodesEndToEndForAppend() throws Exception { // create 10 files with random preferred nodes for (int i = 0; i < NUM_FILES; i++) { Random rand = new Random(System.currentTimeMillis() + i); // pass a new created rand so as to get a uniform distribution each time // without too much collisions (look at the do-while loop in getDatanodes) InetSocketAddress datanode[] = getDatanodes(rand); Path p = new Path("/filename" + i); // create and close the file. dfs.create(p, FsPermission.getDefault(), true, 4096, (short) 3, 4096L, null, null).close(); // re-open for append FSDataOutputStream out = dfs.append(p, EnumSet.of(CreateFlag.APPEND), 4096, null, datanode); out.write(SOME_BYTES); out.close(); BlockLocation[] locations = getBlockLocations(p); // verify the files got created in the right nodes for (BlockLocation loc : locations) { String[] hosts = loc.getNames(); String[] hosts1 = getStringForInetSocketAddrs(datanode); assertTrue(compareNodes(hosts, hosts1)); } } }
Example 5
Source File: TestSwiftFileSystemBlockLocation.java From hadoop with Apache License 2.0 | 5 votes |
private void assertLocationValid(BlockLocation location) throws IOException { LOG.info(location); String[] hosts = location.getHosts(); String[] names = location.getNames(); assertNotEqual("No hosts supplied for " + location, 0, hosts.length); //for every host, there's a name. assertEquals("Unequal names and hosts in " + location, hosts.length, names.length); assertEquals(SwiftProtocolConstants.BLOCK_LOCATION, location.getNames()[0]); assertEquals(SwiftProtocolConstants.TOPOLOGY_PATH, location.getTopologyPaths()[0]); }
Example 6
Source File: Main.java From hdfs-metadata with GNU General Public License v3.0 | 5 votes |
/**
 * Prints the metadata of a single block to stdout: offset, length, cached
 * hosts, and one entry per replica (host, disk location, name, topology path).
 *
 * @param blockLocation block whose metadata is printed; if it is a
 *                      {@code BlockStorageLocation}, per-replica volume ids
 *                      are resolved to disk ids
 * @param dataDirs      datanode data directories indexed by disk id, used to
 *                      print a full path for a replica's disk; may be null
 * @throws IOException declared for filesystem metadata access
 */
private void printBlockMetadata(BlockLocation blockLocation, String[] dataDirs)
		throws IOException {

	System.out.println("	Offset: " + blockLocation.getOffset());
	System.out.println("	Length: " + blockLocation.getLength());

	String[] cachedHosts = blockLocation.getCachedHosts();
	if (cachedHosts.length == 0) {
		System.out.println("	No cached hosts");
	}

	System.out.println("	Replicas:");
	// Volume ids are only available when the location came from
	// getFileBlockStorageLocations(); otherwise disk ids are unknown.
	VolumeId[] volumeIds = blockLocation instanceof BlockStorageLocation ?
			(((BlockStorageLocation) blockLocation).getVolumeIds()) : null;
	String[] hosts = blockLocation.getHosts();
	String[] names = blockLocation.getNames();
	String[] topologyPaths = blockLocation.getTopologyPaths();
	// NOTE(review): iterates over topologyPaths.length while also indexing
	// hosts[i] and names[i] — assumes all three arrays have equal length.
	for (int i = 0; i < topologyPaths.length; i++) {
		// -1 marks an unknown disk id (no volume info for this replica).
		int diskId = volumeIds != null ?
				DistributedFileSystemMetadata.getDiskId(volumeIds[i]) : -1;

		System.out.println("		Replica (" + i + "):");
		System.out.println("			Host: " + hosts[i]);

		if(diskId == -1)
			System.out.println("			DiskId: unknown");
		else if(dataDirs != null && diskId < dataDirs.length)
			// Disk id resolves to a configured data directory: print the path.
			System.out.println("			Location: " + dataDirs[diskId] + " (DiskId: " + diskId + ")");
		else
			System.out.println("			DiskId: " + diskId);

		System.out.println("			Name: " + names[i]);
		System.out.println("			TopologyPaths: " + topologyPaths[i]);
	}

	if (cachedHosts.length > 0) {
		System.out.println("	Cached hosts:");
		for (String cachedHost : cachedHosts) {
			System.out.println("		Host: " + cachedHost);
		}
	}
}
Example 7
Source File: TestSwiftFileSystemBlockLocation.java From big-c with Apache License 2.0 | 5 votes |
private void assertLocationValid(BlockLocation location) throws IOException { LOG.info(location); String[] hosts = location.getHosts(); String[] names = location.getNames(); assertNotEqual("No hosts supplied for " + location, 0, hosts.length); //for every host, there's a name. assertEquals("Unequal names and hosts in " + location, hosts.length, names.length); assertEquals(SwiftProtocolConstants.BLOCK_LOCATION, location.getNames()[0]); assertEquals(SwiftProtocolConstants.TOPOLOGY_PATH, location.getTopologyPaths()[0]); }
Example 8
Source File: HadoopIgfsSecondaryFileSystemDelegateImpl.java From ignite with Apache License 2.0 | 5 votes |
/**
 * Convert a Hadoop affinity block location into an IGFS affinity block
 * location.
 *
 * @param block Hadoop affinity block location.
 * @return IGFS affinity block location.
 */
private IgfsBlockLocation convertBlockLocation(BlockLocation block) {
    try {
        // Inline the name/host arrays straight into the IGFS location.
        return new IgfsBlockLocationImpl(
            block.getOffset(),
            block.getLength(),
            Arrays.asList(block.getNames()),
            Arrays.asList(block.getHosts()));
    }
    catch (IOException e) {
        throw handleSecondaryFsError(e, "Failed convert block location: " + block);
    }
}
Example 9
Source File: RaidShell.java From RDFS with Apache License 2.0 | 5 votes |
private boolean isBlockCorrupt(BlockLocation fileBlock) throws IOException { if (fileBlock == null) // empty block return false; return fileBlock.isCorrupt() || (fileBlock.getNames().length == 0 && fileBlock.getLength() > 0); }
Example 10
Source File: TestSwiftFileSystemBlockLocation.java From sahara-extra with Apache License 2.0 | 5 votes |
private void assertLocationValid(BlockLocation location) throws IOException { LOG.info(location); String[] hosts = location.getHosts(); String[] names = location.getNames(); assertNotEqual("No hosts supplied for " + location, 0, hosts.length); //for every host, there's a name. assertEquals("Unequal names and hosts in " + location, hosts.length, names.length); assertEquals(SwiftProtocolConstants.BLOCK_LOCATION, location.getNames()[0]); assertEquals(SwiftProtocolConstants.TOPOLOGY_PATH, location.getTopologyPaths()[0]); }
Example 11
Source File: TestFileAppend4.java From RDFS with Apache License 2.0 | 4 votes |
private void waitForBlockReplication(FileSystem whichfs, String filename, int expected, long maxWaitSec) throws IOException { long start = System.currentTimeMillis(); //wait for all the blocks to be replicated; LOG.info("Checking for block replication for " + filename); int iters = 0; while (true) { boolean replOk = true; BlockLocation[] bl = whichfs.getFileBlockLocations( whichfs.getFileStatus(file1), 0, BLOCK_SIZE); if(bl.length == 0) { replOk = false; } for (BlockLocation b : bl) { int actual = b.getNames().length; if ( actual < expected ) { LOG.info("Not enough replicas for " + b + " yet. Expecting " + expected + ", got " + actual + "."); replOk = false; break; } } if (replOk) { return; } iters++; if (maxWaitSec > 0 && (System.currentTimeMillis() - start) > (maxWaitSec * 1000)) { throw new IOException("Timedout while waiting for all blocks to " + " be replicated for " + filename); } try { Thread.sleep(1000); } catch (InterruptedException ignored) {} } }