org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations.BlockWithLocations Java Examples
The following examples show how to use org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations.BlockWithLocations. Each example is drawn from an open-source project; the source file, project, and license are noted above each snippet.
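Before the examples, a minimal sketch of constructing a BlockWithLocations and reading it back may help. It assumes the four-argument constructor (block, datanode UUIDs, storage IDs, storage types) that most of the examples below use; note that the import path of StorageType has moved between Hadoop releases (org.apache.hadoop.fs.StorageType in newer ones), so verify it against your version. The class name and values here are illustrative only.

import java.util.Arrays;
import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations;
import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations.BlockWithLocations;

public class BlockWithLocationsSketch {
  public static void main(String[] args) {
    // A block with id 42, length 0 bytes, and generation stamp 1.
    Block block = new Block(42L, 0L, 1L);

    // Parallel arrays: index i of each array describes the i-th replica.
    String[] datanodeUuids = { "dn1", "dn2" };
    String[] storageIDs = { "s1", "s2" };
    StorageType[] storageTypes = { StorageType.DISK, StorageType.DISK };

    BlockWithLocations blk = new BlockWithLocations(
        block, datanodeUuids, storageIDs, storageTypes);

    // Read the fields back, as the Balancer's Dispatcher does in the
    // examples below.
    System.out.println("block bytes: " + blk.getBlock().getNumBytes());
    System.out.println("datanodes: " + Arrays.toString(blk.getDatanodeUuids()));
    System.out.println("storages: " + Arrays.toString(blk.getStorageIDs()));

    // BlocksWithLocations wraps a batch of blocks, as returned by
    // NamenodeProtocol#getBlocks.
    BlocksWithLocations batch =
        new BlocksWithLocations(new BlockWithLocations[] { blk });
    System.out.println("blocks in batch: " + batch.getBlocks().length);
  }
}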
Example #1
Source File: FSNamesystem.java From hadoop-gpu with Apache License 2.0
/**
 * Get all valid locations of the block & add the block to results
 * return the length of the added block; 0 if the block is not added
 */
private long addBlock(Block block, List<BlockWithLocations> results) {
  ArrayList<String> machineSet =
      new ArrayList<String>(blocksMap.numNodes(block));
  for (Iterator<DatanodeDescriptor> it = blocksMap.nodeIterator(block);
       it.hasNext();) {
    String storageID = it.next().getStorageID();
    // filter invalidate replicas
    Collection<Block> blocks = recentInvalidateSets.get(storageID);
    if (blocks == null || !blocks.contains(block)) {
      machineSet.add(storageID);
    }
  }
  if (machineSet.size() == 0) {
    return 0;
  } else {
    results.add(new BlockWithLocations(block,
        machineSet.toArray(new String[machineSet.size()])));
    return block.getNumBytes();
  }
}
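Note that this older example builds the location list from storage IDs alone and passes a two-argument constructor; the examples from the hadoop and big-c trees below use the four-argument form, which additionally carries datanode UUIDs and storage types.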
Example #2
Source File: BlockManager.java From hadoop with Apache License 2.0
/**
 * Get all valid locations of the block & add the block to results
 * return the length of the added block; 0 if the block is not added
 */
private long addBlock(Block block, List<BlockWithLocations> results) {
  final List<DatanodeStorageInfo> locations = getValidLocations(block);
  if (locations.size() == 0) {
    return 0;
  } else {
    final String[] datanodeUuids = new String[locations.size()];
    final String[] storageIDs = new String[datanodeUuids.length];
    final StorageType[] storageTypes = new StorageType[datanodeUuids.length];
    for (int i = 0; i < locations.size(); i++) {
      final DatanodeStorageInfo s = locations.get(i);
      datanodeUuids[i] = s.getDatanodeDescriptor().getDatanodeUuid();
      storageIDs[i] = s.getStorageID();
      storageTypes[i] = s.getStorageType();
    }
    results.add(new BlockWithLocations(block, datanodeUuids, storageIDs,
        storageTypes));
    return block.getNumBytes();
  }
}
Example #3
Source File: BlockManager.java From big-c with Apache License 2.0
/**
 * Get all valid locations of the block & add the block to results
 * return the length of the added block; 0 if the block is not added
 */
private long addBlock(Block block, List<BlockWithLocations> results) {
  final List<DatanodeStorageInfo> locations = getValidLocations(block);
  if (locations.size() == 0) {
    return 0;
  } else {
    final String[] datanodeUuids = new String[locations.size()];
    final String[] storageIDs = new String[datanodeUuids.length];
    final StorageType[] storageTypes = new StorageType[datanodeUuids.length];
    for (int i = 0; i < locations.size(); i++) {
      final DatanodeStorageInfo s = locations.get(i);
      datanodeUuids[i] = s.getDatanodeDescriptor().getDatanodeUuid();
      storageIDs[i] = s.getStorageID();
      storageTypes[i] = s.getStorageType();
    }
    results.add(new BlockWithLocations(block, datanodeUuids, storageIDs,
        storageTypes));
    return block.getNumBytes();
  }
}
Example #4
Source File: TestPBHelper.java From big-c with Apache License 2.0
private static BlockWithLocations getBlockWithLocations(int bid) {
  final String[] datanodeUuids = {"dn1", "dn2", "dn3"};
  final String[] storageIDs = {"s1", "s2", "s3"};
  final StorageType[] storageTypes = {
      StorageType.DISK, StorageType.DISK, StorageType.DISK};
  return new BlockWithLocations(new Block(bid, 0, 1),
      datanodeUuids, storageIDs, storageTypes);
}
Example #5
Source File: TestPBHelper.java From big-c with Apache License 2.0
@Test
public void testConvertBlockWithLocations() {
  BlockWithLocations locs = getBlockWithLocations(1);
  BlockWithLocationsProto locsProto = PBHelper.convert(locs);
  BlockWithLocations locs2 = PBHelper.convert(locsProto);
  compare(locs, locs2);
}
Example #6
Source File: PBHelper.java From big-c with Apache License 2.0
public static BlocksWithLocations convert(BlocksWithLocationsProto blocks) {
  List<BlockWithLocationsProto> b = blocks.getBlocksList();
  BlockWithLocations[] ret = new BlockWithLocations[b.size()];
  int i = 0;
  for (BlockWithLocationsProto entry : b) {
    ret[i++] = convert(entry);
  }
  return new BlocksWithLocations(ret);
}
Example #7
Source File: PBHelper.java From big-c with Apache License 2.0
public static BlocksWithLocationsProto convert(BlocksWithLocations blks) {
  BlocksWithLocationsProto.Builder builder =
      BlocksWithLocationsProto.newBuilder();
  for (BlockWithLocations b : blks.getBlocks()) {
    builder.addBlocks(convert(b));
  }
  return builder.build();
}
Example #8
Source File: PBHelper.java From big-c with Apache License 2.0
public static BlockWithLocations convert(BlockWithLocationsProto b) {
  final List<String> datanodeUuids = b.getDatanodeUuidsList();
  final List<String> storageUuids = b.getStorageUuidsList();
  final List<StorageTypeProto> storageTypes = b.getStorageTypesList();
  return new BlockWithLocations(convert(b.getBlock()),
      datanodeUuids.toArray(new String[datanodeUuids.size()]),
      storageUuids.toArray(new String[storageUuids.size()]),
      convertStorageTypes(storageTypes, storageUuids.size()));
}
Example #9
Source File: PBHelper.java From big-c with Apache License 2.0
public static BlockWithLocationsProto convert(BlockWithLocations blk) {
  return BlockWithLocationsProto.newBuilder()
      .setBlock(convert(blk.getBlock()))
      .addAllDatanodeUuids(Arrays.asList(blk.getDatanodeUuids()))
      .addAllStorageUuids(Arrays.asList(blk.getStorageIDs()))
      .addAllStorageTypes(convertStorageTypes(blk.getStorageTypes()))
      .build();
}
Example #10
Source File: Dispatcher.java From hadoop with Apache License 2.0
/**
 * Fetch new blocks of this source from namenode and update this source's
 * block list & {@link Dispatcher#globalBlocks}.
 *
 * @return the total size of the received blocks in the number of bytes.
 */
private long getBlockList() throws IOException {
  final long size = Math.min(MAX_BLOCKS_SIZE_TO_FETCH, blocksToReceive);
  final BlocksWithLocations newBlocks = nnc.getBlocks(getDatanodeInfo(), size);

  long bytesReceived = 0;
  for (BlockWithLocations blk : newBlocks.getBlocks()) {
    bytesReceived += blk.getBlock().getNumBytes();
    synchronized (globalBlocks) {
      final DBlock block = globalBlocks.get(blk.getBlock());
      synchronized (block) {
        block.clearLocations();

        // update locations
        final String[] datanodeUuids = blk.getDatanodeUuids();
        final StorageType[] storageTypes = blk.getStorageTypes();
        for (int i = 0; i < datanodeUuids.length; i++) {
          final StorageGroup g = storageGroupMap.get(
              datanodeUuids[i], storageTypes[i]);
          if (g != null) { // not unknown
            block.addLocation(g);
          }
        }
      }
      if (!srcBlocks.contains(block) && isGoodBlockCandidate(block)) {
        // filter bad candidates
        srcBlocks.add(block);
      }
    }
  }
  return bytesReceived;
}
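The loop above relies on getDatanodeUuids() and getStorageTypes() being parallel arrays: index i of each describes the same replica, which is why storageGroupMap is keyed on datanodeUuids[i] together with storageTypes[i].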
Example #11
Source File: Dispatcher.java From big-c with Apache License 2.0
/**
 * Fetch new blocks of this source from namenode and update this source's
 * block list & {@link Dispatcher#globalBlocks}.
 *
 * @return the total size of the received blocks in the number of bytes.
 */
private long getBlockList() throws IOException {
  final long size = Math.min(MAX_BLOCKS_SIZE_TO_FETCH, blocksToReceive);
  final BlocksWithLocations newBlocks = nnc.getBlocks(getDatanodeInfo(), size);

  long bytesReceived = 0;
  for (BlockWithLocations blk : newBlocks.getBlocks()) {
    bytesReceived += blk.getBlock().getNumBytes();
    synchronized (globalBlocks) {
      final DBlock block = globalBlocks.get(blk.getBlock());
      synchronized (block) {
        block.clearLocations();

        // update locations
        final String[] datanodeUuids = blk.getDatanodeUuids();
        final StorageType[] storageTypes = blk.getStorageTypes();
        for (int i = 0; i < datanodeUuids.length; i++) {
          final StorageGroup g = storageGroupMap.get(
              datanodeUuids[i], storageTypes[i]);
          if (g != null) { // not unknown
            block.addLocation(g);
          }
        }
      }
      if (!srcBlocks.contains(block) && isGoodBlockCandidate(block)) {
        // filter bad candidates
        srcBlocks.add(block);
      }
    }
  }
  return bytesReceived;
}
Example #12
Source File: TestPBHelper.java From hadoop with Apache License 2.0
@Test
public void testConvertBlocksWithLocations() {
  BlockWithLocations[] list = new BlockWithLocations[] {
      getBlockWithLocations(1), getBlockWithLocations(2) };
  BlocksWithLocations locs = new BlocksWithLocations(list);
  BlocksWithLocationsProto locsProto = PBHelper.convert(locs);
  BlocksWithLocations locs2 = PBHelper.convert(locsProto);
  BlockWithLocations[] blocks = locs.getBlocks();
  BlockWithLocations[] blocks2 = locs2.getBlocks();
  assertEquals(blocks.length, blocks2.length);
  for (int i = 0; i < blocks.length; i++) {
    compare(blocks[i], blocks2[i]);
  }
}
Example #13
Source File: TestPBHelper.java From hadoop with Apache License 2.0
@Test
public void testConvertBlockWithLocations() {
  BlockWithLocations locs = getBlockWithLocations(1);
  BlockWithLocationsProto locsProto = PBHelper.convert(locs);
  BlockWithLocations locs2 = PBHelper.convert(locsProto);
  compare(locs, locs2);
}
Example #14
Source File: TestPBHelper.java From big-c with Apache License 2.0
@Test
public void testConvertBlocksWithLocations() {
  BlockWithLocations[] list = new BlockWithLocations[] {
      getBlockWithLocations(1), getBlockWithLocations(2) };
  BlocksWithLocations locs = new BlocksWithLocations(list);
  BlocksWithLocationsProto locsProto = PBHelper.convert(locs);
  BlocksWithLocations locs2 = PBHelper.convert(locsProto);
  BlockWithLocations[] blocks = locs.getBlocks();
  BlockWithLocations[] blocks2 = locs2.getBlocks();
  assertEquals(blocks.length, blocks2.length);
  for (int i = 0; i < blocks.length; i++) {
    compare(blocks[i], blocks2[i]);
  }
}
Example #15
Source File: TestPBHelper.java From hadoop with Apache License 2.0
private static BlockWithLocations getBlockWithLocations(int bid) {
  final String[] datanodeUuids = {"dn1", "dn2", "dn3"};
  final String[] storageIDs = {"s1", "s2", "s3"};
  final StorageType[] storageTypes = {
      StorageType.DISK, StorageType.DISK, StorageType.DISK};
  return new BlockWithLocations(new Block(bid, 0, 1),
      datanodeUuids, storageIDs, storageTypes);
}
Example #16
Source File: FSNamesystem.java From hadoop-gpu with Apache License 2.0
/**
 * return a list of blocks & their locations on <code>datanode</code> whose
 * total size is <code>size</code>
 *
 * @param datanode on which blocks are located
 * @param size total size of blocks
 */
synchronized BlocksWithLocations getBlocks(DatanodeID datanode, long size)
    throws IOException {
  checkSuperuserPrivilege();

  DatanodeDescriptor node = getDatanode(datanode);
  if (node == null) {
    NameNode.stateChangeLog.warn("BLOCK* NameSystem.getBlocks: "
        + "Asking for blocks from an unrecorded node " + datanode.getName());
    throw new IllegalArgumentException(
        "Unexpected exception. Got getBlocks message for datanode "
        + datanode.getName() + ", but there is no info for it");
  }

  int numBlocks = node.numBlocks();
  if (numBlocks == 0) {
    return new BlocksWithLocations(new BlockWithLocations[0]);
  }
  Iterator<Block> iter = node.getBlockIterator();
  int startBlock = r.nextInt(numBlocks); // starting from a random block
  // skip blocks
  for (int i = 0; i < startBlock; i++) {
    iter.next();
  }
  List<BlockWithLocations> results = new ArrayList<BlockWithLocations>();
  long totalSize = 0;
  while (totalSize < size && iter.hasNext()) {
    totalSize += addBlock(iter.next(), results);
  }
  if (totalSize < size) {
    iter = node.getBlockIterator(); // start from the beginning
    for (int i = 0; i < startBlock && totalSize < size; i++) {
      totalSize += addBlock(iter.next(), results);
    }
  }
  return new BlocksWithLocations(
      results.toArray(new BlockWithLocations[results.size()]));
}
Example #17
Source File: PBHelper.java From hadoop with Apache License 2.0
public static BlocksWithLocations convert(BlocksWithLocationsProto blocks) {
  List<BlockWithLocationsProto> b = blocks.getBlocksList();
  BlockWithLocations[] ret = new BlockWithLocations[b.size()];
  int i = 0;
  for (BlockWithLocationsProto entry : b) {
    ret[i++] = convert(entry);
  }
  return new BlocksWithLocations(ret);
}
Example #18
Source File: PBHelper.java From hadoop with Apache License 2.0
public static BlocksWithLocationsProto convert(BlocksWithLocations blks) {
  BlocksWithLocationsProto.Builder builder =
      BlocksWithLocationsProto.newBuilder();
  for (BlockWithLocations b : blks.getBlocks()) {
    builder.addBlocks(convert(b));
  }
  return builder.build();
}
Example #19
Source File: PBHelper.java From hadoop with Apache License 2.0
public static BlockWithLocations convert(BlockWithLocationsProto b) {
  final List<String> datanodeUuids = b.getDatanodeUuidsList();
  final List<String> storageUuids = b.getStorageUuidsList();
  final List<StorageTypeProto> storageTypes = b.getStorageTypesList();
  return new BlockWithLocations(convert(b.getBlock()),
      datanodeUuids.toArray(new String[datanodeUuids.size()]),
      storageUuids.toArray(new String[storageUuids.size()]),
      convertStorageTypes(storageTypes, storageUuids.size()));
}
Example #20
Source File: PBHelper.java From hadoop with Apache License 2.0
public static BlockWithLocationsProto convert(BlockWithLocations blk) {
  return BlockWithLocationsProto.newBuilder()
      .setBlock(convert(blk.getBlock()))
      .addAllDatanodeUuids(Arrays.asList(blk.getDatanodeUuids()))
      .addAllStorageUuids(Arrays.asList(blk.getStorageIDs()))
      .addAllStorageTypes(convertStorageTypes(blk.getStorageTypes()))
      .build();
}
Example #21
Source File: BlockManager.java From big-c with Apache License 2.0
/** Get all blocks with location information from a datanode. */
private BlocksWithLocations getBlocksWithLocations(final DatanodeID datanode,
    final long size) throws UnregisteredNodeException {
  final DatanodeDescriptor node = getDatanodeManager().getDatanode(datanode);
  if (node == null) {
    blockLog.warn("BLOCK* getBlocks: Asking for blocks from an"
        + " unrecorded node {}", datanode);
    throw new HadoopIllegalArgumentException(
        "Datanode " + datanode + " not found.");
  }

  int numBlocks = node.numBlocks();
  if (numBlocks == 0) {
    return new BlocksWithLocations(new BlockWithLocations[0]);
  }
  Iterator<BlockInfoContiguous> iter = node.getBlockIterator();
  // starting from a random block
  int startBlock = DFSUtil.getRandom().nextInt(numBlocks);
  // skip blocks
  for (int i = 0; i < startBlock; i++) {
    iter.next();
  }
  List<BlockWithLocations> results = new ArrayList<BlockWithLocations>();
  long totalSize = 0;
  BlockInfoContiguous curBlock;
  while (totalSize < size && iter.hasNext()) {
    curBlock = iter.next();
    if (!curBlock.isComplete()) {
      continue;
    }
    totalSize += addBlock(curBlock, results);
  }
  if (totalSize < size) {
    iter = node.getBlockIterator(); // start from the beginning
    for (int i = 0; i < startBlock && totalSize < size; i++) {
      curBlock = iter.next();
      if (!curBlock.isComplete()) {
        continue;
      }
      totalSize += addBlock(curBlock, results);
    }
  }
  return new BlocksWithLocations(
      results.toArray(new BlockWithLocations[results.size()]));
}
Example #22
Source File: TestPBHelper.java From big-c with Apache License 2.0
private void compare(BlockWithLocations locs1, BlockWithLocations locs2) {
  assertEquals(locs1.getBlock(), locs2.getBlock());
  assertTrue(Arrays.equals(locs1.getStorageIDs(), locs2.getStorageIDs()));
}
Example #23
Source File: TestGetBlocks.java From big-c with Apache License 2.0
/** test getBlocks */
@Test
public void testGetBlocks() throws Exception {
  final Configuration CONF = new HdfsConfiguration();

  final short REPLICATION_FACTOR = (short) 2;
  final int DEFAULT_BLOCK_SIZE = 1024;
  final Random r = new Random();

  CONF.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, DEFAULT_BLOCK_SIZE);
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(CONF)
      .numDataNodes(REPLICATION_FACTOR).build();
  try {
    cluster.waitActive();

    // create a file with two blocks
    FileSystem fs = cluster.getFileSystem();
    FSDataOutputStream out = fs.create(new Path("/tmp.txt"),
        REPLICATION_FACTOR);
    byte[] data = new byte[1024];
    long fileLen = 2 * DEFAULT_BLOCK_SIZE;
    long bytesToWrite = fileLen;
    while (bytesToWrite > 0) {
      r.nextBytes(data);
      int bytesToWriteNext = (1024 < bytesToWrite) ? 1024
          : (int) bytesToWrite;
      out.write(data, 0, bytesToWriteNext);
      bytesToWrite -= bytesToWriteNext;
    }
    out.close();

    // get blocks & data nodes
    List<LocatedBlock> locatedBlocks;
    DatanodeInfo[] dataNodes = null;
    boolean notWritten;
    do {
      final DFSClient dfsclient = new DFSClient(NameNode.getAddress(CONF),
          CONF);
      locatedBlocks = dfsclient.getNamenode()
          .getBlockLocations("/tmp.txt", 0, fileLen).getLocatedBlocks();
      assertEquals(2, locatedBlocks.size());
      notWritten = false;
      for (int i = 0; i < 2; i++) {
        dataNodes = locatedBlocks.get(i).getLocations();
        if (dataNodes.length != REPLICATION_FACTOR) {
          notWritten = true;
          try {
            Thread.sleep(10);
          } catch (InterruptedException e) {
          }
          break;
        }
      }
    } while (notWritten);

    // get RPC client to namenode
    InetSocketAddress addr = new InetSocketAddress("localhost",
        cluster.getNameNodePort());
    NamenodeProtocol namenode = NameNodeProxies.createProxy(CONF,
        NameNode.getUri(addr), NamenodeProtocol.class).getProxy();

    // get blocks of size fileLen from dataNodes[0]
    BlockWithLocations[] locs;
    locs = namenode.getBlocks(dataNodes[0], fileLen).getBlocks();
    assertEquals(locs.length, 2);
    assertEquals(locs[0].getStorageIDs().length, 2);
    assertEquals(locs[1].getStorageIDs().length, 2);

    // get blocks of size BlockSize from dataNodes[0]
    locs = namenode.getBlocks(dataNodes[0], DEFAULT_BLOCK_SIZE).getBlocks();
    assertEquals(locs.length, 1);
    assertEquals(locs[0].getStorageIDs().length, 2);

    // get blocks of size 1 from dataNodes[0]
    locs = namenode.getBlocks(dataNodes[0], 1).getBlocks();
    assertEquals(locs.length, 1);
    assertEquals(locs[0].getStorageIDs().length, 2);

    // get blocks of size 0 from dataNodes[0]
    getBlocksWithException(namenode, dataNodes[0], 0);

    // get blocks of size -1 from dataNodes[0]
    getBlocksWithException(namenode, dataNodes[0], -1);

    // get blocks of size BlockSize from a non-existent datanode
    DatanodeInfo info = DFSTestUtil.getDatanodeInfo("1.2.3.4");
    getBlocksWithException(namenode, info, 2);
  } finally {
    cluster.shutdown();
  }
}
Example #24
Source File: TestPBHelper.java From hadoop with Apache License 2.0
private void compare(BlockWithLocations locs1, BlockWithLocations locs2) {
  assertEquals(locs1.getBlock(), locs2.getBlock());
  assertTrue(Arrays.equals(locs1.getStorageIDs(), locs2.getStorageIDs()));
}
Example #25
Source File: TestGetBlocks.java From hadoop with Apache License 2.0
/** test getBlocks */
@Test
public void testGetBlocks() throws Exception {
  final Configuration CONF = new HdfsConfiguration();

  final short REPLICATION_FACTOR = (short) 2;
  final int DEFAULT_BLOCK_SIZE = 1024;
  final Random r = new Random();

  CONF.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, DEFAULT_BLOCK_SIZE);
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(CONF)
      .numDataNodes(REPLICATION_FACTOR).build();
  try {
    cluster.waitActive();

    // create a file with two blocks
    FileSystem fs = cluster.getFileSystem();
    FSDataOutputStream out = fs.create(new Path("/tmp.txt"),
        REPLICATION_FACTOR);
    byte[] data = new byte[1024];
    long fileLen = 2 * DEFAULT_BLOCK_SIZE;
    long bytesToWrite = fileLen;
    while (bytesToWrite > 0) {
      r.nextBytes(data);
      int bytesToWriteNext = (1024 < bytesToWrite) ? 1024
          : (int) bytesToWrite;
      out.write(data, 0, bytesToWriteNext);
      bytesToWrite -= bytesToWriteNext;
    }
    out.close();

    // get blocks & data nodes
    List<LocatedBlock> locatedBlocks;
    DatanodeInfo[] dataNodes = null;
    boolean notWritten;
    do {
      final DFSClient dfsclient = new DFSClient(NameNode.getAddress(CONF),
          CONF);
      locatedBlocks = dfsclient.getNamenode()
          .getBlockLocations("/tmp.txt", 0, fileLen).getLocatedBlocks();
      assertEquals(2, locatedBlocks.size());
      notWritten = false;
      for (int i = 0; i < 2; i++) {
        dataNodes = locatedBlocks.get(i).getLocations();
        if (dataNodes.length != REPLICATION_FACTOR) {
          notWritten = true;
          try {
            Thread.sleep(10);
          } catch (InterruptedException e) {
          }
          break;
        }
      }
    } while (notWritten);

    // get RPC client to namenode
    InetSocketAddress addr = new InetSocketAddress("localhost",
        cluster.getNameNodePort());
    NamenodeProtocol namenode = NameNodeProxies.createProxy(CONF,
        NameNode.getUri(addr), NamenodeProtocol.class).getProxy();

    // get blocks of size fileLen from dataNodes[0]
    BlockWithLocations[] locs;
    locs = namenode.getBlocks(dataNodes[0], fileLen).getBlocks();
    assertEquals(locs.length, 2);
    assertEquals(locs[0].getStorageIDs().length, 2);
    assertEquals(locs[1].getStorageIDs().length, 2);

    // get blocks of size BlockSize from dataNodes[0]
    locs = namenode.getBlocks(dataNodes[0], DEFAULT_BLOCK_SIZE).getBlocks();
    assertEquals(locs.length, 1);
    assertEquals(locs[0].getStorageIDs().length, 2);

    // get blocks of size 1 from dataNodes[0]
    locs = namenode.getBlocks(dataNodes[0], 1).getBlocks();
    assertEquals(locs.length, 1);
    assertEquals(locs[0].getStorageIDs().length, 2);

    // get blocks of size 0 from dataNodes[0]
    getBlocksWithException(namenode, dataNodes[0], 0);

    // get blocks of size -1 from dataNodes[0]
    getBlocksWithException(namenode, dataNodes[0], -1);

    // get blocks of size BlockSize from a non-existent datanode
    DatanodeInfo info = DFSTestUtil.getDatanodeInfo("1.2.3.4");
    getBlocksWithException(namenode, info, 2);
  } finally {
    cluster.shutdown();
  }
}
Example #26
Source File: BlockManager.java From hadoop with Apache License 2.0
/** Get all blocks with location information from a datanode. */
private BlocksWithLocations getBlocksWithLocations(final DatanodeID datanode,
    final long size) throws UnregisteredNodeException {
  final DatanodeDescriptor node = getDatanodeManager().getDatanode(datanode);
  if (node == null) {
    blockLog.warn("BLOCK* getBlocks: Asking for blocks from an"
        + " unrecorded node {}", datanode);
    throw new HadoopIllegalArgumentException(
        "Datanode " + datanode + " not found.");
  }

  int numBlocks = node.numBlocks();
  if (numBlocks == 0) {
    return new BlocksWithLocations(new BlockWithLocations[0]);
  }
  Iterator<BlockInfoContiguous> iter = node.getBlockIterator();
  // starting from a random block
  int startBlock = DFSUtil.getRandom().nextInt(numBlocks);
  // skip blocks
  for (int i = 0; i < startBlock; i++) {
    iter.next();
  }
  List<BlockWithLocations> results = new ArrayList<BlockWithLocations>();
  long totalSize = 0;
  BlockInfoContiguous curBlock;
  while (totalSize < size && iter.hasNext()) {
    curBlock = iter.next();
    if (!curBlock.isComplete()) {
      continue;
    }
    totalSize += addBlock(curBlock, results);
  }
  if (totalSize < size) {
    iter = node.getBlockIterator(); // start from the beginning
    for (int i = 0; i < startBlock && totalSize < size; i++) {
      curBlock = iter.next();
      if (!curBlock.isComplete()) {
        continue;
      }
      totalSize += addBlock(curBlock, results);
    }
  }
  return new BlocksWithLocations(
      results.toArray(new BlockWithLocations[results.size()]));
}