Java Code Examples for org.apache.hadoop.hdfs.protocol.LocatedBlock#getStorageTypes()
The following examples show how to use org.apache.hadoop.hdfs.protocol.LocatedBlock#getStorageTypes().
The source file, project, and license for each example are noted above it.
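As a quick orientation before the examples: getStorageTypes() returns a StorageType[] that is index-aligned with the DatanodeInfo[] from getLocations(), so replica i of the block lives on storage type i. Below is a minimal sketch of that pattern; the method name is illustrative rather than taken from any example, and the imports assume a recent Hadoop release where StorageType lives in org.apache.hadoop.fs.

import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;

// Minimal sketch: report where each replica of a block lives and on which
// storage type. "printReplicaStorage" is a hypothetical helper name.
static void printReplicaStorage(LocatedBlock locatedBlock) {
  DatanodeInfo[] locations = locatedBlock.getLocations();
  StorageType[] storageTypes = locatedBlock.getStorageTypes();
  if (storageTypes == null) {
    return; // several of the examples below guard against a missing array
  }
  // The two arrays are index-aligned: replica i lives on storage type i.
  for (int i = 0; i < locations.length && i < storageTypes.length; i++) {
    System.out.println(locations[i].getXferAddr()
        + " stores a replica on " + storageTypes[i]);
  }
}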
Example 1
Source File: TestStorageMover.java From hadoop with Apache License 2.0 (also in big-c)
private void verifyFile(final Path parent, final HdfsFileStatus status,
    final Byte expectedPolicyId) throws Exception {
  HdfsLocatedFileStatus fileStatus = (HdfsLocatedFileStatus) status;
  byte policyId = fileStatus.getStoragePolicy();
  BlockStoragePolicy policy = policies.getPolicy(policyId);
  if (expectedPolicyId != null) {
    Assert.assertEquals((byte)expectedPolicyId, policy.getId());
  }
  final List<StorageType> types = policy.chooseStorageTypes(
      status.getReplication());
  for(LocatedBlock lb : fileStatus.getBlockLocations().getLocatedBlocks()) {
    final Mover.StorageTypeDiff diff = new Mover.StorageTypeDiff(types,
        lb.getStorageTypes());
    Assert.assertTrue(fileStatus.getFullName(parent.toString())
        + " with policy " + policy + " has non-empty overlap: " + diff
        + ", the corresponding block is " + lb.getBlock().getLocalBlock(),
        diff.removeOverlap(true));
  }
}
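This test compares the storage types a block's replicas actually occupy, from getStorageTypes(), against the types the file's storage policy asks for. If removeOverlap(true) reports that the expected types are fully cancelled out by the actual ones, the Mover has placed every replica on an acceptable storage type (this reading of StorageTypeDiff is inferred from the assertion, not spelled out in the example).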
Example 2
Source File: TestStorageMover.java From hadoop with Apache License 2.0 (also in big-c)
private Replication getOrVerifyReplication(Path file, Replication expected)
    throws IOException {
  final List<LocatedBlock> lbs = dfs.getClient().getLocatedBlocks(
      file.toString(), 0).getLocatedBlocks();
  Assert.assertEquals(1, lbs.size());

  LocatedBlock lb = lbs.get(0);
  StringBuilder types = new StringBuilder();
  final Replication r = new Replication();
  for(StorageType t : lb.getStorageTypes()) {
    types.append(t).append(", ");
    if (t == StorageType.DISK) {
      r.disk++;
    } else if (t == StorageType.ARCHIVE) {
      r.archive++;
    } else {
      Assert.fail("Unexpected storage type " + t);
    }
  }

  if (expected != null) {
    final String s = "file = " + file + "\n types = [" + types + "]";
    Assert.assertEquals(s, expected, r);
  }
  return r;
}
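Here each element of the array returned by getStorageTypes() is tallied by type to build a Replication count; any storage type other than DISK or ARCHIVE fails the test immediately.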
Example 3
Source File: DFSInputStream.java From hadoop with Apache License 2.0 (also in big-c)
/**
 * Get the best node from which to stream the data.
 * @param block LocatedBlock, containing nodes in priority order.
 * @param ignoredNodes Do not choose nodes in this array (may be null)
 * @return The DNAddrPair of the best node.
 * @throws IOException
 */
private DNAddrPair getBestNodeDNAddrPair(LocatedBlock block,
    Collection<DatanodeInfo> ignoredNodes) throws IOException {
  DatanodeInfo[] nodes = block.getLocations();
  StorageType[] storageTypes = block.getStorageTypes();
  DatanodeInfo chosenNode = null;
  StorageType storageType = null;
  if (nodes != null) {
    for (int i = 0; i < nodes.length; i++) {
      if (!deadNodes.containsKey(nodes[i])
          && (ignoredNodes == null || !ignoredNodes.contains(nodes[i]))) {
        chosenNode = nodes[i];
        // Storage types are ordered to correspond with nodes, so use the same
        // index to get storage type.
        if (storageTypes != null && i < storageTypes.length) {
          storageType = storageTypes[i];
        }
        break;
      }
    }
  }
  if (chosenNode == null) {
    throw new IOException("No live nodes contain block " + block.getBlock() +
        " after checking nodes = " + Arrays.toString(nodes) +
        ", ignoredNodes = " + ignoredNodes);
  }
  final String dnAddr =
      chosenNode.getXferAddr(dfsClient.getConf().connectToDnViaHostname);
  if (DFSClient.LOG.isDebugEnabled()) {
    DFSClient.LOG.debug("Connecting to datanode " + dnAddr);
  }
  InetSocketAddress targetAddr = NetUtils.createSocketAddr(dnAddr);
  return new DNAddrPair(chosenNode, targetAddr, storageType);
}
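As the inline comment notes, the storage-type array is ordered to correspond with the locations array, so the reader reuses the chosen node's index to look up its storage type. The null and length checks guard against responses that carry fewer storage types than locations.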
Example 4
Source File: PBHelper.java From hadoop with Apache License 2.0 (also in big-c)
public static LocatedBlockProto convert(LocatedBlock b) {
  if (b == null) return null;
  Builder builder = LocatedBlockProto.newBuilder();
  DatanodeInfo[] locs = b.getLocations();
  List<DatanodeInfo> cachedLocs =
      Lists.newLinkedList(Arrays.asList(b.getCachedLocations()));
  for (int i = 0; i < locs.length; i++) {
    DatanodeInfo loc = locs[i];
    builder.addLocs(i, PBHelper.convert(loc));
    boolean locIsCached = cachedLocs.contains(loc);
    builder.addIsCached(locIsCached);
    if (locIsCached) {
      cachedLocs.remove(loc);
    }
  }
  Preconditions.checkArgument(cachedLocs.size() == 0,
      "Found additional cached replica locations that are not in the set of"
      + " storage-backed locations!");

  StorageType[] storageTypes = b.getStorageTypes();
  if (storageTypes != null) {
    for (int i = 0; i < storageTypes.length; ++i) {
      builder.addStorageTypes(PBHelper.convertStorageType(storageTypes[i]));
    }
  }
  final String[] storageIDs = b.getStorageIDs();
  if (storageIDs != null) {
    builder.addAllStorageIDs(Arrays.asList(storageIDs));
  }

  return builder.setB(PBHelper.convert(b.getBlock()))
      .setBlockToken(PBHelper.convert(b.getBlockToken()))
      .setCorrupt(b.isCorrupt()).setOffset(b.getStartOffset()).build();
}
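When serializing a LocatedBlock to its protobuf form, the converter treats both the storage types and the storage IDs as optional, so a block reported without them still produces a valid LocatedBlockProto.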
Example 5
Source File: TestLazyPersistFiles.java From hadoop with Apache License 2.0 (also in big-c)
/**
 * File partially fits in RamDisk after eviction.
 * RamDisk can fit 2 blocks. Write a file with 5 blocks.
 * Expect 2 or fewer blocks on RamDisk and 3 or more on disk.
 * @throws IOException
 */
@Test
public void testFallbackToDiskPartial()
    throws IOException, InterruptedException {
  startUpCluster(true, 2);
  final String METHOD_NAME = GenericTestUtils.getMethodName();
  Path path = new Path("/" + METHOD_NAME + ".dat");

  makeTestFile(path, BLOCK_SIZE * 5, true);

  // Sleep for a short time to allow the lazy writer thread to do its job
  Thread.sleep(6 * LAZY_WRITER_INTERVAL_SEC * 1000);

  triggerBlockReport();

  int numBlocksOnRamDisk = 0;
  int numBlocksOnDisk = 0;

  long fileLength = client.getFileInfo(path.toString()).getLen();
  LocatedBlocks locatedBlocks =
      client.getLocatedBlocks(path.toString(), 0, fileLength);
  for (LocatedBlock locatedBlock : locatedBlocks.getLocatedBlocks()) {
    if (locatedBlock.getStorageTypes()[0] == RAM_DISK) {
      numBlocksOnRamDisk++;
    } else if (locatedBlock.getStorageTypes()[0] == DEFAULT) {
      numBlocksOnDisk++;
    }
  }

  // Since eviction is asynchronous, depending on the timing of eviction
  // wrt writes, we may get 2 or fewer blocks on RAM disk.
  assert(numBlocksOnRamDisk <= 2);
  assert(numBlocksOnDisk >= 3);
}
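The test classifies each block by the storage type of its first replica, counting how many blocks ended up on RAM_DISK versus DEFAULT storage after the lazy writer and eviction have run.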
Example 6
Source File: DFSOutputStream.java From hadoop with Apache License 2.0 (also in big-c)
/**
 * Open a DataOutputStream to a DataNode so that it can be written to.
 * This happens when a file is created and each time a new block is allocated.
 * Must get block ID and the IDs of the destinations from the namenode.
 * Returns the list of target datanodes.
 */
private LocatedBlock nextBlockOutputStream() throws IOException {
  LocatedBlock lb = null;
  DatanodeInfo[] nodes = null;
  StorageType[] storageTypes = null;
  int count = dfsClient.getConf().nBlockWriteRetry;
  boolean success = false;
  ExtendedBlock oldBlock = block;
  do {
    hasError = false;
    lastException.set(null);
    errorIndex = -1;
    success = false;

    DatanodeInfo[] excluded =
        excludedNodes.getAllPresent(excludedNodes.asMap().keySet())
            .keySet()
            .toArray(new DatanodeInfo[0]);
    block = oldBlock;
    lb = locateFollowingBlock(excluded.length > 0 ? excluded : null);
    block = lb.getBlock();
    block.setNumBytes(0);
    bytesSent = 0;
    accessToken = lb.getBlockToken();
    nodes = lb.getLocations();
    storageTypes = lb.getStorageTypes();

    //
    // Connect to first DataNode in the list.
    //
    success = createBlockOutputStream(nodes, storageTypes, 0L, false);

    if (!success) {
      DFSClient.LOG.info("Abandoning " + block);
      dfsClient.namenode.abandonBlock(block, fileId, src,
          dfsClient.clientName);
      block = null;
      DFSClient.LOG.info("Excluding datanode " + nodes[errorIndex]);
      excludedNodes.put(nodes[errorIndex], nodes[errorIndex]);
    }
  } while (!success && --count >= 0);

  if (!success) {
    throw new IOException("Unable to create new block.");
  }
  return lb;
}
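The storage types returned with the newly allocated block are passed to createBlockOutputStream() alongside the datanodes, so the write pipeline targets the storage medium the namenode chose for each replica.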
Example 7
Source File: TestDatanodeManager.java From hadoop with Apache License 2.0 (also in big-c)
/**
 * This test creates a LocatedBlock with 5 locations, sorts the locations
 * based on the network topology, and ensures the locations are still aligned
 * with the storage ids and storage types.
 */
@Test
public void testSortLocatedBlocks() throws IOException {
  // create the DatanodeManager which will be tested
  FSNamesystem fsn = Mockito.mock(FSNamesystem.class);
  Mockito.when(fsn.hasWriteLock()).thenReturn(true);
  DatanodeManager dm = new DatanodeManager(Mockito.mock(BlockManager.class),
      fsn, new Configuration());

  // register 5 datanodes, each with different storage ID and type
  DatanodeInfo[] locs = new DatanodeInfo[5];
  String[] storageIDs = new String[5];
  StorageType[] storageTypes = new StorageType[]{
      StorageType.ARCHIVE,
      StorageType.DEFAULT,
      StorageType.DISK,
      StorageType.RAM_DISK,
      StorageType.SSD
  };
  for(int i = 0; i < 5; i++) {
    // register new datanode
    String uuid = "UUID-" + i;
    String ip = "IP-" + i;
    DatanodeRegistration dr = Mockito.mock(DatanodeRegistration.class);
    Mockito.when(dr.getDatanodeUuid()).thenReturn(uuid);
    Mockito.when(dr.getIpAddr()).thenReturn(ip);
    Mockito.when(dr.getXferAddr()).thenReturn(ip + ":9000");
    Mockito.when(dr.getXferPort()).thenReturn(9000);
    Mockito.when(dr.getSoftwareVersion()).thenReturn("version1");
    dm.registerDatanode(dr);

    // get location and storage information
    locs[i] = dm.getDatanode(uuid);
    storageIDs[i] = "storageID-" + i;
  }

  // set first 2 locations as decommissioned
  locs[0].setDecommissioned();
  locs[1].setDecommissioned();

  // create LocatedBlock with above locations
  ExtendedBlock b = new ExtendedBlock("somePoolID", 1234);
  LocatedBlock block = new LocatedBlock(b, locs, storageIDs, storageTypes);
  List<LocatedBlock> blocks = new ArrayList<>();
  blocks.add(block);

  final String targetIp = locs[4].getIpAddr();

  // sort block locations
  dm.sortLocatedBlocks(targetIp, blocks);

  // check that storage IDs/types are aligned with datanode locs
  DatanodeInfo[] sortedLocs = block.getLocations();
  storageIDs = block.getStorageIDs();
  storageTypes = block.getStorageTypes();
  assertThat(sortedLocs.length, is(5));
  assertThat(storageIDs.length, is(5));
  assertThat(storageTypes.length, is(5));
  for(int i = 0; i < sortedLocs.length; i++) {
    assertThat(((DatanodeInfoWithStorage)sortedLocs[i]).getStorageID(),
        is(storageIDs[i]));
    assertThat(((DatanodeInfoWithStorage)sortedLocs[i]).getStorageType(),
        is(storageTypes[i]));
  }

  // Ensure the local node is first.
  assertThat(sortedLocs[0].getIpAddr(), is(targetIp));

  // Ensure the two decommissioned DNs were moved to the end.
  assertThat(sortedLocs[sortedLocs.length - 1].getAdminState(),
      is(DatanodeInfo.AdminStates.DECOMMISSIONED));
  assertThat(sortedLocs[sortedLocs.length - 2].getAdminState(),
      is(DatanodeInfo.AdminStates.DECOMMISSIONED));
}
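The assertions pin down the contract the other examples rely on: sortLocatedBlocks() must reorder the storage IDs and storage types together with the datanode locations, which is why getStorageTypes() is read again after the sort and re-checked element by element.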
Example 8
Source File: FanOutOneBlockAsyncDFSOutputHelper.java From hbase with Apache License 2.0
private static List<Future<Channel>> connectToDataNodes(Configuration conf,
    DFSClient client, String clientName, LocatedBlock locatedBlock,
    long maxBytesRcvd, long latestGS, BlockConstructionStage stage,
    DataChecksum summer, EventLoopGroup eventLoopGroup,
    Class<? extends Channel> channelClass) {
  StorageType[] storageTypes = locatedBlock.getStorageTypes();
  DatanodeInfo[] datanodeInfos = locatedBlock.getLocations();
  boolean connectToDnViaHostname =
      conf.getBoolean(DFS_CLIENT_USE_DN_HOSTNAME, DFS_CLIENT_USE_DN_HOSTNAME_DEFAULT);
  int timeoutMs = conf.getInt(DFS_CLIENT_SOCKET_TIMEOUT_KEY, READ_TIMEOUT);
  ExtendedBlock blockCopy = new ExtendedBlock(locatedBlock.getBlock());
  blockCopy.setNumBytes(locatedBlock.getBlockSize());
  ClientOperationHeaderProto header = ClientOperationHeaderProto.newBuilder()
      .setBaseHeader(BaseHeaderProto.newBuilder()
          .setBlock(PBHelperClient.convert(blockCopy))
          .setToken(PBHelperClient.convert(locatedBlock.getBlockToken())))
      .setClientName(clientName).build();
  ChecksumProto checksumProto = DataTransferProtoUtil.toProto(summer);
  OpWriteBlockProto.Builder writeBlockProtoBuilder = OpWriteBlockProto.newBuilder()
      .setHeader(header)
      .setStage(OpWriteBlockProto.BlockConstructionStage.valueOf(stage.name()))
      .setPipelineSize(1).setMinBytesRcvd(locatedBlock.getBlock().getNumBytes())
      .setMaxBytesRcvd(maxBytesRcvd).setLatestGenerationStamp(latestGS)
      .setRequestedChecksum(checksumProto)
      .setCachingStrategy(CachingStrategyProto.newBuilder().setDropBehind(true).build());
  List<Future<Channel>> futureList = new ArrayList<>(datanodeInfos.length);
  for (int i = 0; i < datanodeInfos.length; i++) {
    DatanodeInfo dnInfo = datanodeInfos[i];
    StorageType storageType = storageTypes[i];
    Promise<Channel> promise = eventLoopGroup.next().newPromise();
    futureList.add(promise);
    String dnAddr = dnInfo.getXferAddr(connectToDnViaHostname);
    new Bootstrap().group(eventLoopGroup).channel(channelClass)
        .option(CONNECT_TIMEOUT_MILLIS, timeoutMs)
        .handler(new ChannelInitializer<Channel>() {

          @Override
          protected void initChannel(Channel ch) throws Exception {
            // we need to get the remote address of the channel so we can only move on after
            // channel connected. Leave an empty implementation here because netty does not allow
            // a null handler.
          }
        }).connect(NetUtils.createSocketAddr(dnAddr)).addListener(new ChannelFutureListener() {

          @Override
          public void operationComplete(ChannelFuture future) throws Exception {
            if (future.isSuccess()) {
              initialize(conf, future.channel(), dnInfo, storageType, writeBlockProtoBuilder,
                  timeoutMs, client, locatedBlock.getBlockToken(), promise);
            } else {
              promise.tryFailure(future.cause());
            }
          }
        });
  }
  return futureList;
}
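HBase's fan-out output opens a channel to every datanode in parallel rather than chaining a pipeline; the storage type at index i is handed to initialize() together with datanode i, once more relying on the index alignment between getStorageTypes() and getLocations().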