org.apache.hadoop.hdfs.protocol.BlockListAsLongs Java Examples
The following examples show how to use
org.apache.hadoop.hdfs.protocol.BlockListAsLongs.
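Before the examples, here is a minimal sketch of the core API. It is not taken from any of the projects below; it only uses calls that appear in the examples (iterating a report, getNumberOfBlocks(), the BlockListAsLongs.EMPTY constant, and the decodeLongs() round-trip), and the surrounding class and main method are hypothetical scaffolding:

import java.util.Arrays;
import java.util.List;
import java.util.stream.Collectors;

import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;

public class BlockListAsLongsSketch {
  // Print the contents of a block report, then round-trip it through the
  // legacy long[] wire encoding.
  static void inspect(BlockListAsLongs report) {
    System.out.println("blocks in report: " + report.getNumberOfBlocks());
    for (Block b : report) {  // a report iterates as Blocks
      System.out.println(b.getBlockId() + " len=" + b.getNumBytes());
    }
    List<Long> longs = Arrays.stream(report.getBlockListAsLongs())
        .boxed().collect(Collectors.toList());
    BlockListAsLongs decoded = BlockListAsLongs.decodeLongs(longs);
    System.out.println("decoded blocks: " + decoded.getNumberOfBlocks());
  }

  public static void main(String[] args) {
    inspect(BlockListAsLongs.EMPTY);  // the canonical empty report
  }
}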
Example #1
Source File: TestBlockManager.java From big-c with Apache License 2.0
@Test
public void testSafeModeIBRAfterIncremental() throws Exception {
  DatanodeDescriptor node = spy(nodes.get(0));
  DatanodeStorageInfo ds = node.getStorageInfos()[0];
  node.isAlive = true;

  DatanodeRegistration nodeReg = new DatanodeRegistration(node, null, null, "");

  // pretend to be in safemode
  doReturn(true).when(fsn).isInStartupSafeMode();

  // register new node
  bm.getDatanodeManager().registerDatanode(nodeReg);
  bm.getDatanodeManager().addDatanode(node); // swap in spy
  assertEquals(node, bm.getDatanodeManager().getDatanode(node));
  assertEquals(0, ds.getBlockReportCount());

  // send block report while pretending to already have blocks
  reset(node);
  doReturn(1).when(node).numBlocks();
  bm.processReport(node, new DatanodeStorage(ds.getStorageID()),
      BlockListAsLongs.EMPTY, null, false);
  assertEquals(1, ds.getBlockReportCount());
}
Example #2
Source File: TestBlockManager.java From hadoop with Apache License 2.0
@Test
public void testSafeModeIBRAfterIncremental() throws Exception {
  DatanodeDescriptor node = spy(nodes.get(0));
  DatanodeStorageInfo ds = node.getStorageInfos()[0];
  node.isAlive = true;

  DatanodeRegistration nodeReg = new DatanodeRegistration(node, null, null, "");

  // pretend to be in safemode
  doReturn(true).when(fsn).isInStartupSafeMode();

  // register new node
  bm.getDatanodeManager().registerDatanode(nodeReg);
  bm.getDatanodeManager().addDatanode(node); // swap in spy
  assertEquals(node, bm.getDatanodeManager().getDatanode(node));
  assertEquals(0, ds.getBlockReportCount());

  // send block report while pretending to already have blocks
  reset(node);
  doReturn(1).when(node).numBlocks();
  bm.processReport(node, new DatanodeStorage(ds.getStorageID()),
      BlockListAsLongs.EMPTY, null, false);
  assertEquals(1, ds.getBlockReportCount());
}
Example #3
Source File: IncrementalBlockReport.java From RDFS with Apache License 2.0
public IncrementalBlockReport(Block[] blocks) {
  currentBlock = 0;
  currentHint = 0;

  if (blocks == null || blocks.length == 0) {
    this.delHintsMap = LightWeightBitSet.getBitSet(0);
    this.delHints = new String[0];
    this.blocks = new long[0];
    return;
  }
  this.delHintsMap = LightWeightBitSet.getBitSet(blocks.length);
  ArrayList<String> hints = new ArrayList<String>(0);
  for (int i = 0; i < blocks.length; i++) {
    Block b = blocks[i];
    if (b instanceof ReceivedBlockInfo) {
      ReceivedBlockInfo rbi = (ReceivedBlockInfo) b;
      hints.add(rbi.getDelHints());
      LightWeightBitSet.set(delHintsMap, i);
    }
  }
  this.delHints = hints.toArray(new String[hints.size()]);
  this.blocks = BlockListAsLongs.convertToArrayLongs(blocks);
}
Example #4
Source File: NNThroughputBenchmark.java From hadoop with Apache License 2.0
void register() throws IOException {
  // get versions from the namenode
  nsInfo = nameNodeProto.versionRequest();
  dnRegistration = new DatanodeRegistration(
      new DatanodeID(DNS.getDefaultIP("default"),
          DNS.getDefaultHost("default", "default"),
          DataNode.generateUuid(), getNodePort(dnIdx),
          DFSConfigKeys.DFS_DATANODE_HTTP_DEFAULT_PORT,
          DFSConfigKeys.DFS_DATANODE_HTTPS_DEFAULT_PORT,
          DFSConfigKeys.DFS_DATANODE_IPC_DEFAULT_PORT),
      new DataStorage(nsInfo),
      new ExportedBlockKeys(), VersionInfo.getVersion());
  // register datanode
  dnRegistration = nameNodeProto.registerDatanode(dnRegistration);
  // first block reports
  storage = new DatanodeStorage(DatanodeStorage.generateUuid());
  final StorageBlockReport[] reports = {
      new StorageBlockReport(storage, BlockListAsLongs.EMPTY)
  };
  nameNodeProto.blockReport(dnRegistration,
      nameNode.getNamesystem().getBlockPoolId(), reports,
      new BlockReportContext(1, 0, System.nanoTime()));
}
Example #5
Source File: TestDFSShell.java From hadoop with Apache License 2.0
static List<File> getBlockFiles(MiniDFSCluster cluster) throws IOException {
  List<File> files = new ArrayList<File>();
  List<DataNode> datanodes = cluster.getDataNodes();
  String poolId = cluster.getNamesystem().getBlockPoolId();
  List<Map<DatanodeStorage, BlockListAsLongs>> blocks =
      cluster.getAllBlockReports(poolId);
  for (int i = 0; i < blocks.size(); i++) {
    DataNode dn = datanodes.get(i);
    Map<DatanodeStorage, BlockListAsLongs> map = blocks.get(i);
    for (Map.Entry<DatanodeStorage, BlockListAsLongs> e : map.entrySet()) {
      for (Block b : e.getValue()) {
        files.add(DataNodeTestUtils.getFile(dn, poolId, b.getBlockId()));
      }
    }
  }
  return files;
}
Example #6
Source File: TestDnRespectsBlockReportSplitThreshold.java From big-c with Apache License 2.0
private void verifyCapturedArguments(
    ArgumentCaptor<StorageBlockReport[]> captor,
    int expectedReportsPerCall,
    int expectedTotalBlockCount) {
  List<StorageBlockReport[]> listOfReports = captor.getAllValues();
  int numBlocksReported = 0;
  for (StorageBlockReport[] reports : listOfReports) {
    assertThat(reports.length, is(expectedReportsPerCall));
    for (StorageBlockReport report : reports) {
      BlockListAsLongs blockList = report.getBlocks();
      numBlocksReported += blockList.getNumberOfBlocks();
    }
  }
  assert(numBlocksReported >= expectedTotalBlockCount);
}
Example #7
Source File: TestDnRespectsBlockReportSplitThreshold.java From hadoop with Apache License 2.0
private void verifyCapturedArguments(
    ArgumentCaptor<StorageBlockReport[]> captor,
    int expectedReportsPerCall,
    int expectedTotalBlockCount) {
  List<StorageBlockReport[]> listOfReports = captor.getAllValues();
  int numBlocksReported = 0;
  for (StorageBlockReport[] reports : listOfReports) {
    assertThat(reports.length, is(expectedReportsPerCall));
    for (StorageBlockReport report : reports) {
      BlockListAsLongs blockList = report.getBlocks();
      numBlocksReported += blockList.getNumberOfBlocks();
    }
  }
  assert(numBlocksReported >= expectedTotalBlockCount);
}
Example #8
Source File: TestDFSShell.java From big-c with Apache License 2.0
static List<File> getBlockFiles(MiniDFSCluster cluster) throws IOException {
  List<File> files = new ArrayList<File>();
  List<DataNode> datanodes = cluster.getDataNodes();
  String poolId = cluster.getNamesystem().getBlockPoolId();
  List<Map<DatanodeStorage, BlockListAsLongs>> blocks =
      cluster.getAllBlockReports(poolId);
  for (int i = 0; i < blocks.size(); i++) {
    DataNode dn = datanodes.get(i);
    Map<DatanodeStorage, BlockListAsLongs> map = blocks.get(i);
    for (Map.Entry<DatanodeStorage, BlockListAsLongs> e : map.entrySet()) {
      for (Block b : e.getValue()) {
        files.add(DataNodeTestUtils.getFile(dn, poolId, b.getBlockId()));
      }
    }
  }
  return files;
}
Example #9
Source File: NNThroughputBenchmark.java From big-c with Apache License 2.0
void register() throws IOException {
  // get versions from the namenode
  nsInfo = nameNodeProto.versionRequest();
  dnRegistration = new DatanodeRegistration(
      new DatanodeID(DNS.getDefaultIP("default"),
          DNS.getDefaultHost("default", "default"),
          DataNode.generateUuid(), getNodePort(dnIdx),
          DFSConfigKeys.DFS_DATANODE_HTTP_DEFAULT_PORT,
          DFSConfigKeys.DFS_DATANODE_HTTPS_DEFAULT_PORT,
          DFSConfigKeys.DFS_DATANODE_IPC_DEFAULT_PORT),
      new DataStorage(nsInfo),
      new ExportedBlockKeys(), VersionInfo.getVersion());
  // register datanode
  dnRegistration = nameNodeProto.registerDatanode(dnRegistration);
  // first block reports
  storage = new DatanodeStorage(DatanodeStorage.generateUuid());
  final StorageBlockReport[] reports = {
      new StorageBlockReport(storage, BlockListAsLongs.EMPTY)
  };
  nameNodeProto.blockReport(dnRegistration,
      nameNode.getNamesystem().getBlockPoolId(), reports,
      new BlockReportContext(1, 0, System.nanoTime()));
}
Example #10
Source File: IncrementalBlockReport.java From RDFS with Apache License 2.0
public String getNext(Block b) {
  String hint = null;
  BlockListAsLongs.getBlockInfo(b, blocks, currentBlock);
  if (LightWeightBitSet.get(delHintsMap, currentBlock)) {
    hint = delHints[currentHint];
    currentHint++;
  }
  currentBlock++;
  return hint;
}
Example #11
Source File: MiniDFSCluster.java From hadoop with Apache License 2.0
/**
 * @return block reports from all data nodes. BlockListAsLongs is indexed
 *         in the same order as the list of datanodes returned by
 *         getDataNodes().
 */
public List<Map<DatanodeStorage, BlockListAsLongs>> getAllBlockReports(String bpid) {
  int numDataNodes = dataNodes.size();
  final List<Map<DatanodeStorage, BlockListAsLongs>> result =
      new ArrayList<Map<DatanodeStorage, BlockListAsLongs>>(numDataNodes);
  for (int i = 0; i < numDataNodes; ++i) {
    result.add(getBlockReport(bpid, i));
  }
  return result;
}
Example #12
Source File: TestDataNodeHotSwapVolumes.java From hadoop with Apache License 2.0
private List<List<Integer>> getNumBlocksReport(int namesystemIdx) {
  List<List<Integer>> results = new ArrayList<List<Integer>>();
  final String bpid = cluster.getNamesystem(namesystemIdx).getBlockPoolId();
  List<Map<DatanodeStorage, BlockListAsLongs>> blockReports =
      cluster.getAllBlockReports(bpid);
  for (Map<DatanodeStorage, BlockListAsLongs> datanodeReport : blockReports) {
    List<Integer> numBlocksPerDN = new ArrayList<Integer>();
    for (BlockListAsLongs blocks : datanodeReport.values()) {
      numBlocksPerDN.add(blocks.getNumberOfBlocks());
    }
    results.add(numBlocksPerDN);
  }
  return results;
}
Example #13
Source File: TestDataNodeHotSwapVolumes.java From hadoop with Apache License 2.0
/**
 * Test adding one volume on a running MiniDFSCluster with only one NameNode.
 */
@Test(timeout=60000)
public void testAddOneNewVolume()
    throws IOException, ReconfigurationException,
    InterruptedException, TimeoutException {
  startDFSCluster(1, 1);
  String bpid = cluster.getNamesystem().getBlockPoolId();
  final int numBlocks = 10;

  addVolumes(1);

  Path testFile = new Path("/test");
  createFile(testFile, numBlocks);

  List<Map<DatanodeStorage, BlockListAsLongs>> blockReports =
      cluster.getAllBlockReports(bpid);
  assertEquals(1, blockReports.size());  // 1 DataNode
  assertEquals(3, blockReports.get(0).size());  // 3 volumes

  // FSVolumeList uses Round-Robin block chooser by default. Thus the new
  // blocks should be evenly located in all volumes.
  int minNumBlocks = Integer.MAX_VALUE;
  int maxNumBlocks = Integer.MIN_VALUE;
  for (BlockListAsLongs blockList : blockReports.get(0).values()) {
    minNumBlocks = Math.min(minNumBlocks, blockList.getNumberOfBlocks());
    maxNumBlocks = Math.max(maxNumBlocks, blockList.getNumberOfBlocks());
  }
  assertTrue(Math.abs(maxNumBlocks - minNumBlocks) <= 1);
  verifyFileLength(cluster.getFileSystem(), testFile, numBlocks);
}
Example #14
Source File: TestDataNodeHotSwapVolumes.java From hadoop with Apache License 2.0
@Test(timeout=60000)
public void testAddVolumesDuringWrite()
    throws IOException, InterruptedException, TimeoutException,
    ReconfigurationException {
  startDFSCluster(1, 1);
  String bpid = cluster.getNamesystem().getBlockPoolId();
  Path testFile = new Path("/test");
  createFile(testFile, 4);  // Each volume has 2 blocks.

  addVolumes(2);

  // Continue to write the same file, thus the new volumes will have blocks.
  DFSTestUtil.appendFile(cluster.getFileSystem(), testFile, BLOCK_SIZE * 8);
  verifyFileLength(cluster.getFileSystem(), testFile, 8 + 4);

  // After appending data, there should be [2, 2, 4, 4] blocks in each volume
  // respectively.
  List<Integer> expectedNumBlocks = Arrays.asList(2, 2, 4, 4);

  List<Map<DatanodeStorage, BlockListAsLongs>> blockReports =
      cluster.getAllBlockReports(bpid);
  assertEquals(1, blockReports.size());  // 1 DataNode
  assertEquals(4, blockReports.get(0).size());  // 4 volumes
  Map<DatanodeStorage, BlockListAsLongs> dnReport = blockReports.get(0);
  List<Integer> actualNumBlocks = new ArrayList<Integer>();
  for (BlockListAsLongs blockList : dnReport.values()) {
    actualNumBlocks.add(blockList.getNumberOfBlocks());
  }
  Collections.sort(actualNumBlocks);
  assertEquals(expectedNumBlocks, actualNumBlocks);
}
Example #15
Source File: MiniDFSCluster.java From hadoop with Apache License 2.0
/**
 * @param dataNodeIndex - data node whose block report is desired -
 *                        the index is same as for getDataNodes()
 * @return the block report for the specified data node
 */
public Map<DatanodeStorage, BlockListAsLongs> getBlockReport(String bpid,
    int dataNodeIndex) {
  if (dataNodeIndex < 0 || dataNodeIndex >= dataNodes.size()) {
    throw new IndexOutOfBoundsException();
  }
  final DataNode dn = dataNodes.get(dataNodeIndex).datanode;
  return DataNodeTestUtils.getFSDataset(dn).getBlockReports(bpid);
}
Example #16
Source File: TestDataNodeHotSwapVolumes.java From hadoop with Apache License 2.0
@Test(timeout=60000)
public void testRemoveOneVolume()
    throws ReconfigurationException, InterruptedException,
    TimeoutException, IOException {
  startDFSCluster(1, 1);
  final short replFactor = 1;
  Path testFile = new Path("/test");
  createFile(testFile, 10, replFactor);

  DataNode dn = cluster.getDataNodes().get(0);
  Collection<String> oldDirs = getDataDirs(dn);
  String newDirs = oldDirs.iterator().next();  // Keep the first volume.
  dn.reconfigurePropertyImpl(
      DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, newDirs);
  assertFileLocksReleased(
      new ArrayList<String>(oldDirs).subList(1, oldDirs.size()));
  dn.scheduleAllBlockReport(0);

  try {
    DFSTestUtil.readFile(cluster.getFileSystem(), testFile);
    fail("Expect to throw BlockMissingException.");
  } catch (BlockMissingException e) {
    GenericTestUtils.assertExceptionContains("Could not obtain block", e);
  }

  Path newFile = new Path("/newFile");
  createFile(newFile, 6);

  String bpid = cluster.getNamesystem().getBlockPoolId();
  List<Map<DatanodeStorage, BlockListAsLongs>> blockReports =
      cluster.getAllBlockReports(bpid);
  assertEquals((int)replFactor, blockReports.size());

  BlockListAsLongs blocksForVolume1 =
      blockReports.get(0).values().iterator().next();
  // The first volume has half of the testFile and full of newFile.
  assertEquals(10 / 2 + 6, blocksForVolume1.getNumberOfBlocks());
}
Example #17
Source File: SimulatedFSDataset.java From big-c with Apache License 2.0
synchronized BlockListAsLongs getBlockReport(String bpid) {
  BlockListAsLongs.Builder report = BlockListAsLongs.builder();
  final Map<Block, BInfo> map = blockMap.get(bpid);
  if (map != null) {
    for (BInfo b : map.values()) {
      if (b.isFinalized()) {
        report.add(b);
      }
    }
  }
  return report.build();
}
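Example #17 shows the canonical construction pattern: a Builder collects finalized replicas and build() produces the immutable, encoded report. As a rough, self-contained variant (not taken from SimulatedFSDataset), the same Builder can be fed BlockReportReplica instances, the replica wrapper that NNThroughputBenchmark uses in Examples #19 and #29; whether this compiles exactly as written depends on the Hadoop version, so treat it as a sketch:

import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
import org.apache.hadoop.hdfs.protocol.BlockListAsLongs.BlockReportReplica;

public class BuildReportSketch {
  public static void main(String[] args) {
    BlockListAsLongs.Builder builder = BlockListAsLongs.builder();
    for (long id = 1; id <= 3; id++) {
      // Block(blockId, numBytes, generationStamp); BlockReportReplica wraps
      // a Block as a finalized replica, as in the benchmark examples below.
      builder.add(new BlockReportReplica(new Block(id, 0, 0)));
    }
    BlockListAsLongs report = builder.build();
    System.out.println("built report with "
        + report.getNumberOfBlocks() + " blocks");
  }
}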
Example #18
Source File: NameNodeRpcServer.java From big-c with Apache License 2.0
@Override // DatanodeProtocol
public DatanodeCommand blockReport(DatanodeRegistration nodeReg,
    String poolId, StorageBlockReport[] reports,
    BlockReportContext context) throws IOException {
  checkNNStartup();
  verifyRequest(nodeReg);
  if (blockStateChangeLog.isDebugEnabled()) {
    blockStateChangeLog.debug("*BLOCK* NameNode.blockReport: "
        + "from " + nodeReg + ", reports.length=" + reports.length);
  }
  final BlockManager bm = namesystem.getBlockManager();
  boolean noStaleStorages = false;
  for (int r = 0; r < reports.length; r++) {
    final BlockListAsLongs blocks = reports[r].getBlocks();
    //
    // BlockManager.processReport accumulates information of prior calls
    // for the same node and storage, so the value returned by the last
    // call of this loop is the final updated value for noStaleStorage.
    //
    noStaleStorages = bm.processReport(nodeReg, reports[r].getStorage(),
        blocks, context, (r == reports.length - 1));
    metrics.incrStorageBlockReportOps();
  }

  if (nn.getFSImage().isUpgradeFinalized() &&
      !namesystem.isRollingUpgrade() &&
      !nn.isStandbyState() &&
      noStaleStorages) {
    return new FinalizeCommand(poolId);
  }
  return null;
}
Example #19
Source File: NNThroughputBenchmark.java From hadoop with Apache License 2.0
void formBlockReport() {
  // fill remaining slots with blocks that do not exist
  for (int idx = blocks.size() - 1; idx >= nrBlocks; idx--) {
    Block block = new Block(blocks.size() - idx, 0, 0);
    blocks.set(idx, new BlockReportReplica(block));
  }
  blockReportList = BlockListAsLongs.EMPTY;
}
Example #20
Source File: TestDataNodeHotSwapVolumes.java From big-c with Apache License 2.0
@Test(timeout=60000)
public void testRemoveOneVolume()
    throws ReconfigurationException, InterruptedException,
    TimeoutException, IOException {
  startDFSCluster(1, 1);
  final short replFactor = 1;
  Path testFile = new Path("/test");
  createFile(testFile, 10, replFactor);

  DataNode dn = cluster.getDataNodes().get(0);
  Collection<String> oldDirs = getDataDirs(dn);
  String newDirs = oldDirs.iterator().next();  // Keep the first volume.
  dn.reconfigurePropertyImpl(
      DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, newDirs);
  assertFileLocksReleased(
      new ArrayList<String>(oldDirs).subList(1, oldDirs.size()));
  dn.scheduleAllBlockReport(0);

  try {
    DFSTestUtil.readFile(cluster.getFileSystem(), testFile);
    fail("Expect to throw BlockMissingException.");
  } catch (BlockMissingException e) {
    GenericTestUtils.assertExceptionContains("Could not obtain block", e);
  }

  Path newFile = new Path("/newFile");
  createFile(newFile, 6);

  String bpid = cluster.getNamesystem().getBlockPoolId();
  List<Map<DatanodeStorage, BlockListAsLongs>> blockReports =
      cluster.getAllBlockReports(bpid);
  assertEquals((int)replFactor, blockReports.size());

  BlockListAsLongs blocksForVolume1 =
      blockReports.get(0).values().iterator().next();
  // The first volume has half of the testFile and full of newFile.
  assertEquals(10 / 2 + 6, blocksForVolume1.getNumberOfBlocks());
}
Example #21
Source File: TestDataNodeHotSwapVolumes.java From big-c with Apache License 2.0
@Test(timeout=60000)
public void testAddVolumesDuringWrite()
    throws IOException, InterruptedException, TimeoutException,
    ReconfigurationException {
  startDFSCluster(1, 1);
  String bpid = cluster.getNamesystem().getBlockPoolId();
  Path testFile = new Path("/test");
  createFile(testFile, 4);  // Each volume has 2 blocks.

  addVolumes(2);

  // Continue to write the same file, thus the new volumes will have blocks.
  DFSTestUtil.appendFile(cluster.getFileSystem(), testFile, BLOCK_SIZE * 8);
  verifyFileLength(cluster.getFileSystem(), testFile, 8 + 4);

  // After appending data, there should be [2, 2, 4, 4] blocks in each volume
  // respectively.
  List<Integer> expectedNumBlocks = Arrays.asList(2, 2, 4, 4);

  List<Map<DatanodeStorage, BlockListAsLongs>> blockReports =
      cluster.getAllBlockReports(bpid);
  assertEquals(1, blockReports.size());  // 1 DataNode
  assertEquals(4, blockReports.get(0).size());  // 4 volumes
  Map<DatanodeStorage, BlockListAsLongs> dnReport = blockReports.get(0);
  List<Integer> actualNumBlocks = new ArrayList<Integer>();
  for (BlockListAsLongs blockList : dnReport.values()) {
    actualNumBlocks.add(blockList.getNumberOfBlocks());
  }
  Collections.sort(actualNumBlocks);
  assertEquals(expectedNumBlocks, actualNumBlocks);
}
Example #22
Source File: TestDataNodeHotSwapVolumes.java From big-c with Apache License 2.0
/**
 * Test adding one volume on a running MiniDFSCluster with only one NameNode.
 */
@Test(timeout=60000)
public void testAddOneNewVolume()
    throws IOException, ReconfigurationException,
    InterruptedException, TimeoutException {
  startDFSCluster(1, 1);
  String bpid = cluster.getNamesystem().getBlockPoolId();
  final int numBlocks = 10;

  addVolumes(1);

  Path testFile = new Path("/test");
  createFile(testFile, numBlocks);

  List<Map<DatanodeStorage, BlockListAsLongs>> blockReports =
      cluster.getAllBlockReports(bpid);
  assertEquals(1, blockReports.size());  // 1 DataNode
  assertEquals(3, blockReports.get(0).size());  // 3 volumes

  // FSVolumeList uses Round-Robin block chooser by default. Thus the new
  // blocks should be evenly located in all volumes.
  int minNumBlocks = Integer.MAX_VALUE;
  int maxNumBlocks = Integer.MIN_VALUE;
  for (BlockListAsLongs blockList : blockReports.get(0).values()) {
    minNumBlocks = Math.min(minNumBlocks, blockList.getNumberOfBlocks());
    maxNumBlocks = Math.max(maxNumBlocks, blockList.getNumberOfBlocks());
  }
  assertTrue(Math.abs(maxNumBlocks - minNumBlocks) <= 1);
  verifyFileLength(cluster.getFileSystem(), testFile, numBlocks);
}
Example #23
Source File: TestDataNodeHotSwapVolumes.java From big-c with Apache License 2.0
private List<List<Integer>> getNumBlocksReport(int namesystemIdx) {
  List<List<Integer>> results = new ArrayList<List<Integer>>();
  final String bpid = cluster.getNamesystem(namesystemIdx).getBlockPoolId();
  List<Map<DatanodeStorage, BlockListAsLongs>> blockReports =
      cluster.getAllBlockReports(bpid);
  for (Map<DatanodeStorage, BlockListAsLongs> datanodeReport : blockReports) {
    List<Integer> numBlocksPerDN = new ArrayList<Integer>();
    for (BlockListAsLongs blocks : datanodeReport.values()) {
      numBlocksPerDN.add(blocks.getNumberOfBlocks());
    }
    results.add(numBlocksPerDN);
  }
  return results;
}
Example #24
Source File: ExternalDatasetImpl.java From big-c with Apache License 2.0
@Override
public Map<DatanodeStorage, BlockListAsLongs> getBlockReports(String bpid) {
  final Map<DatanodeStorage, BlockListAsLongs> result =
      new HashMap<DatanodeStorage, BlockListAsLongs>();
  result.put(storage, BlockListAsLongs.EMPTY);
  return result;
}
Example #25
Source File: DatanodeProtocolClientSideTranslatorPB.java From big-c with Apache License 2.0
@Override
public DatanodeCommand blockReport(DatanodeRegistration registration,
    String poolId, StorageBlockReport[] reports, BlockReportContext context)
    throws IOException {
  BlockReportRequestProto.Builder builder = BlockReportRequestProto
      .newBuilder().setRegistration(PBHelper.convert(registration))
      .setBlockPoolId(poolId);

  boolean useBlocksBuffer = registration.getNamespaceInfo()
      .isCapabilitySupported(Capability.STORAGE_BLOCK_REPORT_BUFFERS);

  for (StorageBlockReport r : reports) {
    StorageBlockReportProto.Builder reportBuilder = StorageBlockReportProto
        .newBuilder().setStorage(PBHelper.convert(r.getStorage()));
    BlockListAsLongs blocks = r.getBlocks();
    if (useBlocksBuffer) {
      reportBuilder.setNumberOfBlocks(blocks.getNumberOfBlocks());
      reportBuilder.addAllBlocksBuffers(blocks.getBlocksBuffers());
    } else {
      for (long value : blocks.getBlockListAsLongs()) {
        reportBuilder.addBlocks(value);
      }
    }
    builder.addReports(reportBuilder.build());
  }
  builder.setContext(PBHelper.convert(context));

  BlockReportResponseProto resp;
  try {
    resp = rpcProxy.blockReport(NULL_CONTROLLER, builder.build());
  } catch (ServiceException se) {
    throw ProtobufHelper.getRemoteException(se);
  }
  return resp.hasCmd() ? PBHelper.convert(resp.getCmd()) : null;
}
Example #26
Source File: DatanodeProtocolServerSideTranslatorPB.java From big-c with Apache License 2.0
@Override
public BlockReportResponseProto blockReport(RpcController controller,
    BlockReportRequestProto request) throws ServiceException {
  DatanodeCommand cmd = null;
  StorageBlockReport[] report =
      new StorageBlockReport[request.getReportsCount()];

  int index = 0;
  for (StorageBlockReportProto s : request.getReportsList()) {
    final BlockListAsLongs blocks;
    if (s.hasNumberOfBlocks()) { // new style buffer based reports
      int num = (int)s.getNumberOfBlocks();
      Preconditions.checkState(s.getBlocksCount() == 0,
          "cannot send both blocks list and buffers");
      blocks = BlockListAsLongs.decodeBuffers(num, s.getBlocksBuffersList());
    } else {
      blocks = BlockListAsLongs.decodeLongs(s.getBlocksList());
    }
    report[index++] = new StorageBlockReport(PBHelper.convert(s.getStorage()),
        blocks);
  }
  try {
    cmd = impl.blockReport(PBHelper.convert(request.getRegistration()),
        request.getBlockPoolId(), report,
        request.hasContext() ? PBHelper.convert(request.getContext()) : null);
  } catch (IOException e) {
    throw new ServiceException(e);
  }
  BlockReportResponseProto.Builder builder =
      BlockReportResponseProto.newBuilder();
  if (cmd != null) {
    builder.setCmd(PBHelper.convert(cmd));
  }
  return builder.build();
}
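The two decode paths above mirror the two encode paths in Example #25: the buffer form carries opaque byte buffers plus an explicit block count, while the legacy form carries a flat list of longs. As a hedged, self-contained sketch (not from the translators themselves), the round-trip can be exercised with only the conversion methods these examples use; BlockListAsLongs.EMPTY stands in for a real report here:

import java.util.ArrayList;
import java.util.List;

import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;

public class BlockReportRoundTrip {
  public static void main(String[] args) {
    BlockListAsLongs report = BlockListAsLongs.EMPTY;  // any report works

    // New style: opaque byte buffers plus an explicit block count.
    BlockListAsLongs viaBuffers = BlockListAsLongs.decodeBuffers(
        report.getNumberOfBlocks(), report.getBlocksBuffers());

    // Legacy style: a flat list of longs (block counts, then per-block fields).
    List<Long> longs = new ArrayList<Long>();
    for (long value : report.getBlockListAsLongs()) {
      longs.add(value);
    }
    BlockListAsLongs viaLongs = BlockListAsLongs.decodeLongs(longs);

    System.out.println(viaBuffers.getNumberOfBlocks() + " == "
        + viaLongs.getNumberOfBlocks());
  }
}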
Example #27
Source File: MiniDFSCluster.java From big-c with Apache License 2.0
/**
 * @param dataNodeIndex - data node whose block report is desired -
 *                        the index is same as for getDataNodes()
 * @return the block report for the specified data node
 */
public Map<DatanodeStorage, BlockListAsLongs> getBlockReport(String bpid,
    int dataNodeIndex) {
  if (dataNodeIndex < 0 || dataNodeIndex >= dataNodes.size()) {
    throw new IndexOutOfBoundsException();
  }
  final DataNode dn = dataNodes.get(dataNodeIndex).datanode;
  return DataNodeTestUtils.getFSDataset(dn).getBlockReports(bpid);
}
Example #28
Source File: MiniDFSCluster.java From big-c with Apache License 2.0
/**
 * @return block reports from all data nodes. BlockListAsLongs is indexed
 *         in the same order as the list of datanodes returned by
 *         getDataNodes().
 */
public List<Map<DatanodeStorage, BlockListAsLongs>> getAllBlockReports(String bpid) {
  int numDataNodes = dataNodes.size();
  final List<Map<DatanodeStorage, BlockListAsLongs>> result =
      new ArrayList<Map<DatanodeStorage, BlockListAsLongs>>(numDataNodes);
  for (int i = 0; i < numDataNodes; ++i) {
    result.add(getBlockReport(bpid, i));
  }
  return result;
}
Example #29
Source File: NNThroughputBenchmark.java From big-c with Apache License 2.0
void formBlockReport() {
  // fill remaining slots with blocks that do not exist
  for (int idx = blocks.size() - 1; idx >= nrBlocks; idx--) {
    Block block = new Block(blocks.size() - idx, 0, 0);
    blocks.set(idx, new BlockReportReplica(block));
  }
  blockReportList = BlockListAsLongs.EMPTY;
}
Example #30
Source File: TestSimulatedFSDataset.java From big-c with Apache License 2.0
@Test
public void testGetBlockReport() throws IOException {
  SimulatedFSDataset fsdataset = getSimulatedFSDataset();
  BlockListAsLongs blockReport = fsdataset.getBlockReport(bpid);
  assertEquals(0, blockReport.getNumberOfBlocks());
  addSomeBlocks(fsdataset);
  blockReport = fsdataset.getBlockReport(bpid);
  assertEquals(NUMBLOCKS, blockReport.getNumberOfBlocks());
  for (Block b : blockReport) {
    assertNotNull(b);
    assertEquals(blockIdToLen(b.getBlockId()), b.getNumBytes());
  }
}