org.apache.hadoop.hdfs.protocol.BlockListAsLongs.BlockReportReplica Java Examples
The following examples show how to use
org.apache.hadoop.hdfs.protocol.BlockListAsLongs.BlockReportReplica.
Each example notes its source file, the project it comes from (Apache Hadoop or its big-c fork), and its license.
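Before turning to the examples, here is a minimal, self-contained sketch of the class in use. It is an illustration only, written against the Hadoop 2.7-era API that the examples below reflect; the class name BlockReportReplicaDemo and every block ID, length, and generation stamp are made-up demo values.

import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
import org.apache.hadoop.hdfs.protocol.BlockListAsLongs.BlockReportReplica;

public class BlockReportReplicaDemo {
  public static void main(String[] args) {
    // Encode two replicas into a block report. Wrapping a plain Block in
    // BlockReportReplica marks it FINALIZED (see Examples #6 and #9).
    BlockListAsLongs.Builder builder = BlockListAsLongs.builder();
    builder.add(new BlockReportReplica(new Block(1L, 1024L, 1001L)));
    builder.add(new BlockReportReplica(new Block(2L, 2048L, 1002L)));
    BlockListAsLongs report = builder.build();

    System.out.println("blocks in report: " + report.getNumberOfBlocks());

    // Decode: the iterator reuses one BlockReportReplica instance, so read
    // fields out rather than retaining the object (see Examples #25/#26).
    for (BlockReportReplica replica : report) {
      System.out.println("id=" + replica.getBlockId()
          + " len=" + replica.getNumBytes()
          + " gs=" + replica.getGenerationStamp()
          + " state=" + replica.getState());
    }
  }
}

BlockReportReplica is the unit a DataNode's block report is decoded into on the NameNode side, as Examples #14 and #21 show.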
Example #1
Source File: TestBlockListAsLongs.java, from big-c (Apache License 2.0)
private void checkReplicas(Map<Long, Replica> expectedReplicas,
                           BlockListAsLongs decodedBlocks) {
  assertEquals(expectedReplicas.size(), decodedBlocks.getNumberOfBlocks());

  Map<Long, Replica> reportReplicas = new HashMap<>(expectedReplicas);
  for (BlockReportReplica replica : decodedBlocks) {
    assertNotNull(replica);
    Replica expected = reportReplicas.remove(replica.getBlockId());
    assertNotNull(expected);
    assertEquals("wrong bytes",
        expected.getNumBytes(), replica.getNumBytes());
    assertEquals("wrong genstamp",
        expected.getGenerationStamp(), replica.getGenerationStamp());
    assertEquals("wrong replica state",
        expected.getState(), replica.getState());
  }
  assertTrue(reportReplicas.isEmpty());
}
Example #2
Source File: TestBlockListAsLongs.java, from hadoop (Apache License 2.0)
private void checkReplicas(Map<Long, Replica> expectedReplicas,
                           BlockListAsLongs decodedBlocks) {
  assertEquals(expectedReplicas.size(), decodedBlocks.getNumberOfBlocks());

  Map<Long, Replica> reportReplicas = new HashMap<>(expectedReplicas);
  for (BlockReportReplica replica : decodedBlocks) {
    assertNotNull(replica);
    Replica expected = reportReplicas.remove(replica.getBlockId());
    assertNotNull(expected);
    assertEquals("wrong bytes",
        expected.getNumBytes(), replica.getNumBytes());
    assertEquals("wrong genstamp",
        expected.getGenerationStamp(), replica.getGenerationStamp());
    assertEquals("wrong replica state",
        expected.getState(), replica.getState());
  }
  assertTrue(reportReplicas.isEmpty());
}
Example #3
Source File: NNThroughputBenchmark.java, from big-c (Apache License 2.0)
void formBlockReport() {
  // fill remaining slots with blocks that do not exist
  for (int idx = blocks.size() - 1; idx >= nrBlocks; idx--) {
    Block block = new Block(blocks.size() - idx, 0, 0);
    blocks.set(idx, new BlockReportReplica(block));
  }
  blockReportList = BlockListAsLongs.EMPTY;
}
Example #4
Source File: BlockListAsLongs.java, from hadoop (Apache License 2.0)
@Override
public Iterator<BlockReportReplica> iterator() {
  return new Iterator<BlockReportReplica>() {
    final BlockReportReplica block = new BlockReportReplica();
    final CodedInputStream cis = buffer.newCodedInput();
    private int currentBlockIndex = 0;

    @Override
    public boolean hasNext() {
      return currentBlockIndex < numBlocks;
    }

    @Override
    public BlockReportReplica next() {
      currentBlockIndex++;
      try {
        // zig-zag to reduce size of legacy blocks and mask off bits
        // we don't (yet) understand
        block.setBlockId(cis.readSInt64());
        block.setNumBytes(cis.readRawVarint64() & NUM_BYTES_MASK);
        block.setGenerationStamp(cis.readRawVarint64());
        long state = cis.readRawVarint64() & REPLICA_STATE_MASK;
        block.setState(ReplicaState.getState((int)state));
      } catch (IOException e) {
        throw new IllegalStateException(e);
      }
      return block;
    }

    @Override
    public void remove() {
      throw new UnsupportedOperationException();
    }
  };
}
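Two details are worth noting here. The block ID is zig-zag decoded (readSInt64), while the length, generation stamp, and state are read as raw varints with masks applied to drop bits the current code does not understand, per the inline comment. And the iterator mutates and returns a single BlockReportReplica instance on every next() call, which is why the javadoc in Examples #25 and #26 warns against adding the returned blocks to a collection.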
Example #5
Source File: NNThroughputBenchmark.java, from big-c (Apache License 2.0)
boolean addBlock(Block blk) {
  if (nrBlocks == blocks.size()) {
    if (LOG.isDebugEnabled()) {
      LOG.debug("Cannot add block: datanode capacity = " + blocks.size());
    }
    return false;
  }
  blocks.set(nrBlocks, new BlockReportReplica(blk));
  nrBlocks++;
  return true;
}
Example #6
Source File: BlockListAsLongs.java, from hadoop (Apache License 2.0)
public BlockReportReplica(Block block) {
  super(block);
  if (block instanceof BlockReportReplica) {
    this.state = ((BlockReportReplica)block).getState();
  } else {
    this.state = ReplicaState.FINALIZED;
  }
}
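When the argument is a plain Block rather than another BlockReportReplica, no replica state is available, so the copy defaults to FINALIZED.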
Example #7
Source File: NNThroughputBenchmark.java, from hadoop (Apache License 2.0)
boolean addBlock(Block blk) {
  if (nrBlocks == blocks.size()) {
    if (LOG.isDebugEnabled()) {
      LOG.debug("Cannot add block: datanode capacity = " + blocks.size());
    }
    return false;
  }
  blocks.set(nrBlocks, new BlockReportReplica(blk));
  nrBlocks++;
  return true;
}
Example #8
Source File: NNThroughputBenchmark.java, from hadoop (Apache License 2.0)
void formBlockReport() {
  // fill remaining slots with blocks that do not exist
  for (int idx = blocks.size() - 1; idx >= nrBlocks; idx--) {
    Block block = new Block(blocks.size() - idx, 0, 0);
    blocks.set(idx, new BlockReportReplica(block));
  }
  blockReportList = BlockListAsLongs.EMPTY;
}
Example #9
Source File: BlockListAsLongs.java, from big-c (Apache License 2.0)
public BlockReportReplica(Block block) {
  super(block);
  if (block instanceof BlockReportReplica) {
    this.state = ((BlockReportReplica)block).getState();
  } else {
    this.state = ReplicaState.FINALIZED;
  }
}
Example #10
Source File: BlockListAsLongs.java, from big-c (Apache License 2.0)
@Override
public Iterator<BlockReportReplica> iterator() {
  return new Iterator<BlockReportReplica>() {
    final BlockReportReplica block = new BlockReportReplica();
    final CodedInputStream cis = buffer.newCodedInput();
    private int currentBlockIndex = 0;

    @Override
    public boolean hasNext() {
      return currentBlockIndex < numBlocks;
    }

    @Override
    public BlockReportReplica next() {
      currentBlockIndex++;
      try {
        // zig-zag to reduce size of legacy blocks and mask off bits
        // we don't (yet) understand
        block.setBlockId(cis.readSInt64());
        block.setNumBytes(cis.readRawVarint64() & NUM_BYTES_MASK);
        block.setGenerationStamp(cis.readRawVarint64());
        long state = cis.readRawVarint64() & REPLICA_STATE_MASK;
        block.setState(ReplicaState.getState((int)state));
      } catch (IOException e) {
        throw new IllegalStateException(e);
      }
      return block;
    }

    @Override
    public void remove() {
      throw new UnsupportedOperationException();
    }
  };
}
Example #11
Source File: BlockListAsLongs.java, from big-c (Apache License 2.0)
@Override
public Iterator<BlockReportReplica> iterator() {
  return Collections.emptyIterator();
}
Example #12
Source File: BlockReportTestBase.java, from big-c (Apache License 2.0)
private static StorageBlockReport[] getBlockReports(
    DataNode dn, String bpid, boolean corruptOneBlockGs,
    boolean corruptOneBlockLen) {
  Map<DatanodeStorage, BlockListAsLongs> perVolumeBlockLists =
      dn.getFSDataset().getBlockReports(bpid);

  // Send block report
  StorageBlockReport[] reports =
      new StorageBlockReport[perVolumeBlockLists.size()];
  boolean corruptedGs = false;
  boolean corruptedLen = false;

  int reportIndex = 0;
  for (Map.Entry<DatanodeStorage, BlockListAsLongs> kvPair :
      perVolumeBlockLists.entrySet()) {
    DatanodeStorage dnStorage = kvPair.getKey();
    BlockListAsLongs blockList = kvPair.getValue();

    // Walk the list of blocks until we find one each to corrupt the
    // generation stamp and length, if so requested.
    BlockListAsLongs.Builder builder = BlockListAsLongs.builder();
    for (BlockReportReplica block : blockList) {
      if (corruptOneBlockGs && !corruptedGs) {
        long gsOld = block.getGenerationStamp();
        long gsNew;
        do {
          gsNew = rand.nextInt();
        } while (gsNew == gsOld);
        block.setGenerationStamp(gsNew);
        LOG.info("Corrupted the GS for block ID " + block);
        corruptedGs = true;
      } else if (corruptOneBlockLen && !corruptedLen) {
        long lenOld = block.getNumBytes();
        long lenNew;
        do {
          lenNew = rand.nextInt((int)lenOld - 1);
        } while (lenNew == lenOld);
        block.setNumBytes(lenNew);
        LOG.info("Corrupted the length for block ID " + block);
        corruptedLen = true;
      }
      builder.add(new BlockReportReplica(block));
    }
    reports[reportIndex++] =
        new StorageBlockReport(dnStorage, builder.build());
  }
  return reports;
}
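Note that the builder receives a fresh copy, new BlockReportReplica(block), rather than the loop variable itself; since the report iterator reuses one instance across next() calls (see the note under Example #4), copying is the defensive pattern here.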
Example #13
Source File: NNThroughputBenchmark.java, from big-c (Apache License 2.0)
TinyDatanode(int dnIdx, int blockCapacity) throws IOException {
  this.dnIdx = dnIdx;
  this.blocks = new ArrayList<BlockReportReplica>(blockCapacity);
  this.nrBlocks = 0;
}
Example #14
Source File: BlockManager.java, from big-c (Apache License 2.0)
/**
 * processFirstBlockReport is intended only for processing "initial" block
 * reports, the first block report received from a DN after it registers.
 * It just adds all the valid replicas to the datanode, without calculating
 * a toRemove list (since there won't be any). It also silently discards
 * any invalid blocks, thereby deferring their processing until
 * the next block report.
 * @param storageInfo - DatanodeStorageInfo that sent the report
 * @param report - the initial block report, to be processed
 * @throws IOException
 */
private void processFirstBlockReport(
    final DatanodeStorageInfo storageInfo,
    final BlockListAsLongs report) throws IOException {
  if (report == null) return;
  assert (namesystem.hasWriteLock());
  assert (storageInfo.getBlockReportCount() == 0);

  for (BlockReportReplica iblk : report) {
    ReplicaState reportedState = iblk.getState();

    if (shouldPostponeBlocksFromFuture &&
        namesystem.isGenStampInFuture(iblk)) {
      queueReportedBlock(storageInfo, iblk, reportedState,
          QUEUE_REASON_FUTURE_GENSTAMP);
      continue;
    }

    BlockInfoContiguous storedBlock = blocksMap.getStoredBlock(iblk);
    // If block does not belong to any file, we are done.
    if (storedBlock == null) continue;

    // If block is corrupt, mark it and continue to next block.
    BlockUCState ucState = storedBlock.getBlockUCState();
    BlockToMarkCorrupt c = checkReplicaCorrupt(
        iblk, reportedState, storedBlock, ucState,
        storageInfo.getDatanodeDescriptor());
    if (c != null) {
      if (shouldPostponeBlocksFromFuture) {
        // In the Standby, we may receive a block report for a file that we
        // just have an out-of-date gen-stamp or state for, for example.
        queueReportedBlock(storageInfo, iblk, reportedState,
            QUEUE_REASON_CORRUPT_STATE);
      } else {
        markBlockAsCorrupt(c, storageInfo,
            storageInfo.getDatanodeDescriptor());
      }
      continue;
    }

    // If block is under construction, add this replica to its list
    if (isBlockUnderConstruction(storedBlock, ucState, reportedState)) {
      ((BlockInfoContiguousUnderConstruction)storedBlock)
          .addReplicaIfNotPresent(storageInfo, iblk, reportedState);
      // OpenFileBlocks only inside snapshots also will be added to safemode
      // threshold. So we need to update such blocks to safemode
      // refer HDFS-5283
      BlockInfoContiguousUnderConstruction blockUC =
          (BlockInfoContiguousUnderConstruction) storedBlock;
      if (namesystem.isInSnapshot(blockUC)) {
        int numOfReplicas = blockUC.getNumExpectedLocations();
        namesystem.incrementSafeBlockCount(numOfReplicas);
      }
      // and fall through to next clause
    }

    // add replica if appropriate
    if (reportedState == ReplicaState.FINALIZED) {
      addStoredBlockImmediate(storedBlock, storageInfo);
    }
  }
}
Example #15
Source File: BlockListAsLongs.java, from big-c (Apache License 2.0)
private BlockReportReplica() {
}
Example #16
Source File: BlockListAsLongs.java, from big-c (Apache License 2.0)
@Override
public Iterator<BlockReportReplica> iterator() {
  return new Iterator<BlockReportReplica>() {
    private final BlockReportReplica block = new BlockReportReplica();
    final Iterator<Long> iter = values.iterator();
    private int currentBlockIndex = 0;

    @Override
    public boolean hasNext() {
      return currentBlockIndex < numBlocks;
    }

    @Override
    public BlockReportReplica next() {
      if (currentBlockIndex == finalizedBlocks) {
        // verify the presence of the delimiter block
        readBlock();
        Preconditions.checkArgument(block.getBlockId() == -1 &&
            block.getNumBytes() == -1 &&
            block.getGenerationStamp() == -1,
            "Invalid delimiter block");
      }
      readBlock();
      if (currentBlockIndex++ < finalizedBlocks) {
        block.setState(ReplicaState.FINALIZED);
      } else {
        block.setState(ReplicaState.getState(iter.next().intValue()));
      }
      return block;
    }

    private void readBlock() {
      block.setBlockId(iter.next());
      block.setNumBytes(iter.next());
      block.setGenerationStamp(iter.next());
    }

    @Override
    public void remove() {
      throw new UnsupportedOperationException();
    }
  };
}
Example #17
Source File: BlockListAsLongs.java, from big-c (Apache License 2.0)
@Override
public long[] getBlockListAsLongs() {
  // terribly inefficient but only occurs if server tries to transcode
  // an undecoded buffer into longs - ie. it will never happen but let's
  // handle it anyway
  if (numFinalized == -1) {
    int n = 0;
    for (Replica replica : this) {
      if (replica.getState() == ReplicaState.FINALIZED) {
        n++;
      }
    }
    numFinalized = n;
  }
  int numUc = numBlocks - numFinalized;
  int size = 2 + 3*(numFinalized+1) + 4*(numUc);
  long[] longs = new long[size];
  longs[0] = numFinalized;
  longs[1] = numUc;

  int idx = 2;
  int ucIdx = idx + 3*numFinalized;
  // delimiter block
  longs[ucIdx++] = -1;
  longs[ucIdx++] = -1;
  longs[ucIdx++] = -1;

  for (BlockReportReplica block : this) {
    switch (block.getState()) {
      case FINALIZED: {
        longs[idx++] = block.getBlockId();
        longs[idx++] = block.getNumBytes();
        longs[idx++] = block.getGenerationStamp();
        break;
      }
      default: {
        longs[ucIdx++] = block.getBlockId();
        longs[ucIdx++] = block.getNumBytes();
        longs[ucIdx++] = block.getGenerationStamp();
        longs[ucIdx++] = block.getState().getValue();
        break;
      }
    }
  }
  return longs;
}
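For reference, the legacy layout this method emits is: longs[0] holds the finalized-replica count, longs[1] the under-construction count, then one (blockId, numBytes, generationStamp) triplet per finalized replica, a (-1, -1, -1) delimiter triplet, and finally one (blockId, numBytes, generationStamp, state) quadruplet per under-construction replica. That accounts for the 2 + 3*(numFinalized+1) + 4*numUc sizing above, and the decoder in Examples #16 and #23 verifies the same delimiter.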
Example #18
Source File: BlockListAsLongs.java, from hadoop (Apache License 2.0)
@Override
public Iterator<BlockReportReplica> iterator() {
  return Collections.emptyIterator();
}
Example #19
Source File: BlockReportTestBase.java, from hadoop (Apache License 2.0)
private static StorageBlockReport[] getBlockReports(
    DataNode dn, String bpid, boolean corruptOneBlockGs,
    boolean corruptOneBlockLen) {
  Map<DatanodeStorage, BlockListAsLongs> perVolumeBlockLists =
      dn.getFSDataset().getBlockReports(bpid);

  // Send block report
  StorageBlockReport[] reports =
      new StorageBlockReport[perVolumeBlockLists.size()];
  boolean corruptedGs = false;
  boolean corruptedLen = false;

  int reportIndex = 0;
  for (Map.Entry<DatanodeStorage, BlockListAsLongs> kvPair :
      perVolumeBlockLists.entrySet()) {
    DatanodeStorage dnStorage = kvPair.getKey();
    BlockListAsLongs blockList = kvPair.getValue();

    // Walk the list of blocks until we find one each to corrupt the
    // generation stamp and length, if so requested.
    BlockListAsLongs.Builder builder = BlockListAsLongs.builder();
    for (BlockReportReplica block : blockList) {
      if (corruptOneBlockGs && !corruptedGs) {
        long gsOld = block.getGenerationStamp();
        long gsNew;
        do {
          gsNew = rand.nextInt();
        } while (gsNew == gsOld);
        block.setGenerationStamp(gsNew);
        LOG.info("Corrupted the GS for block ID " + block);
        corruptedGs = true;
      } else if (corruptOneBlockLen && !corruptedLen) {
        long lenOld = block.getNumBytes();
        long lenNew;
        do {
          lenNew = rand.nextInt((int)lenOld - 1);
        } while (lenNew == lenOld);
        block.setNumBytes(lenNew);
        LOG.info("Corrupted the length for block ID " + block);
        corruptedLen = true;
      }
      builder.add(new BlockReportReplica(block));
    }
    reports[reportIndex++] =
        new StorageBlockReport(dnStorage, builder.build());
  }
  return reports;
}
Example #20
Source File: NNThroughputBenchmark.java, from hadoop (Apache License 2.0)
TinyDatanode(int dnIdx, int blockCapacity) throws IOException {
  this.dnIdx = dnIdx;
  this.blocks = new ArrayList<BlockReportReplica>(blockCapacity);
  this.nrBlocks = 0;
}
Example #21
Source File: BlockManager.java, from hadoop (Apache License 2.0)
/**
 * processFirstBlockReport is intended only for processing "initial" block
 * reports, the first block report received from a DN after it registers.
 * It just adds all the valid replicas to the datanode, without calculating
 * a toRemove list (since there won't be any). It also silently discards
 * any invalid blocks, thereby deferring their processing until
 * the next block report.
 * @param storageInfo - DatanodeStorageInfo that sent the report
 * @param report - the initial block report, to be processed
 * @throws IOException
 */
private void processFirstBlockReport(
    final DatanodeStorageInfo storageInfo,
    final BlockListAsLongs report) throws IOException {
  if (report == null) return;
  assert (namesystem.hasWriteLock());
  assert (storageInfo.getBlockReportCount() == 0);

  for (BlockReportReplica iblk : report) {
    ReplicaState reportedState = iblk.getState();

    if (shouldPostponeBlocksFromFuture &&
        namesystem.isGenStampInFuture(iblk)) {
      queueReportedBlock(storageInfo, iblk, reportedState,
          QUEUE_REASON_FUTURE_GENSTAMP);
      continue;
    }

    BlockInfoContiguous storedBlock = blocksMap.getStoredBlock(iblk);
    // If block does not belong to any file, we are done.
    if (storedBlock == null) continue;

    // If block is corrupt, mark it and continue to next block.
    BlockUCState ucState = storedBlock.getBlockUCState();
    BlockToMarkCorrupt c = checkReplicaCorrupt(
        iblk, reportedState, storedBlock, ucState,
        storageInfo.getDatanodeDescriptor());
    if (c != null) {
      if (shouldPostponeBlocksFromFuture) {
        // In the Standby, we may receive a block report for a file that we
        // just have an out-of-date gen-stamp or state for, for example.
        queueReportedBlock(storageInfo, iblk, reportedState,
            QUEUE_REASON_CORRUPT_STATE);
      } else {
        markBlockAsCorrupt(c, storageInfo,
            storageInfo.getDatanodeDescriptor());
      }
      continue;
    }

    // If block is under construction, add this replica to its list
    if (isBlockUnderConstruction(storedBlock, ucState, reportedState)) {
      ((BlockInfoContiguousUnderConstruction)storedBlock)
          .addReplicaIfNotPresent(storageInfo, iblk, reportedState);
      // OpenFileBlocks only inside snapshots also will be added to safemode
      // threshold. So we need to update such blocks to safemode
      // refer HDFS-5283
      BlockInfoContiguousUnderConstruction blockUC =
          (BlockInfoContiguousUnderConstruction) storedBlock;
      if (namesystem.isInSnapshot(blockUC)) {
        int numOfReplicas = blockUC.getNumExpectedLocations();
        namesystem.incrementSafeBlockCount(numOfReplicas);
      }
      // and fall through to next clause
    }

    // add replica if appropriate
    if (reportedState == ReplicaState.FINALIZED) {
      addStoredBlockImmediate(storedBlock, storageInfo);
    }
  }
}
Example #22
Source File: BlockListAsLongs.java, from hadoop (Apache License 2.0)
private BlockReportReplica() {
}
Example #23
Source File: BlockListAsLongs.java, from hadoop (Apache License 2.0)
@Override
public Iterator<BlockReportReplica> iterator() {
  return new Iterator<BlockReportReplica>() {
    private final BlockReportReplica block = new BlockReportReplica();
    final Iterator<Long> iter = values.iterator();
    private int currentBlockIndex = 0;

    @Override
    public boolean hasNext() {
      return currentBlockIndex < numBlocks;
    }

    @Override
    public BlockReportReplica next() {
      if (currentBlockIndex == finalizedBlocks) {
        // verify the presence of the delimiter block
        readBlock();
        Preconditions.checkArgument(block.getBlockId() == -1 &&
            block.getNumBytes() == -1 &&
            block.getGenerationStamp() == -1,
            "Invalid delimiter block");
      }
      readBlock();
      if (currentBlockIndex++ < finalizedBlocks) {
        block.setState(ReplicaState.FINALIZED);
      } else {
        block.setState(ReplicaState.getState(iter.next().intValue()));
      }
      return block;
    }

    private void readBlock() {
      block.setBlockId(iter.next());
      block.setNumBytes(iter.next());
      block.setGenerationStamp(iter.next());
    }

    @Override
    public void remove() {
      throw new UnsupportedOperationException();
    }
  };
}
Example #24
Source File: BlockListAsLongs.java, from hadoop (Apache License 2.0)
@Override
public long[] getBlockListAsLongs() {
  // terribly inefficient but only occurs if server tries to transcode
  // an undecoded buffer into longs - ie. it will never happen but let's
  // handle it anyway
  if (numFinalized == -1) {
    int n = 0;
    for (Replica replica : this) {
      if (replica.getState() == ReplicaState.FINALIZED) {
        n++;
      }
    }
    numFinalized = n;
  }
  int numUc = numBlocks - numFinalized;
  int size = 2 + 3*(numFinalized+1) + 4*(numUc);
  long[] longs = new long[size];
  longs[0] = numFinalized;
  longs[1] = numUc;

  int idx = 2;
  int ucIdx = idx + 3*numFinalized;
  // delimiter block
  longs[ucIdx++] = -1;
  longs[ucIdx++] = -1;
  longs[ucIdx++] = -1;

  for (BlockReportReplica block : this) {
    switch (block.getState()) {
      case FINALIZED: {
        longs[idx++] = block.getBlockId();
        longs[idx++] = block.getNumBytes();
        longs[idx++] = block.getGenerationStamp();
        break;
      }
      default: {
        longs[ucIdx++] = block.getBlockId();
        longs[ucIdx++] = block.getNumBytes();
        longs[ucIdx++] = block.getGenerationStamp();
        longs[ucIdx++] = block.getState().getValue();
        break;
      }
    }
  }
  return longs;
}
Example #25
Source File: BlockListAsLongs.java, from big-c (Apache License 2.0)
/**
 * Returns a singleton iterator over blocks in the block report.  Do not
 * add the returned blocks to a collection.
 * @return Iterator
 */
abstract public Iterator<BlockReportReplica> iterator();
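If replicas from a report do need to be retained, the copy constructor shown in Examples #6 and #9 gives a safe pattern. A sketch, assuming a BlockListAsLongs named report is in scope:

List<BlockReportReplica> retained = new ArrayList<>();
for (BlockReportReplica replica : report) {
  // Copy each replica; 'replica' itself is a reused, mutating instance.
  retained.add(new BlockReportReplica(replica));
}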
Example #26
Source File: BlockListAsLongs.java, from hadoop (Apache License 2.0)
/**
 * Returns a singleton iterator over blocks in the block report.  Do not
 * add the returned blocks to a collection.
 * @return Iterator
 */
abstract public Iterator<BlockReportReplica> iterator();