Java Code Examples for org.apache.hadoop.hdfs.protocol.Block#getBlockId()
The following examples show how to use org.apache.hadoop.hdfs.protocol.Block#getBlockId(). They are drawn from the Hadoop codebase and Hadoop-derived projects (RDFS, big-c, hadoop-gpu); the source file and license are noted above each example.
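As a quick orientation before the examples: a Block is the (id, length, generation stamp) triple that the code below copies, compares, and logs. The following minimal sketch uses made-up values and a hypothetical class name; it is not taken from any of the projects below.

import org.apache.hadoop.hdfs.protocol.Block;

// Minimal sketch with invented values (id 4207, length 1024, genstamp 1001).
public class GetBlockIdSketch {
  public static void main(String[] args) {
    Block b = new Block(4207L, 1024L, 1001L); // hypothetical id, length, genstamp
    System.out.println(b.getBlockId());          // 4207
    System.out.println(b.getNumBytes());         // 1024
    System.out.println(b.getGenerationStamp());  // 1001

    // The copy idiom used by TestBalancer in Example 2:
    Block copy = new Block(b.getBlockId(), b.getNumBytes(),
        b.getGenerationStamp());
    System.out.println(copy.getBlockId() == b.getBlockId()); // true
  }
}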
Example 1
Source File: BlocksMap.java From RDFS with Apache License 2.0
/**
 * Add block b belonging to the specified file inode to the map, this
 * overwrites the map with the new block information.
 */
BlockInfo updateINode(BlockInfo oldBlock, Block newBlock, INodeFile iNode)
    throws IOException {
  // If the old block is not same as the new block, probably the GS was
  // bumped up, hence remove the old block and replace it with the new one.
  if (oldBlock != null && !oldBlock.equals(newBlock)) {
    if (oldBlock.getBlockId() != newBlock.getBlockId()) {
      throw new IOException("block ids don't match : " +
          oldBlock + ", " + newBlock);
    }
    removeBlock(oldBlock);
  }
  BlockInfo info = checkBlockInfo(newBlock, iNode.getReplication());
  info.set(newBlock.getBlockId(), newBlock.getNumBytes(),
      newBlock.getGenerationStamp());
  info.inode = iNode;
  return info;
}
Example 2
Source File: TestBalancer.java From RDFS with Apache License 2.0
private Block[] generateBlocks(long size, short numNodes) throws IOException {
  cluster = new MiniDFSCluster(CONF, numNodes, true, null);
  try {
    cluster.waitActive();
    client = DFSClient.createNamenode(CONF);

    short replicationFactor = (short)(numNodes - 1);
    long fileLen = size / replicationFactor;
    createFile(fileLen, replicationFactor);

    List<LocatedBlock> locatedBlocks = client
        .getBlockLocations(fileName, 0, fileLen).getLocatedBlocks();

    int numOfBlocks = locatedBlocks.size();
    Block[] blocks = new Block[numOfBlocks];
    for (int i = 0; i < numOfBlocks; i++) {
      Block b = locatedBlocks.get(i).getBlock();
      blocks[i] = new Block(b.getBlockId(), b.getNumBytes(),
          b.getGenerationStamp());
    }

    return blocks;
  } finally {
    cluster.shutdown();
  }
}
Example 3
Source File: TestBlockMissingException.java From RDFS with Apache License 2.0
void corruptBlock(Path file, Block blockNum) throws IOException {
  long id = blockNum.getBlockId();

  // Now deliberately remove/truncate data blocks from the block.
  //
  for (int i = 0; i < NUM_DATANODES; i++) {
    File[] dirs = getDataNodeDirs(i);

    for (int j = 0; j < dirs.length; j++) {
      File[] blocks = dirs[j].listFiles();
      assertTrue("Blocks do not exist in data-dir",
          (blocks != null) && (blocks.length >= 0));
      for (int idx = 0; idx < blocks.length; idx++) {
        if (blocks[idx].getName().startsWith("blk_" + id) &&
            !blocks[idx].getName().endsWith(".meta")) {
          blocks[idx].delete();
          LOG.info("Deleted block " + blocks[idx]);
        }
      }
    }
  }
}
Example 4
Source File: FSEditLogLoader.java From big-c with Apache License 2.0
/**
 * Add a new block into the given INodeFile
 */
private void addNewBlock(FSDirectory fsDir, AddBlockOp op, INodeFile file)
    throws IOException {
  BlockInfoContiguous[] oldBlocks = file.getBlocks();
  Block pBlock = op.getPenultimateBlock();
  Block newBlock = op.getLastBlock();

  if (pBlock != null) { // the penultimate block is not null
    Preconditions.checkState(oldBlocks != null && oldBlocks.length > 0);
    // compare pBlock with the last block of oldBlocks
    Block oldLastBlock = oldBlocks[oldBlocks.length - 1];
    if (oldLastBlock.getBlockId() != pBlock.getBlockId()
        || oldLastBlock.getGenerationStamp() != pBlock.getGenerationStamp()) {
      throw new IOException(
          "Mismatched block IDs or generation stamps for the old last block of file "
              + op.getPath() + ", the old last block is " + oldLastBlock
              + ", and the block read from editlog is " + pBlock);
    }

    oldLastBlock.setNumBytes(pBlock.getNumBytes());
    if (oldLastBlock instanceof BlockInfoContiguousUnderConstruction) {
      fsNamesys.getBlockManager().forceCompleteBlock(file,
          (BlockInfoContiguousUnderConstruction) oldLastBlock);
      fsNamesys.getBlockManager().processQueuedMessagesForBlock(pBlock);
    }
  } else { // the penultimate block is null
    Preconditions.checkState(oldBlocks == null || oldBlocks.length == 0);
  }
  // add the new block
  BlockInfoContiguous newBI = new BlockInfoContiguousUnderConstruction(
      newBlock, file.getBlockReplication());
  fsNamesys.getBlockManager().addBlockCollection(newBI, file);
  file.addBlock(newBI);
  fsNamesys.getBlockManager().processQueuedMessagesForBlock(newBlock);
}
Example 5
Source File: TestFileCorruption.java From big-c with Apache License 2.0
public static ExtendedBlock getBlock(String bpid, File dataDir) {
  List<File> metadataFiles = MiniDFSCluster.getAllBlockMetadataFiles(dataDir);
  if (metadataFiles == null || metadataFiles.isEmpty()) {
    return null;
  }
  File metadataFile = metadataFiles.get(0);
  File blockFile = Block.metaToBlockFile(metadataFile);
  return new ExtendedBlock(bpid, Block.getBlockId(blockFile.getName()),
      blockFile.length(), Block.getGenerationStamp(metadataFile.getName()));
}
Example 6
Source File: BlockInfoContiguousUnderConstruction.java From big-c with Apache License 2.0
/**
 * Commit block's length and generation stamp as reported by the client.
 * Set block state to {@link BlockUCState#COMMITTED}.
 * @param block - contains client reported block length and generation
 * @throws IOException if block ids are inconsistent.
 */
void commitBlock(Block block) throws IOException {
  if (getBlockId() != block.getBlockId())
    throw new IOException("Trying to commit inconsistent block: id = "
        + block.getBlockId() + ", expected id = " + getBlockId());
  blockUCState = BlockUCState.COMMITTED;
  this.set(getBlockId(), block.getNumBytes(), block.getGenerationStamp());
  // Sort out invalid replicas.
  setGenerationStampAndVerifyReplicas(block.getGenerationStamp());
}
Example 7
Source File: FSDataset.java From RDFS with Apache License 2.0
/** {@inheritDoc} */
public void updateBlock(int namespaceId, Block oldblock, Block newblock)
    throws IOException {
  if (oldblock.getBlockId() != newblock.getBlockId()) {
    throw new IOException("Cannot update oldblock (=" + oldblock
        + ") to newblock (=" + newblock + ").");
  }

  // Protect against a straggler updateblock call moving a block backwards
  // in time.
  boolean isValidUpdate =
      (newblock.getGenerationStamp() > oldblock.getGenerationStamp()) ||
      (newblock.getGenerationStamp() == oldblock.getGenerationStamp() &&
       newblock.getNumBytes() == oldblock.getNumBytes());

  if (!isValidUpdate) {
    throw new IOException(
        "Cannot update oldblock=" + oldblock +
        " to newblock=" + newblock + " since generation stamps must " +
        "increase, or else length must not change.");
  }

  for (;;) {
    final List<Thread> threads = tryUpdateBlock(namespaceId, oldblock, newblock);
    if (threads == null) {
      DataNode.LOG.info("Updated Block: namespaceid: " + namespaceId
          + " oldBlock: " + oldblock + " newBlock: " + newblock);
      return;
    }
    DataNode.LOG.info("Waiting other threads to update block: namespaceid: "
        + namespaceId + " oldBlock: " + oldblock + " newBlock: " + newblock);
    interruptAndJoinThreads(threads);
  }
}
Example 8
Source File: TestFileCorruption.java From hadoop with Apache License 2.0
public static ExtendedBlock getBlock(String bpid, File dataDir) {
  List<File> metadataFiles = MiniDFSCluster.getAllBlockMetadataFiles(dataDir);
  if (metadataFiles == null || metadataFiles.isEmpty()) {
    return null;
  }
  File metadataFile = metadataFiles.get(0);
  File blockFile = Block.metaToBlockFile(metadataFile);
  return new ExtendedBlock(bpid, Block.getBlockId(blockFile.getName()),
      blockFile.length(), Block.getGenerationStamp(metadataFile.getName()));
}
Example 9
Source File: BlockInfoContiguousUnderConstruction.java From hadoop with Apache License 2.0
/**
 * Commit block's length and generation stamp as reported by the client.
 * Set block state to {@link BlockUCState#COMMITTED}.
 * @param block - contains client reported block length and generation
 * @throws IOException if block ids are inconsistent.
 */
void commitBlock(Block block) throws IOException {
  if (getBlockId() != block.getBlockId())
    throw new IOException("Trying to commit inconsistent block: id = "
        + block.getBlockId() + ", expected id = " + getBlockId());
  blockUCState = BlockUCState.COMMITTED;
  this.set(getBlockId(), block.getNumBytes(), block.getGenerationStamp());
  // Sort out invalid replicas.
  setGenerationStampAndVerifyReplicas(block.getGenerationStamp());
}
Example 10
Source File: TestRaidDfs.java From RDFS with Apache License 2.0
public static void corruptBlock(Path file, Block blockNum, int numDataNodes,
    long offset, MiniDFSCluster cluster) throws IOException {
  long id = blockNum.getBlockId();

  // Now deliberately remove/truncate data blocks from the block.
  //
  for (int i = 0; i < numDataNodes; i++) {
    File[] dirs = getDataNodeDirs(i, cluster);

    for (int j = 0; j < dirs.length; j++) {
      File[] blocks = dirs[j].listFiles();
      assertTrue("Blocks do not exist in data-dir",
          (blocks != null) && (blocks.length >= 0));
      for (int idx = 0; idx < blocks.length; idx++) {
        if (blocks[idx].getName().startsWith("blk_" + id) &&
            !blocks[idx].getName().endsWith(".meta")) {
          // Corrupt
          File f = blocks[idx];
          RandomAccessFile raf = new RandomAccessFile(f, "rw");
          raf.seek(offset);
          int data = raf.readInt();
          raf.seek(offset);
          raf.writeInt(data + 1);
          LOG.info("Corrupted block " + blocks[idx]);
        }
      }
    }
  }
}
Example 11
Source File: DataStorage.java From hadoop with Apache License 2.0
/**
 * Find duplicate entries with an array of LinkArgs.
 * Duplicate entries are entries with the same last path component.
 */
static ArrayList<LinkArgs> findDuplicateEntries(ArrayList<LinkArgs> all) {
  // Find duplicates by sorting the list by the final path component.
  Collections.sort(all, new Comparator<LinkArgs>() {
    /**
     * Compare two LinkArgs objects, such that objects with the same
     * terminal source path components are grouped together.
     */
    @Override
    public int compare(LinkArgs a, LinkArgs b) {
      return ComparisonChain.start().
          compare(a.src.getName(), b.src.getName()).
          compare(a.src, b.src).
          compare(a.dst, b.dst).
          result();
    }
  });
  final ArrayList<LinkArgs> duplicates = Lists.newArrayList();
  Long prevBlockId = null;
  boolean prevWasMeta = false;
  boolean addedPrev = false;
  for (int i = 0; i < all.size(); i++) {
    LinkArgs args = all.get(i);
    long blockId = Block.getBlockId(args.src.getName());
    boolean isMeta = Block.isMetaFilename(args.src.getName());
    if ((prevBlockId == null) ||
        (prevBlockId.longValue() != blockId)) {
      prevBlockId = blockId;
      addedPrev = false;
    } else if (isMeta == prevWasMeta) {
      // If we saw another file for the same block ID previously,
      // and it had the same meta-ness as this file, we have a
      // duplicate.
      duplicates.add(args);
      if (!addedPrev) {
        duplicates.add(all.get(i - 1));
      }
      addedPrev = true;
    } else {
      addedPrev = false;
    }
    prevWasMeta = isMeta;
  }
  return duplicates;
}
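The duplicate test above hinges on two static helpers, Block.getBlockId(String) and Block.isMetaFilename(String), which parse the on-disk names blk_<id> and blk_<id>_<genstamp>.meta. A minimal hedged sketch with invented file names:

import org.apache.hadoop.hdfs.protocol.Block;

// Invented file names illustrating the "meta-ness" rule above.
public class MetaNessSketch {
  public static void main(String[] args) {
    System.out.println(Block.getBlockId("blk_42"));               // 42
    System.out.println(Block.getBlockId("blk_42_1001.meta"));     // 42
    System.out.println(Block.isMetaFilename("blk_42"));           // false: a block file
    System.out.println(Block.isMetaFilename("blk_42_1001.meta")); // true: its metadata file
    // Same block ID but different meta-ness is a legitimate pair;
    // a second "blk_42" (same ID, same meta-ness) would be flagged
    // as a duplicate by findDuplicateEntries.
  }
}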
Example 12
Source File: NamenodeFsck.java From big-c with Apache License 2.0
/**
 * Check block information given a blockId number
 */
public void blockIdCK(String blockId) {

  if (blockId == null) {
    out.println("Please provide valid blockId!");
    return;
  }

  BlockManager bm = namenode.getNamesystem().getBlockManager();
  try {
    //get blockInfo
    Block block = new Block(Block.getBlockId(blockId));
    //find which file this block belongs to
    BlockInfoContiguous blockInfo = bm.getStoredBlock(block);
    if (blockInfo == null) {
      out.println("Block " + blockId + " " + NONEXISTENT_STATUS);
      LOG.warn("Block " + blockId + " " + NONEXISTENT_STATUS);
      return;
    }
    BlockCollection bc = bm.getBlockCollection(blockInfo);
    INode iNode = (INode) bc;
    NumberReplicas numberReplicas = bm.countNodes(block);
    out.println("Block Id: " + blockId);
    out.println("Block belongs to: " + iNode.getFullPathName());
    out.println("No. of Expected Replica: " + bc.getBlockReplication());
    out.println("No. of live Replica: " + numberReplicas.liveReplicas());
    out.println("No. of excess Replica: " + numberReplicas.excessReplicas());
    out.println("No. of stale Replica: "
        + numberReplicas.replicasOnStaleNodes());
    out.println("No. of decommission Replica: "
        + numberReplicas.decommissionedReplicas());
    out.println("No. of corrupted Replica: "
        + numberReplicas.corruptReplicas());
    //record datanodes that have corrupted block replica
    Collection<DatanodeDescriptor> corruptionRecord = null;
    if (bm.getCorruptReplicas(block) != null) {
      corruptionRecord = bm.getCorruptReplicas(block);
    }

    //report block replicas status on datanodes
    for (int idx = (blockInfo.numNodes() - 1); idx >= 0; idx--) {
      DatanodeDescriptor dn = blockInfo.getDatanode(idx);
      out.print("Block replica on datanode/rack: " + dn.getHostName()
          + dn.getNetworkLocation() + " ");
      if (corruptionRecord != null && corruptionRecord.contains(dn)) {
        out.print(CORRUPT_STATUS + "\t ReasonCode: "
            + bm.getCorruptReason(block, dn));
      } else if (dn.isDecommissioned()) {
        out.print(DECOMMISSIONED_STATUS);
      } else if (dn.isDecommissionInProgress()) {
        out.print(DECOMMISSIONING_STATUS);
      } else {
        out.print(HEALTHY_STATUS);
      }
      out.print("\n");
    }
  } catch (Exception e) {
    String errMsg = "Fsck on blockId '" + blockId;
    LOG.warn(errMsg, e);
    out.println(e.getMessage());
    out.print("\n\n" + errMsg);
    LOG.warn("Error in looking up block", e);
  }
}
Example 13
Source File: CorruptReplicasMap.java From big-c with Apache License 2.0
/**
 * Return a range of corrupt replica block ids. Up to numExpectedBlocks
 * blocks starting at the next block after startingBlockId are returned
 * (fewer if numExpectedBlocks blocks are unavailable). If startingBlockId
 * is null, up to numExpectedBlocks blocks are returned from the beginning.
 * If startingBlockId cannot be found, null is returned.
 *
 * @param numExpectedBlocks Number of block ids to return.
 *  0 <= numExpectedBlocks <= 100
 * @param startingBlockId Block id from which to start. If null, start at
 *  beginning.
 * @return Up to numExpectedBlocks blocks from startingBlockId if it exists
 */
long[] getCorruptReplicaBlockIds(int numExpectedBlocks,
                                 Long startingBlockId) {
  if (numExpectedBlocks < 0 || numExpectedBlocks > 100) {
    return null;
  }

  Iterator<Block> blockIt = corruptReplicasMap.keySet().iterator();

  // if the starting block id was specified, iterate over keys until
  // we find the matching block. If we find a matching block, break
  // to leave the iterator on the next block after the specified block.
  if (startingBlockId != null) {
    boolean isBlockFound = false;
    while (blockIt.hasNext()) {
      Block b = blockIt.next();
      if (b.getBlockId() == startingBlockId) {
        isBlockFound = true;
        break;
      }
    }

    if (!isBlockFound) {
      return null;
    }
  }

  ArrayList<Long> corruptReplicaBlockIds = new ArrayList<Long>();

  // append up to numExpectedBlocks blockIds to our list
  for (int i = 0; i < numExpectedBlocks && blockIt.hasNext(); i++) {
    corruptReplicaBlockIds.add(blockIt.next().getBlockId());
  }

  long[] ret = new long[corruptReplicaBlockIds.size()];
  for (int i = 0; i < ret.length; i++) {
    ret[i] = corruptReplicaBlockIds.get(i);
  }

  return ret;
}
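The contract documented above implies a simple paging loop. The fragment below is a hedged sketch of a hypothetical caller (corruptReplicas stands in for a CorruptReplicasMap instance; in practice the method is package-private and reached through the block manager), using the last id of each page as the next cursor:

// Hypothetical paging loop over getCorruptReplicaBlockIds (sketch only).
Long cursor = null;
while (true) {
  long[] page = corruptReplicas.getCorruptReplicaBlockIds(100, cursor);
  if (page == null || page.length == 0) {
    break; // starting id not found, or no corrupt replicas remain
  }
  for (long blockId : page) {
    System.out.println("corrupt replica block id: " + blockId);
  }
  cursor = page[page.length - 1]; // resume after the last id returned
}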
Example 14
Source File: TestBlockReportProcessingTime.java From RDFS with Apache License 2.0
/** Test the case when a block report processing at namenode
 * startup time is fast.
 */
public void testFasterBlockReports() throws Exception {
  MiniDFSCluster cluster = null;
  try {
    Configuration conf = new Configuration();
    cluster = new MiniDFSCluster(conf, 40, true, null);
    cluster.waitActive();
    FileSystem fs = cluster.getFileSystem();
    NameNode namenode = cluster.getNameNode();
    LOG.info("Cluster Alive.");

    // create a single file with one block.
    Path file1 = new Path("/filestatus.dat");
    final long FILE_LEN = 1L;
    DFSTestUtil.createFile(fs, file1, FILE_LEN, (short)2, 1L);
    LocatedBlocks locations = namenode.getBlockLocations(
        file1.toString(), 0, Long.MAX_VALUE);
    assertTrue(locations.locatedBlockCount() == 1);
    Block block = locations.get(0).getBlock();
    long blkid = block.getBlockId();
    long genstamp = block.getGenerationStamp();
    long length = block.getNumBytes();

    // put namenode in safemode
    namenode.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
    DatanodeInfo[] dinfo = namenode.getDatanodeReport(DatanodeReportType.ALL);
    LOG.info("Found " + dinfo.length + " number of datanodes.");

    // create artificial block replicas on each datanode
    final int NUMBLOCKS = 1000;
    final int LONGS_PER_BLOCK = 3;
    long tmpblocks[] = new long[NUMBLOCKS * LONGS_PER_BLOCK];
    for (int i = 0; i < NUMBLOCKS; i++) {
      tmpblocks[i * LONGS_PER_BLOCK] = blkid;
      tmpblocks[i * LONGS_PER_BLOCK + 1] = length;
      tmpblocks[i * LONGS_PER_BLOCK + 2] = genstamp;
    }
    BlockListAsLongs blkList = new BlockListAsLongs(tmpblocks);

    // process block report from all machines
    long total = 0;
    for (int i = 0; i < dinfo.length; i++) {
      long start = now();
      namenode.namesystem.processReport(dinfo[i], blkList);
      total += now() - start;
      LOG.info("Processed block report from " + dinfo[i]);
    }
    LOG.info("Average of all block report processing time " +
        " from " + dinfo.length + " datanodes is " +
        (total / dinfo.length) + " milliseconds.");
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
Example 15
Source File: CacheReplicationMonitor.java From hadoop with Apache License 2.0
/**
 * Apply a CacheDirective to a file.
 *
 * @param directive The CacheDirective to apply.
 * @param file The file.
 */
private void rescanFile(CacheDirective directive, INodeFile file) {
  BlockInfoContiguous[] blockInfos = file.getBlocks();

  // Increment the "needed" statistics
  directive.addFilesNeeded(1);
  // We don't cache UC blocks, don't add them to the total here
  long neededTotal = file.computeFileSizeNotIncludingLastUcBlock() *
      directive.getReplication();
  directive.addBytesNeeded(neededTotal);

  // The pool's bytesNeeded is incremented as we scan. If the demand
  // thus far plus the demand of this file would exceed the pool's limit,
  // do not cache this file.
  CachePool pool = directive.getPool();
  if (pool.getBytesNeeded() > pool.getLimit()) {
    LOG.debug("Directive {}: not scanning file {} because " +
        "bytesNeeded for pool {} is {}, but the pool's limit is {}",
        directive.getId(),
        file.getFullPathName(),
        pool.getPoolName(),
        pool.getBytesNeeded(),
        pool.getLimit());
    return;
  }

  long cachedTotal = 0;
  for (BlockInfoContiguous blockInfo : blockInfos) {
    if (!blockInfo.getBlockUCState().equals(BlockUCState.COMPLETE)) {
      // We don't try to cache blocks that are under construction.
      LOG.trace("Directive {}: can't cache block {} because it is in state "
          + "{}, not COMPLETE.", directive.getId(), blockInfo,
          blockInfo.getBlockUCState());
      continue;
    }
    Block block = new Block(blockInfo.getBlockId());
    CachedBlock ncblock = new CachedBlock(block.getBlockId(),
        directive.getReplication(), mark);
    CachedBlock ocblock = cachedBlocks.get(ncblock);
    if (ocblock == null) {
      cachedBlocks.put(ncblock);
      ocblock = ncblock;
    } else {
      // Update bytesUsed using the current replication levels.
      // Assumptions: we assume that all the blocks are the same length
      // on each datanode. We can assume this because we're only caching
      // blocks in state COMPLETE.
      // Note that if two directives are caching the same block(s), they will
      // both get them added to their bytesCached.
      List<DatanodeDescriptor> cachedOn =
          ocblock.getDatanodes(Type.CACHED);
      long cachedByBlock = Math.min(cachedOn.size(),
          directive.getReplication()) * blockInfo.getNumBytes();
      cachedTotal += cachedByBlock;

      if ((mark != ocblock.getMark()) ||
          (ocblock.getReplication() < directive.getReplication())) {
        //
        // Overwrite the block's replication and mark in two cases:
        //
        // 1. If the mark on the CachedBlock is different from the mark for
        // this scan, that means the block hasn't been updated during this
        // scan, and we should overwrite whatever is there, since it is no
        // longer valid.
        //
        // 2. If the replication in the CachedBlock is less than what the
        // directive asks for, we want to increase the block's replication
        // field to what the directive asks for.
        //
        ocblock.setReplicationAndMark(directive.getReplication(), mark);
      }
    }
    LOG.trace("Directive {}: setting replication for block {} to {}",
        directive.getId(), blockInfo, ocblock.getReplication());
  }
  // Increment the "cached" statistics
  directive.addBytesCached(cachedTotal);
  if (cachedTotal == neededTotal) {
    directive.addFilesCached(1);
  }
  LOG.debug("Directive {}: caching {}: {}/{} bytes", directive.getId(),
      file.getFullPathName(), cachedTotal, neededTotal);
}
Example 16
Source File: DataStorage.java From big-c with Apache License 2.0
/**
 * Find duplicate entries with an array of LinkArgs.
 * Duplicate entries are entries with the same last path component.
 */
static ArrayList<LinkArgs> findDuplicateEntries(ArrayList<LinkArgs> all) {
  // Find duplicates by sorting the list by the final path component.
  Collections.sort(all, new Comparator<LinkArgs>() {
    /**
     * Compare two LinkArgs objects, such that objects with the same
     * terminal source path components are grouped together.
     */
    @Override
    public int compare(LinkArgs a, LinkArgs b) {
      return ComparisonChain.start().
          compare(a.src.getName(), b.src.getName()).
          compare(a.src, b.src).
          compare(a.dst, b.dst).
          result();
    }
  });
  final ArrayList<LinkArgs> duplicates = Lists.newArrayList();
  Long prevBlockId = null;
  boolean prevWasMeta = false;
  boolean addedPrev = false;
  for (int i = 0; i < all.size(); i++) {
    LinkArgs args = all.get(i);
    long blockId = Block.getBlockId(args.src.getName());
    boolean isMeta = Block.isMetaFilename(args.src.getName());
    if ((prevBlockId == null) ||
        (prevBlockId.longValue() != blockId)) {
      prevBlockId = blockId;
      addedPrev = false;
    } else if (isMeta == prevWasMeta) {
      // If we saw another file for the same block ID previously,
      // and it had the same meta-ness as this file, we have a
      // duplicate.
      duplicates.add(args);
      if (!addedPrev) {
        duplicates.add(all.get(i - 1));
      }
      addedPrev = true;
    } else {
      addedPrev = false;
    }
    prevWasMeta = isMeta;
  }
  return duplicates;
}
Example 17
Source File: FSDataset.java From RDFS with Apache License 2.0
/**
 * Try to update an old block to a new block.
 * If there are ongoing create threads running for the old block,
 * the threads will be returned without updating the block.
 *
 * @return ongoing create threads if there is any. Otherwise, return null.
 */
private List<Thread> tryUpdateBlock(int namespaceId,
    Block oldblock, Block newblock) throws IOException {
  lock.writeLock().lock();
  try {
    //check ongoing create threads
    ArrayList<Thread> activeThreads = getActiveThreads(namespaceId, oldblock);
    if (activeThreads != null) {
      return activeThreads;
    }

    if (volumeMap.get(namespaceId, oldblock) == null) {
      throw new IOException("Block " + oldblock +
          " doesn't exist or has been recovered to a new generation ");
    }

    //No ongoing create threads is alive. Update block.
    File blockFile = findBlockFile(namespaceId, oldblock.getBlockId());
    if (blockFile == null) {
      throw new IOException("Block " + oldblock + " does not exist.");
    }

    File oldMetaFile = findMetaFile(blockFile);
    long oldgs = parseGenerationStamp(blockFile, oldMetaFile);

    // First validate the update

    //update generation stamp
    if (oldgs > newblock.getGenerationStamp()) {
      throw new IOException("Cannot update block (id=" + newblock.getBlockId()
          + ") generation stamp from " + oldgs
          + " to " + newblock.getGenerationStamp());
    }

    //update length
    if (newblock.getNumBytes() > oldblock.getNumBytes()) {
      throw new IOException("Cannot update block file (=" + blockFile
          + ") length from " + oldblock.getNumBytes() + " to "
          + newblock.getNumBytes());
    }

    // Although we've waited for the active threads all dead before updating
    // the map so there should be no data race there, we still create new
    // ActiveFile object to make sure in case another thread holds it,
    // it won't cause any problem for us.
    //
    try {
      volumeMap.copyOngoingCreates(namespaceId, oldblock);
    } catch (CloneNotSupportedException e) {
      // It should never happen.
      throw new IOException("Cannot clone ActiveFile object", e);
    }

    // Now perform the update

    // rename meta file to a tmp file
    File tmpMetaFile = new File(oldMetaFile.getParent(),
        oldMetaFile.getName() + "_tmp" + newblock.getGenerationStamp());
    if (!oldMetaFile.renameTo(tmpMetaFile)) {
      throw new IOException("Cannot rename block meta file to " + tmpMetaFile);
    }

    long oldFileLength = blockFile.length();
    if (newblock.getNumBytes() < oldFileLength) {
      truncateBlock(blockFile, tmpMetaFile, oldFileLength,
          newblock.getNumBytes());
      ActiveFile file = volumeMap.getOngoingCreates(namespaceId, oldblock);
      if (file != null) {
        file.setBytesAcked(newblock.getNumBytes());
        file.setBytesOnDisk(newblock.getNumBytes());
      } else {
        // This should never happen unless called from unit tests.
        this.getDatanodeBlockInfo(namespaceId, oldblock).syncInMemorySize();
      }
    }

    //rename the tmp file to the new meta file (with new generation stamp)
    File newMetaFile = getMetaFile(blockFile, newblock);
    if (!tmpMetaFile.renameTo(newMetaFile)) {
      throw new IOException("Cannot rename tmp meta file to " + newMetaFile);
    }

    if (volumeMap.getOngoingCreates(namespaceId, oldblock) != null) {
      ActiveFile af = volumeMap.removeOngoingCreates(namespaceId, oldblock);
      volumeMap.addOngoingCreates(namespaceId, newblock, af);
    }
    volumeMap.update(namespaceId, oldblock, newblock);

    // paranoia! verify that the contents of the stored block
    // matches the block file on disk.
    validateBlockMetadata(namespaceId, newblock);
    return null;
  } finally {
    lock.writeLock().unlock();
  }
}
Example 18
Source File: DataBlockScanner.java From hadoop-gpu with Apache License 2.0
static String newEnry(Block block, long time) {
  return "date=\"" + dateFormat.format(new Date(time)) + "\"\t " +
      "time=\"" + time + "\"\t " +
      "genstamp=\"" + block.getGenerationStamp() + "\"\t " +
      "id=\"" + block.getBlockId() + "\"";
}
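For a hypothetical block (id 4207, generation stamp 1001), the entry produced would look roughly like the line below, with fields tab-separated per the "\t" in the code. Both the values and the yyyy-MM-dd HH:mm:ss,SSS date format are assumptions for illustration:

date="2010-01-01 12:00:00,000"   time="1262347200000"   genstamp="1001"   id="4207"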
Example 19
Source File: ReplicaInfo.java From hadoop with Apache License 2.0
/**
 * Constructor
 * @param block a block
 * @param vol volume where replica is located
 * @param dir directory path where block and meta files are located
 */
ReplicaInfo(Block block, FsVolumeSpi vol, File dir) {
  this(block.getBlockId(), block.getNumBytes(),
      block.getGenerationStamp(), vol, dir);
}
Example 20
Source File: ReplicaInPipeline.java From hadoop with Apache License 2.0
/**
 * Constructor
 * @param block a block
 * @param vol volume where replica is located
 * @param dir directory path where block and meta files are located
 * @param writer a thread that is writing to this replica
 */
ReplicaInPipeline(Block block, FsVolumeSpi vol, File dir, Thread writer) {
  this(block.getBlockId(), block.getNumBytes(), block.getGenerationStamp(),
      vol, dir, writer, 0L);
}