Java Code Examples for org.apache.hadoop.hdfs.server.datanode.DatanodeUtil#idToBlockDir()
The following examples show how to use org.apache.hadoop.hdfs.server.datanode.DatanodeUtil#idToBlockDir().
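Before reading the examples, it helps to know what idToBlockDir() computes: it maps a block ID to a fixed two-level subdirectory tree under a given root, so a DataNode volume never accumulates an unbounded number of block files in one directory, and so a block's on-disk location can be derived from its ID alone. The sketch below models the block-ID-based layout used in recent Hadoop releases; the exact shift and mask constants have changed across versions (for example 256x256 vs. 32x32 subdirectories), so treat them as illustrative assumptions rather than the authoritative implementation.

import java.io.File;

public final class IdToBlockDirSketch {

  // Mirrors DataStorage.BLOCK_SUBDIR_PREFIX ("subdir") in Hadoop.
  private static final String BLOCK_SUBDIR_PREFIX = "subdir";

  /**
   * Minimal sketch of DatanodeUtil.idToBlockDir(): derive a two-level
   * directory from bits of the block ID so that block files spread
   * evenly under the root. A 0x1F mask yields 32 x 32 subdirectories.
   */
  static File idToBlockDir(File root, long blockId) {
    int d1 = (int) ((blockId >> 16) & 0x1F);
    int d2 = (int) ((blockId >> 8) & 0x1F);
    String path = BLOCK_SUBDIR_PREFIX + d1 + File.separator
        + BLOCK_SUBDIR_PREFIX + d2;
    return new File(root, path);
  }

  public static void main(String[] args) {
    // The same block ID always resolves to the same subdirectory, which
    // is what lets the examples below locate block and meta files by ID.
    File finalizedDir = new File("/data/dn/current/finalized");
    System.out.println(idToBlockDir(finalizedDir, 1073741825L));
    // -> /data/dn/current/finalized/subdir0/subdir0 for this particular ID
  }
}

Because the mapping is deterministic, the write paths (addBlock, copyBlockFiles) and the test helpers in the examples below can independently compute the same directory for a given block ID.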
Example 1
Source File: BlockPoolSlice.java From lucene-solr with Apache License 2.0
/**
 * Move a persisted replica from the lazypersist directory to a
 * subdirectory under finalized.
 */
ReplicaInfo activateSavedReplica(ReplicaInfo replicaInfo,
    RamDiskReplica replicaState) throws IOException {
  File metaFile = replicaState.getSavedMetaFile();
  File blockFile = replicaState.getSavedBlockFile();
  final long blockId = replicaInfo.getBlockId();
  final File blockDir = DatanodeUtil.idToBlockDir(finalizedDir, blockId);
  final File targetBlockFile = new File(blockDir, blockFile.getName());
  final File targetMetaFile = new File(blockDir, metaFile.getName());
  fileIoProvider.moveFile(volume, blockFile, targetBlockFile);
  FsDatasetImpl.LOG.info("Moved " + blockFile + " to " + targetBlockFile);
  fileIoProvider.moveFile(volume, metaFile, targetMetaFile);
  FsDatasetImpl.LOG.info("Moved " + metaFile + " to " + targetMetaFile);
  ReplicaInfo newReplicaInfo = new ReplicaBuilder(ReplicaState.FINALIZED)
      .setBlockId(blockId)
      .setLength(replicaInfo.getBytesOnDisk())
      .setGenerationStamp(replicaInfo.getGenerationStamp())
      .setFsVolume(replicaState.getLazyPersistVolume())
      .setDirectoryToUse(targetBlockFile.getParentFile())
      .build();
  return newReplicaInfo;
}
Example 2
Source File: LazyPersistTestCase.java From big-c with Apache License 2.0
protected final boolean verifyBlockDeletedFromDir(File dir,
    LocatedBlocks locatedBlocks) {
  for (LocatedBlock lb : locatedBlocks.getLocatedBlocks()) {
    File targetDir =
        DatanodeUtil.idToBlockDir(dir, lb.getBlock().getBlockId());

    File blockFile = new File(targetDir, lb.getBlock().getBlockName());
    if (blockFile.exists()) {
      LOG.warn("blockFile: " + blockFile.getAbsolutePath() +
          " exists after deletion.");
      return false;
    }
    File metaFile = new File(targetDir,
        DatanodeUtil.getMetaName(lb.getBlock().getBlockName(),
            lb.getBlock().getGenerationStamp()));
    if (metaFile.exists()) {
      LOG.warn("metaFile: " + metaFile.getAbsolutePath() +
          " exists after deletion.");
      return false;
    }
  }
  return true;
}
Example 3
Source File: LazyPersistTestCase.java From hadoop with Apache License 2.0
protected final boolean verifyBlockDeletedFromDir(File dir,
    LocatedBlocks locatedBlocks) {
  for (LocatedBlock lb : locatedBlocks.getLocatedBlocks()) {
    File targetDir =
        DatanodeUtil.idToBlockDir(dir, lb.getBlock().getBlockId());

    File blockFile = new File(targetDir, lb.getBlock().getBlockName());
    if (blockFile.exists()) {
      LOG.warn("blockFile: " + blockFile.getAbsolutePath() +
          " exists after deletion.");
      return false;
    }
    File metaFile = new File(targetDir,
        DatanodeUtil.getMetaName(lb.getBlock().getBlockName(),
            lb.getBlock().getGenerationStamp()));
    if (metaFile.exists()) {
      LOG.warn("metaFile: " + metaFile.getAbsolutePath() +
          " exists after deletion.");
      return false;
    }
  }
  return true;
}
Example 4
Source File: FsDatasetImpl.java From big-c with Apache License 2.0
private File[] copyReplicaWithNewBlockIdAndGS(
    ReplicaUnderRecovery replicaInfo, String bpid, long newBlkId, long newGS)
    throws IOException {
  String blockFileName = Block.BLOCK_FILE_PREFIX + newBlkId;
  FsVolumeReference v = volumes.getNextVolume(
      replicaInfo.getVolume().getStorageType(), replicaInfo.getNumBytes());
  final File tmpDir = ((FsVolumeImpl) v.getVolume())
      .getBlockPoolSlice(bpid).getTmpDir();
  final File destDir = DatanodeUtil.idToBlockDir(tmpDir, newBlkId);
  final File dstBlockFile = new File(destDir, blockFileName);
  final File dstMetaFile = FsDatasetUtil.getMetaFile(dstBlockFile, newGS);
  return copyBlockFiles(replicaInfo.getMetaFile(), replicaInfo.getBlockFile(),
      dstMetaFile, dstBlockFile, true);
}
Example 5
Source File: LazyPersistTestCase.java From big-c with Apache License 2.0
/**
 * Make sure at least one non-transient volume has a saved copy of the
 * replica. An infinite loop is used to ensure the async lazy persist
 * tasks are completely done before verification. The caller of
 * ensureLazyPersistBlocksAreSaved expects either a successful pass or a
 * timeout failure.
 */
protected final void ensureLazyPersistBlocksAreSaved(
    LocatedBlocks locatedBlocks) throws IOException, InterruptedException {
  final String bpid = cluster.getNamesystem().getBlockPoolId();
  List<? extends FsVolumeSpi> volumes =
      cluster.getDataNodes().get(0).getFSDataset().getVolumes();
  final Set<Long> persistedBlockIds = new HashSet<Long>();

  while (persistedBlockIds.size() < locatedBlocks.getLocatedBlocks().size()) {
    // Take a 1 second sleep before each verification iteration
    Thread.sleep(1000);

    for (LocatedBlock lb : locatedBlocks.getLocatedBlocks()) {
      for (FsVolumeSpi v : volumes) {
        if (v.isTransientStorage()) {
          continue;
        }

        FsVolumeImpl volume = (FsVolumeImpl) v;
        File lazyPersistDir =
            volume.getBlockPoolSlice(bpid).getLazypersistDir();

        long blockId = lb.getBlock().getBlockId();
        File targetDir = DatanodeUtil.idToBlockDir(lazyPersistDir, blockId);
        File blockFile = new File(targetDir, lb.getBlock().getBlockName());
        if (blockFile.exists()) {
          // Found a persisted copy of this block; record it in the set.
          persistedBlockIds.add(blockId);
        }
      }
    }
  }

  // We should have found a persisted copy for each located block.
  assertThat(persistedBlockIds.size(),
      is(locatedBlocks.getLocatedBlocks().size()));
}
Example 6
Source File: BlockPoolSlice.java From big-c with Apache License 2.0
/**
 * Move a persisted replica from the lazypersist directory to a
 * subdirectory under finalized.
 */
File activateSavedReplica(Block b, File metaFile, File blockFile)
    throws IOException {
  final File blockDir = DatanodeUtil.idToBlockDir(finalizedDir, b.getBlockId());
  final File targetBlockFile = new File(blockDir, blockFile.getName());
  final File targetMetaFile = new File(blockDir, metaFile.getName());
  FileUtils.moveFile(blockFile, targetBlockFile);
  FsDatasetImpl.LOG.info("Moved " + blockFile + " to " + targetBlockFile);
  FileUtils.moveFile(metaFile, targetMetaFile);
  FsDatasetImpl.LOG.info("Moved " + metaFile + " to " + targetMetaFile);
  return targetBlockFile;
}
Example 7
Source File: BlockPoolSlice.java From big-c with Apache License 2.0
File addBlock(Block b, File f) throws IOException {
  File blockDir = DatanodeUtil.idToBlockDir(finalizedDir, b.getBlockId());
  if (!blockDir.exists()) {
    if (!blockDir.mkdirs()) {
      throw new IOException("Failed to mkdirs " + blockDir);
    }
  }
  File blockFile = FsDatasetImpl.moveBlockFiles(b, f, blockDir);
  File metaFile = FsDatasetUtil.getMetaFile(blockFile, b.getGenerationStamp());
  dfsUsage.incDfsUsed(b.getNumBytes() + metaFile.length());
  return blockFile;
}
Example 8
Source File: FsDatasetImpl.java From hadoop with Apache License 2.0
/**
 * Copy the block and meta files for the given block to the given destination.
 * @return the new meta and block files.
 * @throws IOException
 */
static File[] copyBlockFiles(long blockId, long genStamp, File srcMeta,
    File srcFile, File destRoot, boolean calculateChecksum)
    throws IOException {
  final File destDir = DatanodeUtil.idToBlockDir(destRoot, blockId);
  final File dstFile = new File(destDir, srcFile.getName());
  final File dstMeta = FsDatasetUtil.getMetaFile(dstFile, genStamp);
  return copyBlockFiles(srcMeta, srcFile, dstMeta, dstFile, calculateChecksum);
}
Example 9
Source File: FsDatasetImpl.java From big-c with Apache License 2.0
/**
 * Copy the block and meta files for the given block to the given destination.
 * @return the new meta and block files.
 * @throws IOException
 */
static File[] copyBlockFiles(long blockId, long genStamp, File srcMeta,
    File srcFile, File destRoot, boolean calculateChecksum)
    throws IOException {
  final File destDir = DatanodeUtil.idToBlockDir(destRoot, blockId);
  final File dstFile = new File(destDir, srcFile.getName());
  final File dstMeta = FsDatasetUtil.getMetaFile(dstFile, genStamp);
  return copyBlockFiles(srcMeta, srcFile, dstMeta, dstFile, calculateChecksum);
}
Example 10
Source File: LazyPersistTestCase.java From hadoop with Apache License 2.0
/**
 * Make sure at least one non-transient volume has a saved copy of the
 * replica. An infinite loop is used to ensure the async lazy persist
 * tasks are completely done before verification. The caller of
 * ensureLazyPersistBlocksAreSaved expects either a successful pass or a
 * timeout failure.
 */
protected final void ensureLazyPersistBlocksAreSaved(
    LocatedBlocks locatedBlocks) throws IOException, InterruptedException {
  final String bpid = cluster.getNamesystem().getBlockPoolId();
  List<? extends FsVolumeSpi> volumes =
      cluster.getDataNodes().get(0).getFSDataset().getVolumes();
  final Set<Long> persistedBlockIds = new HashSet<Long>();

  while (persistedBlockIds.size() < locatedBlocks.getLocatedBlocks().size()) {
    // Take a 1 second sleep before each verification iteration
    Thread.sleep(1000);

    for (LocatedBlock lb : locatedBlocks.getLocatedBlocks()) {
      for (FsVolumeSpi v : volumes) {
        if (v.isTransientStorage()) {
          continue;
        }

        FsVolumeImpl volume = (FsVolumeImpl) v;
        File lazyPersistDir =
            volume.getBlockPoolSlice(bpid).getLazypersistDir();

        long blockId = lb.getBlock().getBlockId();
        File targetDir = DatanodeUtil.idToBlockDir(lazyPersistDir, blockId);
        File blockFile = new File(targetDir, lb.getBlock().getBlockName());
        if (blockFile.exists()) {
          // Found a persisted copy of this block; record it in the set.
          persistedBlockIds.add(blockId);
        }
      }
    }
  }

  // We should have found a persisted copy for each located block.
  assertThat(persistedBlockIds.size(),
      is(locatedBlocks.getLocatedBlocks().size()));
}
Example 11
Source File: BlockPoolSlice.java From hadoop with Apache License 2.0
/**
 * Move a persisted replica from the lazypersist directory to a
 * subdirectory under finalized.
 */
File activateSavedReplica(Block b, File metaFile, File blockFile)
    throws IOException {
  final File blockDir = DatanodeUtil.idToBlockDir(finalizedDir, b.getBlockId());
  final File targetBlockFile = new File(blockDir, blockFile.getName());
  final File targetMetaFile = new File(blockDir, metaFile.getName());
  FileUtils.moveFile(blockFile, targetBlockFile);
  FsDatasetImpl.LOG.info("Moved " + blockFile + " to " + targetBlockFile);
  FileUtils.moveFile(metaFile, targetMetaFile);
  FsDatasetImpl.LOG.info("Moved " + metaFile + " to " + targetMetaFile);
  return targetBlockFile;
}
Example 12
Source File: BlockPoolSlice.java From hadoop with Apache License 2.0
File addBlock(Block b, File f) throws IOException {
  File blockDir = DatanodeUtil.idToBlockDir(finalizedDir, b.getBlockId());
  if (!blockDir.exists()) {
    if (!blockDir.mkdirs()) {
      throw new IOException("Failed to mkdirs " + blockDir);
    }
  }
  File blockFile = FsDatasetImpl.moveBlockFiles(b, f, blockDir);
  File metaFile = FsDatasetUtil.getMetaFile(blockFile, b.getGenerationStamp());
  dfsUsage.incDfsUsed(b.getNumBytes() + metaFile.length());
  return blockFile;
}
Example 13
Source File: FsDatasetImpl.java From hadoop with Apache License 2.0
private File[] copyReplicaWithNewBlockIdAndGS(
    ReplicaUnderRecovery replicaInfo, String bpid, long newBlkId, long newGS)
    throws IOException {
  String blockFileName = Block.BLOCK_FILE_PREFIX + newBlkId;
  FsVolumeReference v = volumes.getNextVolume(
      replicaInfo.getVolume().getStorageType(), replicaInfo.getNumBytes());
  final File tmpDir = ((FsVolumeImpl) v.getVolume())
      .getBlockPoolSlice(bpid).getTmpDir();
  final File destDir = DatanodeUtil.idToBlockDir(tmpDir, newBlkId);
  final File dstBlockFile = new File(destDir, blockFileName);
  final File dstMetaFile = FsDatasetUtil.getMetaFile(dstBlockFile, newGS);
  return copyBlockFiles(replicaInfo.getMetaFile(), replicaInfo.getBlockFile(),
      dstMetaFile, dstBlockFile, true);
}
Example 14
Source File: BlockPoolSlice.java From lucene-solr with Apache License 2.0
File addFinalizedBlock(Block b, ReplicaInfo replicaInfo) throws IOException {
  File blockDir = DatanodeUtil.idToBlockDir(finalizedDir, b.getBlockId());
  fileIoProvider.mkdirsWithExistsCheck(volume, blockDir);
  return FsDatasetImpl.moveBlockFiles(b, replicaInfo, blockDir);
}
Example 15
Source File: MiniDFSCluster.java From hadoop with Apache License 2.0
/**
 * Get the latest metadata file corresponding to a block.
 * @param storageDir storage directory
 * @param blk the block
 * @return metadata file corresponding to the block
 */
public static File getBlockMetadataFile(File storageDir, ExtendedBlock blk) {
  return new File(DatanodeUtil.idToBlockDir(
      getFinalizedDir(storageDir, blk.getBlockPoolId()), blk.getBlockId()),
      blk.getBlockName() + "_" + blk.getGenerationStamp() +
          Block.METADATA_EXTENSION);
}
Example 16
Source File: MiniDFSCluster.java From hadoop with Apache License 2.0
/**
 * Get the data file corresponding to a block.
 * @param storageDir storage directory
 * @param blk the block
 * @return data file corresponding to the block
 */
public static File getBlockFile(File storageDir, ExtendedBlock blk) {
  return new File(DatanodeUtil.idToBlockDir(
      getFinalizedDir(storageDir, blk.getBlockPoolId()), blk.getBlockId()),
      blk.getBlockName());
}
Example 17
Source File: MiniDFSCluster.java From big-c with Apache License 2.0
/**
 * Get the data file corresponding to a block.
 * @param storageDir storage directory
 * @param blk the block
 * @return data file corresponding to the block
 */
public static File getBlockFile(File storageDir, ExtendedBlock blk) {
  return new File(DatanodeUtil.idToBlockDir(
      getFinalizedDir(storageDir, blk.getBlockPoolId()), blk.getBlockId()),
      blk.getBlockName());
}
Example 18
Source File: MiniDFSCluster.java From big-c with Apache License 2.0
/**
 * Get the latest metadata file corresponding to a block.
 * @param storageDir storage directory
 * @param blk the block
 * @return metadata file corresponding to the block
 */
public static File getBlockMetadataFile(File storageDir, ExtendedBlock blk) {
  return new File(DatanodeUtil.idToBlockDir(
      getFinalizedDir(storageDir, blk.getBlockPoolId()), blk.getBlockId()),
      blk.getBlockName() + "_" + blk.getGenerationStamp() +
          Block.METADATA_EXTENSION);
}