org.apache.hadoop.hdfs.protocol.LocatedBlocks Java Examples
The following examples show how to use
org.apache.hadoop.hdfs.protocol.LocatedBlocks.
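As a quick orientation before the examples: a LocatedBlocks describes a byte range of a file as a list of LocatedBlock entries, each pairing a block with the datanodes holding its replicas. The sketch below shows the typical way to obtain one and walk it. It is a minimal illustration, not taken from any of the projects below; the class name LocatedBlocksSketch, the NameNode URI, and the file path are hypothetical placeholders, and it assumes a reachable HDFS cluster.

import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;

public class LocatedBlocksSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Hypothetical NameNode URI; point this at your own cluster.
    DistributedFileSystem dfs = (DistributedFileSystem) FileSystem.get(
        URI.create("hdfs://localhost:8020"), conf);
    String src = "/tmp/example.dat"; // hypothetical file path
    long len = dfs.getFileStatus(new Path(src)).getLen();

    // Ask the DFS client for the block list covering the whole file
    // (the same call used in several of the examples below).
    LocatedBlocks blocks = dfs.getClient().getLocatedBlocks(src, 0, len);
    System.out.println("fileLength=" + blocks.getFileLength()
        + " underConstruction=" + blocks.isUnderConstruction());

    // Each LocatedBlock carries the block itself plus its replica locations.
    for (LocatedBlock lb : blocks.getLocatedBlocks()) {
      System.out.println(lb.getBlock() + " size=" + lb.getBlockSize()
          + " corrupt=" + lb.isCorrupt());
      for (DatanodeInfo dn : lb.getLocations()) {
        System.out.println("  replica on " + dn.getHostName());
      }
    }
    dfs.close();
  }
}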
Example #1
Source File: TestRetryCacheWithHA.java From hadoop with Apache License 2.0
@Override
void prepare() throws Exception {
  final Path filePath = new Path(file);
  DFSTestUtil.createFile(dfs, filePath, BlockSize, DataNodes, 0);

  // append to the file and leave the last block under construction
  out = this.client.append(file, BlockSize, EnumSet.of(CreateFlag.APPEND),
      null, null);
  byte[] appendContent = new byte[100];
  new Random().nextBytes(appendContent);
  out.write(appendContent);
  ((HdfsDataOutputStream) out).hsync(EnumSet.of(SyncFlag.UPDATE_LENGTH));

  LocatedBlocks blks = dfs.getClient()
      .getLocatedBlocks(file, BlockSize + 1);
  assertEquals(1, blks.getLocatedBlocks().size());
  nodes = blks.get(0).getLocations();
  oldBlock = blks.get(0).getBlock();

  LocatedBlock newLbk = client.getNamenode().updateBlockForPipeline(
      oldBlock, client.getClientName());
  newBlock = new ExtendedBlock(oldBlock.getBlockPoolId(),
      oldBlock.getBlockId(), oldBlock.getNumBytes(),
      newLbk.getBlock().getGenerationStamp());
}
Example #2
Source File: TestLazyPersistFiles.java From hadoop with Apache License 2.0
/**
 * Delete lazy-persist file that has been persisted to disk.
 * Both memory blocks and disk blocks are deleted.
 * @throws IOException
 * @throws InterruptedException
 */
@Test
public void testDeleteAfterPersist() throws Exception {
  startUpCluster(true, -1);
  final String METHOD_NAME = GenericTestUtils.getMethodName();
  Path path = new Path("/" + METHOD_NAME + ".dat");

  makeTestFile(path, BLOCK_SIZE, true);
  LocatedBlocks locatedBlocks =
      ensureFileReplicasOnStorageType(path, RAM_DISK);

  // Sleep for a short time to allow the lazy writer thread to do its job
  Thread.sleep(6 * LAZY_WRITER_INTERVAL_SEC * 1000);

  // Delete after persist
  client.delete(path.toString(), false);
  Assert.assertFalse(fs.exists(path));

  assertThat(verifyDeletedBlocks(locatedBlocks), is(true));

  verifyRamDiskJMXMetric("RamDiskBlocksLazyPersisted", 1);
  verifyRamDiskJMXMetric("RamDiskBytesLazyPersisted", BLOCK_SIZE);
}
Example #3
Source File: FileFixer.java From RDFS with Apache License 2.0
/**
 * Returns the corrupt blocks in a file.
 */
List<LocatedBlock> corruptBlocksInFile(DistributedFileSystem fs,
    String uriPath, FileStatus stat) throws IOException {
  List<LocatedBlock> corrupt = new LinkedList<LocatedBlock>();
  LocatedBlocks locatedBlocks = fs.getClient().namenode.getBlockLocations(
      uriPath, 0, stat.getLen());
  for (LocatedBlock b : locatedBlocks.getLocatedBlocks()) {
    if (b.isCorrupt() ||
        (b.getLocations().length == 0 && b.getBlockSize() > 0)) {
      LOG.info("Adding bad block for file " + uriPath);
      corrupt.add(b);
    }
  }
  return corrupt;
}
Example #4
Source File: FileDataServlet.java From hadoop-gpu with Apache License 2.0
/**
 * Select a datanode to service this request.
 * Currently, this looks at no more than the first five blocks of a file,
 * selecting a datanode randomly from the most represented.
 */
private static DatanodeID pickSrcDatanode(FileStatus i, ClientProtocol nnproxy)
    throws IOException {
  // a race condition can happen by initializing a static member this way.
  // A proper fix should make JspHelper a singleton. Since it doesn't affect
  // correctness, we leave it as is for now.
  if (jspHelper == null)
    jspHelper = new JspHelper();
  final LocatedBlocks blks = nnproxy.getBlockLocations(
      i.getPath().toUri().getPath(), 0, 1);
  if (i.getLen() == 0 || blks.getLocatedBlocks().size() <= 0) {
    // pick a random datanode
    return jspHelper.randomNode();
  }
  return jspHelper.bestNode(blks.get(0));
}
Example #5
Source File: DistributedAvatarFileSystem.java From RDFS with Apache License 2.0
@Override
public LocatedBlock addBlock(final String src, final String clientName,
    final DatanodeInfo[] excludedNodes) throws IOException {
  return (new MutableFSCaller<LocatedBlock>() {
    @Override
    LocatedBlock call(int retries) throws IOException {
      if (retries > 0) {
        FileStatus info = namenode.getFileInfo(src);
        if (info != null) {
          LocatedBlocks blocks = namenode.getBlockLocations(src, 0,
              info.getLen());
          // If at least one block exists.
          if (blocks.locatedBlockCount() > 0) {
            LocatedBlock last = blocks.get(blocks.locatedBlockCount() - 1);
            if (last.getBlockSize() == 0) {
              // This one has not been written to
              namenode.abandonBlock(last.getBlock(), src, clientName);
            }
          }
        }
      }
      return namenode.addBlock(src, clientName, excludedNodes);
    }
  }).callFS();
}
Example #6
Source File: ClientNamenodeProtocolServerSideTranslatorPB.java From hadoop with Apache License 2.0
@Override
public GetBlockLocationsResponseProto getBlockLocations(
    RpcController controller, GetBlockLocationsRequestProto req)
    throws ServiceException {
  try {
    LocatedBlocks b = server.getBlockLocations(req.getSrc(),
        req.getOffset(), req.getLength());
    Builder builder = GetBlockLocationsResponseProto.newBuilder();
    if (b != null) {
      builder.setLocations(PBHelper.convert(b)).build();
    }
    return builder.build();
  } catch (IOException e) {
    throw new ServiceException(e);
  }
}
Example #7
Source File: ClientNamenodeProtocolTranslatorPB.java From hadoop with Apache License 2.0
@Override
public LocatedBlocks getBlockLocations(String src, long offset, long length)
    throws AccessControlException, FileNotFoundException,
    UnresolvedLinkException, IOException {
  GetBlockLocationsRequestProto req = GetBlockLocationsRequestProto
      .newBuilder()
      .setSrc(src)
      .setOffset(offset)
      .setLength(length)
      .build();
  try {
    GetBlockLocationsResponseProto resp = rpcProxy.getBlockLocations(null,
        req);
    return resp.hasLocations() ?
        PBHelper.convert(resp.getLocations()) : null;
  } catch (ServiceException e) {
    throw ProtobufHelper.getRemoteException(e);
  }
}
Example #8
Source File: TestDFSClientRetries.java From hadoop with Apache License 2.0
private LocatedBlocks makeBadBlockList(LocatedBlocks goodBlockList) {
  LocatedBlock goodLocatedBlock = goodBlockList.get(0);
  LocatedBlock badLocatedBlock = new LocatedBlock(
      goodLocatedBlock.getBlock(),
      new DatanodeInfo[] {
          DFSTestUtil.getDatanodeInfo("1.2.3.4", "bogus", 1234)
      },
      goodLocatedBlock.getStartOffset(),
      false);

  List<LocatedBlock> badBlocks = new ArrayList<LocatedBlock>();
  badBlocks.add(badLocatedBlock);
  return new LocatedBlocks(goodBlockList.getFileLength(), false,
      badBlocks, null, true, null);
}
Example #9
Source File: ClientNamenodeProtocolServerSideTranslatorPB.java From big-c with Apache License 2.0
@Override
public GetBlockLocationsResponseProto getBlockLocations(
    RpcController controller, GetBlockLocationsRequestProto req)
    throws ServiceException {
  try {
    LocatedBlocks b = server.getBlockLocations(req.getSrc(),
        req.getOffset(), req.getLength());
    Builder builder = GetBlockLocationsResponseProto.newBuilder();
    if (b != null) {
      builder.setLocations(PBHelper.convert(b)).build();
    }
    return builder.build();
  } catch (IOException e) {
    throw new ServiceException(e);
  }
}
Example #10
Source File: TestLazyPersistFiles.java From big-c with Apache License 2.0
/**
 * Delete lazy-persist file that has been persisted to disk.
 * Both memory blocks and disk blocks are deleted.
 * @throws IOException
 * @throws InterruptedException
 */
@Test
public void testDeleteAfterPersist() throws Exception {
  startUpCluster(true, -1);
  final String METHOD_NAME = GenericTestUtils.getMethodName();
  Path path = new Path("/" + METHOD_NAME + ".dat");

  makeTestFile(path, BLOCK_SIZE, true);
  LocatedBlocks locatedBlocks =
      ensureFileReplicasOnStorageType(path, RAM_DISK);

  // Sleep for a short time to allow the lazy writer thread to do its job
  Thread.sleep(6 * LAZY_WRITER_INTERVAL_SEC * 1000);

  // Delete after persist
  client.delete(path.toString(), false);
  Assert.assertFalse(fs.exists(path));

  assertThat(verifyDeletedBlocks(locatedBlocks), is(true));

  verifyRamDiskJMXMetric("RamDiskBlocksLazyPersisted", 1);
  verifyRamDiskJMXMetric("RamDiskBytesLazyPersisted", BLOCK_SIZE);
}
Example #11
Source File: TestBlockReorderMultiBlocks.java From hbase with Apache License 2.0
private void testFromDFS(DistributedFileSystem dfs, String src, int repCount,
    String localhost) throws Exception {
  // Multiple times as the order is random
  for (int i = 0; i < 10; i++) {
    LocatedBlocks l;
    // The NN gets the block list asynchronously, so we may need multiple
    // tries to get the list
    final long max = System.currentTimeMillis() + 10000;
    boolean done;

    do {
      Assert.assertTrue("Can't get enough replicas.",
          System.currentTimeMillis() < max);
      l = getNamenode(dfs.getClient()).getBlockLocations(src, 0, 1);
      Assert.assertNotNull("Can't get block locations for " + src, l);
      Assert.assertNotNull(l.getLocatedBlocks());
      Assert.assertTrue(l.getLocatedBlocks().size() > 0);

      done = true;
      for (int y = 0; y < l.getLocatedBlocks().size() && done; y++) {
        done = (l.get(y).getLocations().length == repCount);
      }
    } while (!done);

    for (int y = 0; y < l.getLocatedBlocks().size() && done; y++) {
      Assert.assertEquals(localhost,
          l.get(y).getLocations()[repCount - 1].getHostName());
    }
  }
}
Example #12
Source File: DFSUtil.java From big-c with Apache License 2.0
/**
 * Convert a LocatedBlocks to BlockLocations[]
 * @param blocks a LocatedBlocks
 * @return an array of BlockLocations
 */
public static BlockLocation[] locatedBlocks2Locations(LocatedBlocks blocks) {
  if (blocks == null) {
    return new BlockLocation[0];
  }
  return locatedBlocks2Locations(blocks.getLocatedBlocks());
}
Example #13
Source File: TestBlockToken.java From big-c with Apache License 2.0
/**
 * This test writes a file and gets the block locations without closing the
 * file, and tests the block token in the last block. Block token is verified
 * by ensuring it is of correct kind.
 *
 * @throws IOException
 * @throws InterruptedException
 */
@Test
public void testBlockTokenInLastLocatedBlock() throws IOException,
    InterruptedException {
  Configuration conf = new HdfsConfiguration();
  conf.setBoolean(DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_ENABLE_KEY, true);
  conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 512);
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
      .numDataNodes(1).build();
  cluster.waitActive();

  try {
    FileSystem fs = cluster.getFileSystem();
    String fileName = "/testBlockTokenInLastLocatedBlock";
    Path filePath = new Path(fileName);
    FSDataOutputStream out = fs.create(filePath, (short) 1);
    out.write(new byte[1000]);
    // ensure that the first block is written out (see FSOutputSummer#flush)
    out.flush();
    LocatedBlocks locatedBlocks = cluster.getNameNodeRpc().getBlockLocations(
        fileName, 0, 1000);
    while (locatedBlocks.getLastLocatedBlock() == null) {
      Thread.sleep(100);
      locatedBlocks = cluster.getNameNodeRpc().getBlockLocations(fileName,
          0, 1000);
    }
    Token<BlockTokenIdentifier> token = locatedBlocks.getLastLocatedBlock()
        .getBlockToken();
    Assert.assertEquals(BlockTokenIdentifier.KIND_NAME, token.getKind());
    out.close();
  } finally {
    cluster.shutdown();
  }
}
Example #14
Source File: PBHelper.java From hadoop with Apache License 2.0
public static HdfsFileStatusProto convert(HdfsFileStatus fs) {
  if (fs == null)
    return null;
  FileType fType = FileType.IS_FILE;
  if (fs.isDir()) {
    fType = FileType.IS_DIR;
  } else if (fs.isSymlink()) {
    fType = FileType.IS_SYMLINK;
  }

  HdfsFileStatusProto.Builder builder = HdfsFileStatusProto.newBuilder().
      setLength(fs.getLen()).
      setFileType(fType).
      setBlockReplication(fs.getReplication()).
      setBlocksize(fs.getBlockSize()).
      setModificationTime(fs.getModificationTime()).
      setAccessTime(fs.getAccessTime()).
      setPermission(PBHelper.convert(fs.getPermission())).
      setOwner(fs.getOwner()).
      setGroup(fs.getGroup()).
      setFileId(fs.getFileId()).
      setChildrenNum(fs.getChildrenNum()).
      setPath(ByteString.copyFrom(fs.getLocalNameInBytes())).
      setStoragePolicy(fs.getStoragePolicy());
  if (fs.isSymlink()) {
    builder.setSymlink(ByteString.copyFrom(fs.getSymlinkInBytes()));
  }
  if (fs.getFileEncryptionInfo() != null) {
    builder.setFileEncryptionInfo(convert(fs.getFileEncryptionInfo()));
  }
  if (fs instanceof HdfsLocatedFileStatus) {
    final HdfsLocatedFileStatus lfs = (HdfsLocatedFileStatus) fs;
    LocatedBlocks locations = lfs.getBlockLocations();
    if (locations != null) {
      builder.setLocations(PBHelper.convert(locations));
    }
  }
  return builder.build();
}
Example #15
Source File: TestBlockMissingException.java From RDFS with Apache License 2.0
/**
 * Test DFS Raid
 */
public void testBlockMissingException() throws Exception {
  LOG.info("Test testBlockMissingException started.");
  long blockSize = 1024L;
  int numBlocks = 4;
  conf = new Configuration();
  try {
    dfs = new MiniDFSCluster(conf, NUM_DATANODES, true, null);
    dfs.waitActive();
    fileSys = (DistributedFileSystem) dfs.getFileSystem();
    Path file1 = new Path("/user/dhruba/raidtest/file1");
    createOldFile(fileSys, file1, 1, numBlocks, blockSize);

    // extract block locations from File system. Wait till file is closed.
    LocatedBlocks locations = null;
    locations = fileSys.dfs.namenode.getBlockLocations(file1.toString(),
        0, numBlocks * blockSize);
    // remove block of file
    LOG.info("Remove first block of file");
    corruptBlock(file1, locations.get(0).getBlock());

    // validate that the system throws BlockMissingException
    validateFile(fileSys, file1);
  } finally {
    if (fileSys != null) fileSys.close();
    if (dfs != null) dfs.shutdown();
  }
  LOG.info("Test testBlockMissingException completed.");
}
Example #16
Source File: PBHelper.java From hadoop with Apache License 2.0
public static LocatedBlocks convert(LocatedBlocksProto lb) {
  return new LocatedBlocks(
      lb.getFileLength(), lb.getUnderConstruction(),
      PBHelper.convertLocatedBlock(lb.getBlocksList()),
      lb.hasLastBlock() ? PBHelper.convert(lb.getLastBlock()) : null,
      lb.getIsLastBlockComplete(),
      lb.hasFileEncryptionInfo() ? convert(lb.getFileEncryptionInfo()) :
          null);
}
Example #17
Source File: DFSClient.java From hadoop with Apache License 2.0
/**
 * @see ClientProtocol#getBlockLocations(String, long, long)
 */
static LocatedBlocks callGetBlockLocations(ClientProtocol namenode,
    String src, long start, long length) throws IOException {
  try {
    return namenode.getBlockLocations(src, start, length);
  } catch (RemoteException re) {
    throw re.unwrapRemoteException(AccessControlException.class,
        FileNotFoundException.class,
        UnresolvedPathException.class);
  }
}
Example #18
Source File: DistributedRaidFileSystem.java From RDFS with Apache License 2.0
private static long getFileSize(LocatedBlocks lbs) throws IOException {
  List<LocatedBlock> locatedBlocks = lbs.getLocatedBlocks();
  long fileSize = 0;
  for (LocatedBlock lb : locatedBlocks) {
    fileSize += lb.getBlockSize();
  }
  if (fileSize != lbs.getFileLength()) {
    throw new IOException("lbs.getFileLength() " + lbs.getFileLength() +
        " does not match sum of block sizes " + fileSize);
  }
  return fileSize;
}
Example #19
Source File: TestFileCreation.java From hadoop with Apache License 2.0
private void assertBlocks(BlockManager bm, LocatedBlocks lbs,
    boolean exist) {
  for (LocatedBlock locatedBlock : lbs.getLocatedBlocks()) {
    if (exist) {
      assertTrue(bm.getStoredBlock(
          locatedBlock.getBlock().getLocalBlock()) != null);
    } else {
      assertTrue(bm.getStoredBlock(
          locatedBlock.getBlock().getLocalBlock()) == null);
    }
  }
}
Example #20
Source File: TestAvatarDataNodeRBW.java From RDFS with Apache License 2.0
private boolean blocksReceived(int nBlocks, String fileName)
    throws IOException {
  AvatarNode avatar = cluster.getPrimaryAvatar(0).avatar;
  LocatedBlocks lbks = avatar.namesystem.getBlockLocations(fileName, 0,
      Long.MAX_VALUE);
  int blocks = lbks.locatedBlockCount();
  if (blocks != nBlocks)
    return false;
  for (LocatedBlock lbk : lbks.getLocatedBlocks()) {
    DatanodeInfo[] locs = lbk.getLocations();
    if (locs == null || locs.length == 0) {
      return false;
    }
  }
  return true;
}
Example #21
Source File: TestRaidDfs.java From RDFS with Apache License 2.0
private void corruptBlockAndValidate(Path srcFile, Path destPath,
    int[] listBlockNumToCorrupt, long blockSize, int numBlocks,
    MiniDFSCluster cluster) throws IOException, InterruptedException {
  RaidDFSUtil.cleanUp(fileSys, srcFile.getParent());
  fileSys.mkdirs(srcFile.getParent());
  int repl = 1;
  long crc = createTestFilePartialLastBlock(fileSys, srcFile, repl,
      numBlocks, blockSize);
  long length = fileSys.getFileStatus(srcFile).getLen();

  if (codec.isDirRaid) {
    RaidNode.doRaid(conf, fileSys.getFileStatus(srcFile.getParent()),
        destPath, codec, new RaidNode.Statistics(),
        RaidUtils.NULL_PROGRESSABLE, false, repl, repl);
  } else {
    RaidNode.doRaid(conf, fileSys.getFileStatus(srcFile), destPath, codec,
        new RaidNode.Statistics(), RaidUtils.NULL_PROGRESSABLE,
        false, repl, repl);
  }

  // Corrupt the listed blocks of the file
  for (int blockNumToCorrupt : listBlockNumToCorrupt) {
    LOG.info("Corrupt block " + blockNumToCorrupt + " of file " + srcFile);
    LocatedBlocks locations = getBlockLocations(srcFile);
    corruptBlock(srcFile, locations.get(blockNumToCorrupt).getBlock(),
        NUM_DATANODES, true, cluster);
  }

  // Validate
  DistributedRaidFileSystem raidfs = getRaidFS();
  assertTrue(validateFile(raidfs, srcFile, length, crc));
}
Example #22
Source File: NameNodeRpcServer.java From big-c with Apache License 2.0
@Override // ClientProtocol
public LocatedBlocks getBlockLocations(String src, long offset, long length)
    throws IOException {
  checkNNStartup();
  metrics.incrGetBlockLocations();
  return namesystem.getBlockLocations(getClientMachine(), src,
      offset, length);
}
Example #23
Source File: DFSInputStream.java From big-c with Apache License 2.0
/** Fetch a block from namenode and cache it */
private void fetchBlockAt(long offset) throws IOException {
  synchronized (infoLock) {
    int targetBlockIdx = locatedBlocks.findBlock(offset);
    if (targetBlockIdx < 0) { // block is not cached
      targetBlockIdx = LocatedBlocks.getInsertIndex(targetBlockIdx);
    }
    // fetch blocks
    final LocatedBlocks newBlocks = dfsClient.getLocatedBlocks(src, offset);
    if (newBlocks == null) {
      throw new IOException("Could not find target position " + offset);
    }
    locatedBlocks.insertRange(targetBlockIdx, newBlocks.getLocatedBlocks());
  }
}
Example #24
Source File: TestLazyPersistFiles.java From hadoop with Apache License 2.0
/**
 * File partially fit in RamDisk after eviction.
 * RamDisk can fit 2 blocks. Write a file with 5 blocks.
 * Expect 2 or less blocks are on RamDisk and 3 or more on disk.
 * @throws IOException
 */
@Test
public void testFallbackToDiskPartial()
    throws IOException, InterruptedException {
  startUpCluster(true, 2);
  final String METHOD_NAME = GenericTestUtils.getMethodName();
  Path path = new Path("/" + METHOD_NAME + ".dat");

  makeTestFile(path, BLOCK_SIZE * 5, true);

  // Sleep for a short time to allow the lazy writer thread to do its job
  Thread.sleep(6 * LAZY_WRITER_INTERVAL_SEC * 1000);

  triggerBlockReport();

  int numBlocksOnRamDisk = 0;
  int numBlocksOnDisk = 0;

  long fileLength = client.getFileInfo(path.toString()).getLen();
  LocatedBlocks locatedBlocks =
      client.getLocatedBlocks(path.toString(), 0, fileLength);
  for (LocatedBlock locatedBlock : locatedBlocks.getLocatedBlocks()) {
    if (locatedBlock.getStorageTypes()[0] == RAM_DISK) {
      numBlocksOnRamDisk++;
    } else if (locatedBlock.getStorageTypes()[0] == DEFAULT) {
      numBlocksOnDisk++;
    }
  }

  // Since eviction is asynchronous, depending on the timing of eviction
  // wrt writes, we may get 2 or less blocks on RAM disk.
  assert(numBlocksOnRamDisk <= 2);
  assert(numBlocksOnDisk >= 3);
}
Example #25
Source File: TestStandbyIsHot.java From big-c with Apache License 2.0
static void waitForBlockLocations(final MiniDFSCluster cluster,
    final NameNode nn, final String path, final int expectedReplicas)
    throws Exception {
  GenericTestUtils.waitFor(new Supplier<Boolean>() {
    @Override
    public Boolean get() {
      try {
        LocatedBlocks locs =
            NameNodeAdapter.getBlockLocations(nn, path, 0, 1000);
        DatanodeInfo[] dnis = locs.getLastLocatedBlock().getLocations();
        for (DatanodeInfo dni : dnis) {
          Assert.assertNotNull(dni);
        }
        int numReplicas = dnis.length;
        LOG.info("Got " + numReplicas + " locs: " + locs);
        if (numReplicas > expectedReplicas) {
          cluster.triggerDeletionReports();
        }
        cluster.triggerHeartbeats();
        return numReplicas == expectedReplicas;
      } catch (IOException e) {
        LOG.warn("No block locations yet: " + e.getMessage());
        return false;
      }
    }
  }, 500, 20000);
}
Example #26
Source File: TestInterDatanodeProtocol.java From hadoop-gpu with Apache License 2.0
public static LocatedBlock getLastLocatedBlock(ClientProtocol namenode,
    String src) throws IOException {
  // get block info for the last block
  LocatedBlocks locations = namenode.getBlockLocations(src, 0,
      Long.MAX_VALUE);
  List<LocatedBlock> blocks = locations.getLocatedBlocks();
  DataNode.LOG.info("blocks.size()=" + blocks.size());
  assertTrue(blocks.size() > 0);
  return blocks.get(blocks.size() - 1);
}
Example #27
Source File: TestFileTruncate.java From hadoop with Apache License 2.0
/**
 * The last block is truncated at mid. (non copy-on-truncate)
 * shutdown the datanodes immediately after truncate.
 */
@Test(timeout=60000)
public void testTruncateWithDataNodesShutdownImmediately() throws Exception {
  int startingFileSize = 3 * BLOCK_SIZE;
  byte[] contents = AppendTestUtil.initBuffer(startingFileSize);
  final Path parent = new Path("/test");
  final Path p = new Path(parent,
      "testTruncateWithDataNodesShutdownImmediately");

  writeContents(contents, startingFileSize, p);

  int toTruncateLength = 1;
  int newLength = startingFileSize - toTruncateLength;
  boolean isReady = fs.truncate(p, newLength);
  assertFalse(isReady);

  cluster.shutdownDataNodes();
  cluster.setDataNodesDead();
  try {
    for (int i = 0; i < SUCCESS_ATTEMPTS && cluster.isDataNodeUp(); i++) {
      Thread.sleep(SLEEP);
    }
    assertFalse("All DataNodes should be down.", cluster.isDataNodeUp());
    LocatedBlocks blocks = getLocatedBlocks(p);
    assertTrue(blocks.isUnderConstruction());
  } finally {
    cluster.startDataNodes(conf, DATANODE_NUM, true,
        StartupOption.REGULAR, null);
    cluster.waitActive();
  }
  checkBlockRecovery(p);

  fs.delete(parent, true);
}
Example #28
Source File: DFSClient.java From big-c with Apache License 2.0
@VisibleForTesting
public LocatedBlocks getLocatedBlocks(String src, long start, long length)
    throws IOException {
  TraceScope scope = getPathTraceScope("getBlockLocations", src);
  try {
    return callGetBlockLocations(namenode, src, start, length);
  } finally {
    scope.close();
  }
}
Example #29
Source File: DFSClient.java From RDFS with Apache License 2.0
/**
 * Convert an HdfsFileStatus and its block locations to a LocatedFileStatus
 * @param stat an HdfsFileStatus
 * @param locs the file's block locations
 * @param src parent path in string representation
 * @return a FileStatus object
 */
private static LocatedFileStatus toLocatedFileStatus(
    HdfsFileStatus stat, LocatedBlocks locs, String src) {
  if (stat == null) {
    return null;
  }
  return new LocatedFileStatus(stat.getLen(),
      stat.isDir(), stat.getReplication(),
      stat.getBlockSize(), stat.getModificationTime(),
      stat.getAccessTime(),
      stat.getPermission(), stat.getOwner(), stat.getGroup(),
      stat.getFullPath(new Path(src)), // full path
      DFSUtil.locatedBlocks2Locations(locs));
}
Example #30
Source File: LazyPersistTestCase.java From hadoop with Apache License 2.0
protected final boolean verifyDeletedBlocks(LocatedBlocks locatedBlocks)
    throws IOException, InterruptedException {

  LOG.info("Verifying replica has no saved copy after deletion.");
  triggerBlockReport();

  while (DataNodeTestUtils.getPendingAsyncDeletions(
      cluster.getDataNodes().get(0)) > 0L) {
    Thread.sleep(1000);
  }

  final String bpid = cluster.getNamesystem().getBlockPoolId();
  List<? extends FsVolumeSpi> volumes =
      cluster.getDataNodes().get(0).getFSDataset().getVolumes();

  // Make sure deleted replica does not have a copy on either finalized dir
  // of transient volume or finalized dir of non-transient volume
  for (FsVolumeSpi v : volumes) {
    FsVolumeImpl volume = (FsVolumeImpl) v;
    File targetDir = (v.isTransientStorage()) ?
        volume.getBlockPoolSlice(bpid).getFinalizedDir() :
        volume.getBlockPoolSlice(bpid).getLazypersistDir();
    if (verifyBlockDeletedFromDir(targetDir, locatedBlocks) == false) {
      return false;
    }
  }
  return true;
}