org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry Java Examples
The following examples show how to use org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry, which pairs a directive's requested settings (CacheDirectiveInfo) with its current caching statistics (CacheDirectiveStats).
Each example is taken from an open source project; the source file and originating project are noted above the code.
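Before working through the examples, here is a minimal sketch of the consumption pattern behind most of them: list the cache directives on a cluster and read each entry's CacheDirectiveInfo and CacheDirectiveStats. This is illustrative only; it assumes fs.defaultFS points at an HDFS NameNode, and the class name is made up for the sketch.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.RemoteIterator;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry;
import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;
import org.apache.hadoop.hdfs.protocol.CacheDirectiveStats;

public class ListCacheDirectivesSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Assumes fs.defaultFS resolves to an HDFS NameNode.
    DistributedFileSystem dfs = (DistributedFileSystem) FileSystem.get(conf);
    // A null filter lists every directive visible to the caller.
    RemoteIterator<CacheDirectiveEntry> iter = dfs.listCacheDirectives(null);
    while (iter.hasNext()) {
      CacheDirectiveEntry entry = iter.next();
      CacheDirectiveInfo info = entry.getInfo();    // what was requested
      CacheDirectiveStats stats = entry.getStats(); // what is actually cached
      System.out.println("id=" + info.getId()
          + " pool=" + info.getPool()
          + " path=" + info.getPath()
          + " bytes=" + stats.getBytesCached() + "/" + stats.getBytesNeeded());
    }
  }
}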
Example #1
Source File: ClientNamenodeProtocolServerSideTranslatorPB.java From hadoop with Apache License 2.0
@Override
public ListCacheDirectivesResponseProto listCacheDirectives(
    RpcController controller, ListCacheDirectivesRequestProto request)
    throws ServiceException {
  try {
    CacheDirectiveInfo filter = PBHelper.convert(request.getFilter());
    BatchedEntries<CacheDirectiveEntry> entries =
        server.listCacheDirectives(request.getPrevId(), filter);
    ListCacheDirectivesResponseProto.Builder builder =
        ListCacheDirectivesResponseProto.newBuilder();
    builder.setHasMore(entries.hasMore());
    for (int i = 0, n = entries.size(); i < n; i++) {
      builder.addElements(PBHelper.convert(entries.get(i)));
    }
    return builder.build();
  } catch (IOException e) {
    throw new ServiceException(e);
  }
}
Example #2
Source File: TestRetryCacheWithHA.java From big-c with Apache License 2.0
@Override
boolean checkNamenodeBeforeReturn() throws Exception {
  for (int i = 0; i < CHECKTIMES; i++) {
    RemoteIterator<CacheDirectiveEntry> iter = dfs.listCacheDirectives(
        new CacheDirectiveInfo.Builder().
            setPool(directive.getPool()).
            setPath(directive.getPath()).
            build());
    while (iter.hasNext()) {
      CacheDirectiveInfo result = iter.next().getInfo();
      if ((result.getId() == id) &&
          (result.getReplication().shortValue() == newReplication)) {
        return true;
      }
    }
    Thread.sleep(1000);
  }
  return false;
}
Example #3
Source File: TestRetryCacheWithHA.java From hadoop with Apache License 2.0
@SuppressWarnings("unchecked") private void listCacheDirectives( HashSet<String> poolNames, int active) throws Exception { HashSet<String> tmpNames = (HashSet<String>)poolNames.clone(); RemoteIterator<CacheDirectiveEntry> directives = dfs.listCacheDirectives(null); int poolCount = poolNames.size(); for (int i=0; i<poolCount; i++) { CacheDirectiveEntry directive = directives.next(); String pollName = directive.getInfo().getPool(); assertTrue("The pool name should be expected", tmpNames.remove(pollName)); if (i % 2 == 0) { int standby = active; active = (standby == 0) ? 1 : 0; cluster.transitionToStandby(standby); cluster.transitionToActive(active); cluster.waitActive(active); } } assertTrue("All pools must be found", tmpNames.isEmpty()); }
Example #4
Source File: TestRetryCacheWithHA.java From hadoop with Apache License 2.0
@Override
boolean checkNamenodeBeforeReturn() throws Exception {
  for (int i = 0; i < CHECKTIMES; i++) {
    RemoteIterator<CacheDirectiveEntry> iter = dfs.listCacheDirectives(
        new CacheDirectiveInfo.Builder().
            setPool(directive.getPool()).
            setPath(directive.getPath()).
            build());
    if (!iter.hasNext()) {
      return true;
    }
    Thread.sleep(1000);
  }
  return false;
}
Example #5
Source File: TestRetryCacheWithHA.java From hadoop with Apache License 2.0
@Override
boolean checkNamenodeBeforeReturn() throws Exception {
  for (int i = 0; i < CHECKTIMES; i++) {
    RemoteIterator<CacheDirectiveEntry> iter = dfs.listCacheDirectives(
        new CacheDirectiveInfo.Builder().
            setPool(directive.getPool()).
            setPath(directive.getPath()).
            build());
    while (iter.hasNext()) {
      CacheDirectiveInfo result = iter.next().getInfo();
      if ((result.getId() == id) &&
          (result.getReplication().shortValue() == newReplication)) {
        return true;
      }
    }
    Thread.sleep(1000);
  }
  return false;
}
Example #6
Source File: TestRetryCacheWithHA.java From hadoop with Apache License 2.0
@Override
boolean checkNamenodeBeforeReturn() throws Exception {
  for (int i = 0; i < CHECKTIMES; i++) {
    RemoteIterator<CacheDirectiveEntry> iter = dfs.listCacheDirectives(
        new CacheDirectiveInfo.Builder().
            setPool(directive.getPool()).
            setPath(directive.getPath()).
            build());
    if (iter.hasNext()) {
      return true;
    }
    Thread.sleep(1000);
  }
  return false;
}
Example #7
Source File: TestRetryCacheWithHA.java From big-c with Apache License 2.0
@Override
boolean checkNamenodeBeforeReturn() throws Exception {
  for (int i = 0; i < CHECKTIMES; i++) {
    RemoteIterator<CacheDirectiveEntry> iter = dfs.listCacheDirectives(
        new CacheDirectiveInfo.Builder().
            setPool(directive.getPool()).
            setPath(directive.getPath()).
            build());
    if (!iter.hasNext()) {
      return true;
    }
    Thread.sleep(1000);
  }
  return false;
}
Example #8
Source File: ClientNamenodeProtocolTranslatorPB.java From hadoop with Apache License 2.0
@Override
public BatchedEntries<CacheDirectiveEntry> listCacheDirectives(long prevId,
    CacheDirectiveInfo filter) throws IOException {
  if (filter == null) {
    filter = new CacheDirectiveInfo.Builder().build();
  }
  try {
    return new BatchedCacheEntries(
        rpcProxy.listCacheDirectives(null,
            ListCacheDirectivesRequestProto.newBuilder().
                setPrevId(prevId).
                setFilter(PBHelper.convert(filter)).
                build()));
  } catch (ServiceException e) {
    throw ProtobufHelper.getRemoteException(e);
  }
}
Example #9
Source File: TestRetryCacheWithHA.java From big-c with Apache License 2.0
@SuppressWarnings("unchecked") private void listCacheDirectives( HashSet<String> poolNames, int active) throws Exception { HashSet<String> tmpNames = (HashSet<String>)poolNames.clone(); RemoteIterator<CacheDirectiveEntry> directives = dfs.listCacheDirectives(null); int poolCount = poolNames.size(); for (int i=0; i<poolCount; i++) { CacheDirectiveEntry directive = directives.next(); String pollName = directive.getInfo().getPool(); assertTrue("The pool name should be expected", tmpNames.remove(pollName)); if (i % 2 == 0) { int standby = active; active = (standby == 0) ? 1 : 0; cluster.transitionToStandby(standby); cluster.transitionToActive(active); cluster.waitActive(active); } } assertTrue("All pools must be found", tmpNames.isEmpty()); }
Example #10
Source File: TestRetryCacheWithHA.java From big-c with Apache License 2.0
@Override
boolean checkNamenodeBeforeReturn() throws Exception {
  for (int i = 0; i < CHECKTIMES; i++) {
    RemoteIterator<CacheDirectiveEntry> iter = dfs.listCacheDirectives(
        new CacheDirectiveInfo.Builder().
            setPool(directive.getPool()).
            setPath(directive.getPath()).
            build());
    if (iter.hasNext()) {
      return true;
    }
    Thread.sleep(1000);
  }
  return false;
}
Example #11
Source File: CacheRegistry.java From nnproxy with Apache License 2.0
List<CacheDirectiveEntry> getAllCacheDirectives(UpstreamManager.Upstream upstream)
    throws IOException {
  CacheDirectiveInfo filter = new CacheDirectiveInfo.Builder().build();
  List<CacheDirectiveEntry> directives = new ArrayList<>();
  long prevId = -1;
  // Page through the directives, using the last id seen as the cursor
  // for the next batch, until an empty batch signals the end.
  while (true) {
    BatchedRemoteIterator.BatchedEntries<CacheDirectiveEntry> it =
        upstream.protocol.listCacheDirectives(prevId, filter);
    if (it.size() == 0) {
      break;
    }
    for (int i = 0; i < it.size(); i++) {
      CacheDirectiveEntry entry = it.get(i);
      prevId = entry.getInfo().getId();
      directives.add(entry);
    }
  }
  return directives;
}
Example #12
Source File: ClientNamenodeProtocolTranslatorPB.java From big-c with Apache License 2.0
@Override
public BatchedEntries<CacheDirectiveEntry> listCacheDirectives(long prevId,
    CacheDirectiveInfo filter) throws IOException {
  if (filter == null) {
    filter = new CacheDirectiveInfo.Builder().build();
  }
  try {
    return new BatchedCacheEntries(
        rpcProxy.listCacheDirectives(null,
            ListCacheDirectivesRequestProto.newBuilder().
                setPrevId(prevId).
                setFilter(PBHelper.convert(filter)).
                build()));
  } catch (ServiceException e) {
    throw ProtobufHelper.getRemoteException(e);
  }
}
Example #13
Source File: ClientNamenodeProtocolServerSideTranslatorPB.java From big-c with Apache License 2.0
@Override
public ListCacheDirectivesResponseProto listCacheDirectives(
    RpcController controller, ListCacheDirectivesRequestProto request)
    throws ServiceException {
  try {
    CacheDirectiveInfo filter = PBHelper.convert(request.getFilter());
    BatchedEntries<CacheDirectiveEntry> entries =
        server.listCacheDirectives(request.getPrevId(), filter);
    ListCacheDirectivesResponseProto.Builder builder =
        ListCacheDirectivesResponseProto.newBuilder();
    builder.setHasMore(entries.hasMore());
    for (int i = 0, n = entries.size(); i < n; i++) {
      builder.addElements(PBHelper.convert(entries.get(i)));
    }
    return builder.build();
  } catch (IOException e) {
    throw new ServiceException(e);
  }
}
Example #14
Source File: PBHelper.java From big-c with Apache License 2.0
public static CacheDirectiveEntryProto convert(CacheDirectiveEntry entry) {
  CacheDirectiveEntryProto.Builder builder =
      CacheDirectiveEntryProto.newBuilder();
  builder.setInfo(PBHelper.convert(entry.getInfo()));
  builder.setStats(PBHelper.convert(entry.getStats()));
  return builder.build();
}
Example #15
Source File: CacheRegistry.java From nnproxy with Apache License 2.0
List<CacheDirectiveEntry> maskWithFsIndex(List<CacheDirectiveEntry> entries,
    int fsIndex) {
  List<CacheDirectiveEntry> masked = new ArrayList<>(entries.size());
  for (CacheDirectiveEntry entry : entries) {
    // Rebuild each directive's info with a masked id; stats pass through unchanged.
    CacheDirectiveInfo info = new CacheDirectiveInfo.Builder()
        .setId(maskDirectiveId(entry.getInfo().getId(), fsIndex))
        .setPath(entry.getInfo().getPath())
        .setReplication(entry.getInfo().getReplication())
        .setPool(entry.getInfo().getPool())
        .setExpiration(entry.getInfo().getExpiration())
        .build();
    masked.add(new CacheDirectiveEntry(info, entry.getStats()));
  }
  return masked;
}
Example #16
Source File: DistributedFileSystem.java From big-c with Apache License 2.0
/**
 * List cache directives. Incrementally fetches results from the server.
 *
 * @param filter Filter parameters to use when listing the directives, null to
 *               list all directives visible to us.
 * @return A RemoteIterator which returns CacheDirectiveEntry objects.
 */
public RemoteIterator<CacheDirectiveEntry> listCacheDirectives(
    CacheDirectiveInfo filter) throws IOException {
  if (filter == null) {
    filter = new CacheDirectiveInfo.Builder().build();
  }
  if (filter.getPath() != null) {
    filter = new CacheDirectiveInfo.Builder(filter).
        setPath(new Path(getPathName(fixRelativePart(filter.getPath())))).
        build();
  }
  final RemoteIterator<CacheDirectiveEntry> iter =
      dfs.listCacheDirectives(filter);
  return new RemoteIterator<CacheDirectiveEntry>() {
    @Override
    public boolean hasNext() throws IOException {
      return iter.hasNext();
    }

    @Override
    public CacheDirectiveEntry next() throws IOException {
      // Although the paths we get back from the NameNode should always be
      // absolute, we call makeQualified to add the scheme and authority of
      // this DistributedFilesystem.
      CacheDirectiveEntry desc = iter.next();
      CacheDirectiveInfo info = desc.getInfo();
      Path p = info.getPath().makeQualified(getUri(), getWorkingDirectory());
      return new CacheDirectiveEntry(
          new CacheDirectiveInfo.Builder(info).setPath(p).build(),
          desc.getStats());
    }
  };
}
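As a usage note for the method above: passing a non-null filter narrows the listing on the server side. A minimal caller-side sketch, assuming an already-initialized DistributedFileSystem dfs and a hypothetical pool named "hotData":

// Count the directives in a single pool; "hotData" is a made-up pool name.
CacheDirectiveInfo filter = new CacheDirectiveInfo.Builder()
    .setPool("hotData")
    .build();
RemoteIterator<CacheDirectiveEntry> it = dfs.listCacheDirectives(filter);
int count = 0;
while (it.hasNext()) {
  it.next();
  count++;
}
System.out.println(count + " directives in pool hotData");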
Example #17
Source File: NameNodeRpcServer.java From big-c with Apache License 2.0
@Override // ClientProtocol
public BatchedEntries<CacheDirectiveEntry> listCacheDirectives(long prevId,
    CacheDirectiveInfo filter) throws IOException {
  checkNNStartup();
  if (filter == null) {
    filter = new CacheDirectiveInfo.Builder().build();
  }
  return namesystem.listCacheDirectives(prevId, filter);
}
Example #18
Source File: TestCacheDirectives.java From hadoop with Apache License 2.0
private static void validateListAll(
    RemoteIterator<CacheDirectiveEntry> iter, Long... ids) throws Exception {
  for (Long id : ids) {
    assertTrue("Unexpectedly few elements", iter.hasNext());
    assertEquals("Unexpected directive ID", id,
        iter.next().getInfo().getId());
  }
  assertFalse("Unexpectedly many list elements", iter.hasNext());
}
Example #19
Source File: TestCacheDirectives.java From hadoop with Apache License 2.0
@After
public void teardown() throws Exception {
  // Remove cache directives left behind by tests so that we release mmaps.
  RemoteIterator<CacheDirectiveEntry> iter = dfs.listCacheDirectives(null);
  while (iter.hasNext()) {
    dfs.removeCacheDirective(iter.next().getInfo().getId());
  }
  waitForCachedBlocks(namenode, 0, 0, "teardown");
  if (cluster != null) {
    cluster.shutdown();
  }
  // Restore the original CacheManipulator
  NativeIO.POSIX.setCacheManipulator(prevCacheManipulator);
}
Example #20
Source File: TestCacheDirectives.java From big-c with Apache License 2.0
@After
public void teardown() throws Exception {
  // Remove cache directives left behind by tests so that we release mmaps.
  RemoteIterator<CacheDirectiveEntry> iter = dfs.listCacheDirectives(null);
  while (iter.hasNext()) {
    dfs.removeCacheDirective(iter.next().getInfo().getId());
  }
  waitForCachedBlocks(namenode, 0, 0, "teardown");
  if (cluster != null) {
    cluster.shutdown();
  }
  // Restore the original CacheManipulator
  NativeIO.POSIX.setCacheManipulator(prevCacheManipulator);
}
Example #21
Source File: PBHelper.java From hadoop with Apache License 2.0
public static CacheDirectiveEntryProto convert(CacheDirectiveEntry entry) {
  CacheDirectiveEntryProto.Builder builder =
      CacheDirectiveEntryProto.newBuilder();
  builder.setInfo(PBHelper.convert(entry.getInfo()));
  builder.setStats(PBHelper.convert(entry.getStats()));
  return builder.build();
}
Example #22
Source File: TestCacheDirectives.java From big-c with Apache License 2.0
private static void validateListAll(
    RemoteIterator<CacheDirectiveEntry> iter, Long... ids) throws Exception {
  for (Long id : ids) {
    assertTrue("Unexpectedly few elements", iter.hasNext());
    assertEquals("Unexpected directive ID", id,
        iter.next().getInfo().getId());
  }
  assertFalse("Unexpectedly many list elements", iter.hasNext());
}
Example #23
Source File: NameNodeRpcServer.java From hadoop with Apache License 2.0
@Override // ClientProtocol
public BatchedEntries<CacheDirectiveEntry> listCacheDirectives(long prevId,
    CacheDirectiveInfo filter) throws IOException {
  checkNNStartup();
  if (filter == null) {
    filter = new CacheDirectiveInfo.Builder().build();
  }
  return namesystem.listCacheDirectives(prevId, filter);
}
Example #24
Source File: DistributedFileSystem.java From hadoop with Apache License 2.0
/**
 * List cache directives. Incrementally fetches results from the server.
 *
 * @param filter Filter parameters to use when listing the directives, null to
 *               list all directives visible to us.
 * @return A RemoteIterator which returns CacheDirectiveEntry objects.
 */
public RemoteIterator<CacheDirectiveEntry> listCacheDirectives(
    CacheDirectiveInfo filter) throws IOException {
  if (filter == null) {
    filter = new CacheDirectiveInfo.Builder().build();
  }
  if (filter.getPath() != null) {
    filter = new CacheDirectiveInfo.Builder(filter).
        setPath(new Path(getPathName(fixRelativePart(filter.getPath())))).
        build();
  }
  final RemoteIterator<CacheDirectiveEntry> iter =
      dfs.listCacheDirectives(filter);
  return new RemoteIterator<CacheDirectiveEntry>() {
    @Override
    public boolean hasNext() throws IOException {
      return iter.hasNext();
    }

    @Override
    public CacheDirectiveEntry next() throws IOException {
      // Although the paths we get back from the NameNode should always be
      // absolute, we call makeQualified to add the scheme and authority of
      // this DistributedFilesystem.
      CacheDirectiveEntry desc = iter.next();
      CacheDirectiveInfo info = desc.getInfo();
      Path p = info.getPath().makeQualified(getUri(), getWorkingDirectory());
      return new CacheDirectiveEntry(
          new CacheDirectiveInfo.Builder(info).setPath(p).build(),
          desc.getStats());
    }
  };
}
Example #25
Source File: CacheAdmin.java From big-c with Apache License 2.0
// Backs the 'hdfs cacheadmin -listDirectives' command: parse the filter
// options, then render the matching directives (optionally with stats) as a table.
@Override
public int run(Configuration conf, List<String> args) throws IOException {
  CacheDirectiveInfo.Builder builder = new CacheDirectiveInfo.Builder();
  String pathFilter = StringUtils.popOptionWithArgument("-path", args);
  if (pathFilter != null) {
    builder.setPath(new Path(pathFilter));
  }
  String poolFilter = StringUtils.popOptionWithArgument("-pool", args);
  if (poolFilter != null) {
    builder.setPool(poolFilter);
  }
  boolean printStats = StringUtils.popOption("-stats", args);
  String idFilter = StringUtils.popOptionWithArgument("-id", args);
  if (idFilter != null) {
    builder.setId(Long.parseLong(idFilter));
  }
  if (!args.isEmpty()) {
    System.err.println("Can't understand argument: " + args.get(0));
    return 1;
  }
  TableListing.Builder tableBuilder = new TableListing.Builder().
      addField("ID", Justification.RIGHT).
      addField("POOL", Justification.LEFT).
      addField("REPL", Justification.RIGHT).
      addField("EXPIRY", Justification.LEFT).
      addField("PATH", Justification.LEFT);
  if (printStats) {
    tableBuilder.addField("BYTES_NEEDED", Justification.RIGHT).
        addField("BYTES_CACHED", Justification.RIGHT).
        addField("FILES_NEEDED", Justification.RIGHT).
        addField("FILES_CACHED", Justification.RIGHT);
  }
  TableListing tableListing = tableBuilder.build();
  try {
    DistributedFileSystem dfs = AdminHelper.getDFS(conf);
    RemoteIterator<CacheDirectiveEntry> iter =
        dfs.listCacheDirectives(builder.build());
    int numEntries = 0;
    while (iter.hasNext()) {
      CacheDirectiveEntry entry = iter.next();
      CacheDirectiveInfo directive = entry.getInfo();
      CacheDirectiveStats stats = entry.getStats();
      List<String> row = new LinkedList<String>();
      row.add("" + directive.getId());
      row.add(directive.getPool());
      row.add("" + directive.getReplication());
      String expiry;
      // This is effectively never, round for nice printing
      if (directive.getExpiration().getMillis() >
          Expiration.MAX_RELATIVE_EXPIRY_MS / 2) {
        expiry = "never";
      } else {
        expiry = directive.getExpiration().toString();
      }
      row.add(expiry);
      row.add(directive.getPath().toUri().getPath());
      if (printStats) {
        row.add("" + stats.getBytesNeeded());
        row.add("" + stats.getBytesCached());
        row.add("" + stats.getFilesNeeded());
        row.add("" + stats.getFilesCached());
      }
      tableListing.addRow(row.toArray(new String[row.size()]));
      numEntries++;
    }
    System.out.print(String.format("Found %d entr%s%n",
        numEntries, numEntries == 1 ? "y" : "ies"));
    if (numEntries > 0) {
      System.out.print(tableListing);
    }
  } catch (IOException e) {
    System.err.println(AdminHelper.prettifyException(e));
    return 2;
  }
  return 0;
}
Example #26
Source File: ClientNamenodeProtocolTranslatorPB.java From big-c with Apache License 2.0
@Override
public CacheDirectiveEntry get(int i) {
  return PBHelper.convert(response.getElements(i));
}
Example #27
Source File: TestCacheDirectives.java From big-c with Apache License 2.0
private static void waitForCacheDirectiveStats(final DistributedFileSystem dfs,
    final long targetBytesNeeded, final long targetBytesCached,
    final long targetFilesNeeded, final long targetFilesCached,
    final CacheDirectiveInfo filter, final String infoString)
    throws Exception {
  LOG.info("Polling listCacheDirectives " +
      ((filter == null) ? "ALL" : filter.toString()) + " for " +
      targetBytesNeeded + " targetBytesNeeded, " +
      targetBytesCached + " targetBytesCached, " +
      targetFilesNeeded + " targetFilesNeeded, " +
      targetFilesCached + " targetFilesCached");
  GenericTestUtils.waitFor(new Supplier<Boolean>() {
    @Override
    public Boolean get() {
      RemoteIterator<CacheDirectiveEntry> iter = null;
      CacheDirectiveEntry entry = null;
      try {
        iter = dfs.listCacheDirectives(filter);
        entry = iter.next();
      } catch (IOException e) {
        fail("got IOException while calling " +
            "listCacheDirectives: " + e.getMessage());
      }
      Assert.assertNotNull(entry);
      CacheDirectiveStats stats = entry.getStats();
      if ((targetBytesNeeded == stats.getBytesNeeded()) &&
          (targetBytesCached == stats.getBytesCached()) &&
          (targetFilesNeeded == stats.getFilesNeeded()) &&
          (targetFilesCached == stats.getFilesCached())) {
        return true;
      } else {
        LOG.info(infoString + ": " +
            "filesNeeded: " + stats.getFilesNeeded() + "/" + targetFilesNeeded +
            ", filesCached: " + stats.getFilesCached() + "/" + targetFilesCached +
            ", bytesNeeded: " + stats.getBytesNeeded() + "/" + targetBytesNeeded +
            ", bytesCached: " + stats.getBytesCached() + "/" + targetBytesCached);
        return false;
      }
    }
  }, 500, 60000);
}
Example #28
Source File: DFSClient.java From hadoop with Apache License 2.0
public RemoteIterator<CacheDirectiveEntry> listCacheDirectives(
    CacheDirectiveInfo filter) throws IOException {
  return new CacheDirectiveIterator(namenode, filter, traceSampler);
}
Example #29
Source File: PBHelper.java From big-c with Apache License 2.0
public static CacheDirectiveEntry convert(CacheDirectiveEntryProto proto) {
  CacheDirectiveInfo info = PBHelper.convert(proto.getInfo());
  CacheDirectiveStats stats = PBHelper.convert(proto.getStats());
  return new CacheDirectiveEntry(info, stats);
}
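Together with the convert(CacheDirectiveEntry) overload in Examples #14 and #21, this forms the serialize/deserialize pair for the protobuf wire format. A quick round-trip sketch, assuming an existing entry:

// entry -> proto -> entry; the pre-existing "entry" variable is assumed.
CacheDirectiveEntryProto proto = PBHelper.convert(entry);
CacheDirectiveEntry roundTripped = PBHelper.convert(proto);
// The directive id survives the round trip.
assert roundTripped.getInfo().getId().equals(entry.getInfo().getId());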
Example #30
Source File: TestCacheDirectives.java From big-c with Apache License 2.0
@Test(timeout=120000)
public void testWaitForCachedReplicas() throws Exception {
  FileSystemTestHelper helper = new FileSystemTestHelper();
  GenericTestUtils.waitFor(new Supplier<Boolean>() {
    @Override
    public Boolean get() {
      return ((namenode.getNamesystem().getCacheCapacity() ==
          (NUM_DATANODES * CACHE_CAPACITY)) &&
          (namenode.getNamesystem().getCacheUsed() == 0));
    }
  }, 500, 60000);

  // Send a cache report referring to a bogus block. It is important that
  // the NameNode be robust against this.
  NamenodeProtocols nnRpc = namenode.getRpcServer();
  DataNode dn0 = cluster.getDataNodes().get(0);
  String bpid = cluster.getNamesystem().getBlockPoolId();
  LinkedList<Long> bogusBlockIds = new LinkedList<Long>();
  bogusBlockIds.add(999999L);
  nnRpc.cacheReport(dn0.getDNRegistrationForBP(bpid), bpid, bogusBlockIds);

  Path rootDir = helper.getDefaultWorkingDirectory(dfs);
  // Create the pool
  final String pool = "friendlyPool";
  nnRpc.addCachePool(new CachePoolInfo("friendlyPool"));
  // Create some test files
  final int numFiles = 2;
  final int numBlocksPerFile = 2;
  final List<String> paths = new ArrayList<String>(numFiles);
  for (int i = 0; i < numFiles; i++) {
    Path p = new Path(rootDir, "testCachePaths-" + i);
    FileSystemTestHelper.createFile(dfs, p, numBlocksPerFile,
        (int) BLOCK_SIZE);
    paths.add(p.toUri().getPath());
  }
  // Check the initial statistics at the namenode
  waitForCachedBlocks(namenode, 0, 0, "testWaitForCachedReplicas:0");
  // Cache and check each path in sequence
  int expected = 0;
  for (int i = 0; i < numFiles; i++) {
    CacheDirectiveInfo directive = new CacheDirectiveInfo.Builder().
        setPath(new Path(paths.get(i))).
        setPool(pool).
        build();
    nnRpc.addCacheDirective(directive, EnumSet.noneOf(CacheFlag.class));
    expected += numBlocksPerFile;
    waitForCachedBlocks(namenode, expected, expected,
        "testWaitForCachedReplicas:1");
  }

  // Check that the datanodes have the right cache values
  DatanodeInfo[] live = dfs.getDataNodeStats(DatanodeReportType.LIVE);
  assertEquals("Unexpected number of live nodes", NUM_DATANODES, live.length);
  long totalUsed = 0;
  for (DatanodeInfo dn : live) {
    final long cacheCapacity = dn.getCacheCapacity();
    final long cacheUsed = dn.getCacheUsed();
    final long cacheRemaining = dn.getCacheRemaining();
    assertEquals("Unexpected cache capacity", CACHE_CAPACITY, cacheCapacity);
    assertEquals("Capacity not equal to used + remaining",
        cacheCapacity, cacheUsed + cacheRemaining);
    assertEquals("Remaining not equal to capacity - used",
        cacheCapacity - cacheUsed, cacheRemaining);
    totalUsed += cacheUsed;
  }
  assertEquals(expected * BLOCK_SIZE, totalUsed);

  // Uncache and check each path in sequence
  RemoteIterator<CacheDirectiveEntry> entries =
      new CacheDirectiveIterator(nnRpc, null, Sampler.NEVER);
  for (int i = 0; i < numFiles; i++) {
    CacheDirectiveEntry entry = entries.next();
    nnRpc.removeCacheDirective(entry.getInfo().getId());
    expected -= numBlocksPerFile;
    waitForCachedBlocks(namenode, expected, expected,
        "testWaitForCachedReplicas:2");
  }
}