Java Code Examples for org.apache.hadoop.test.GenericTestUtils#waitFor()
The following examples show how to use org.apache.hadoop.test.GenericTestUtils#waitFor(). They are drawn from open-source projects; the source file, originating project, and license are noted above each example.
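All of the calls below follow the same pattern: waitFor(Supplier<Boolean> check, int checkEveryMillis, int waitForMillis) polls the supplied condition at the given interval and throws a TimeoutException (or InterruptedException) if the condition does not become true within the overall timeout. The two trailing integers in every example are therefore the poll interval and the timeout, both in milliseconds. Here is a minimal sketch of the pattern; the service object and its isReady() method are hypothetical placeholders, not part of any example below:

// Poll every 100 ms; fail with a TimeoutException after 10 s.
// waitFor re-evaluates the Supplier on each poll until it returns true.
GenericTestUtils.waitFor(new Supplier<Boolean>() {
  @Override
  public Boolean get() {
    return service.isReady(); // hypothetical readiness check
  }
}, 100, 10000);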
Example 1
Source File: TestBPOfferService.java From hadoop with Apache License 2.0
private void waitForOneToFail(final BPOfferService bpos)
    throws Exception {
  GenericTestUtils.waitFor(new Supplier<Boolean>() {
    @Override
    public Boolean get() {
      List<BPServiceActor> actors = bpos.getBPServiceActors();
      int failedcount = 0;
      for (BPServiceActor actor : actors) {
        if (!actor.isAlive()) {
          failedcount++;
        }
      }
      return failedcount == 1;
    }
  }, 100, 10000);
}
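On Java 8 and later, the anonymous Supplier collapses to a lambda, as several of the later examples demonstrate. A sketch of the same check as Example 1 written that way, assuming the surrounding test fixture is unchanged:

GenericTestUtils.waitFor(() -> {
  int failedCount = 0;
  for (BPServiceActor actor : bpos.getBPServiceActors()) {
    if (!actor.isAlive()) {
      failedCount++;
    }
  }
  return failedCount == 1; // exactly one actor should have failed
}, 100, 10000);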
Example 2
Source File: TestBlockDeletingService.java From hadoop-ozone with Apache License 2.0
@Test
@SuppressWarnings("java:S2699") // waitFor => assertion with timeout
public void testShutdownService() throws Exception {
  OzoneConfiguration conf = new OzoneConfiguration();
  conf.setTimeDuration(OZONE_BLOCK_DELETING_SERVICE_INTERVAL, 500,
      TimeUnit.MILLISECONDS);
  conf.setInt(OZONE_BLOCK_DELETING_CONTAINER_LIMIT_PER_INTERVAL, 10);
  conf.setInt(OZONE_BLOCK_DELETING_LIMIT_PER_CONTAINER, 10);
  ContainerSet containerSet = new ContainerSet();
  // Create 1 container with 100 blocks
  createToDeleteBlocks(containerSet, conf, 1, 100, 1);

  BlockDeletingServiceTestImpl service =
      getBlockDeletingService(containerSet, conf);
  service.start();
  GenericTestUtils.waitFor(service::isStarted, 100, 3000);

  // Run some deleting tasks and verify there are threads running
  service.runDeletingTasks();
  GenericTestUtils.waitFor(() -> service.getThreadCount() > 0, 100, 1000);

  // Shutdown service and verify all threads are stopped
  service.shutdown();
  GenericTestUtils.waitFor(() -> service.getThreadCount() == 0, 100, 1000);
}
Example 3
Source File: TestEditLogAutoroll.java From hadoop with Apache License 2.0
@Test(timeout = 60000)
public void testEditLogAutoroll() throws Exception {
  // Make some edits
  final long startTxId = editLog.getCurSegmentTxId();
  for (int i = 0; i < 11; i++) {
    fs.mkdirs(new Path("testEditLogAutoroll-" + i));
  }
  // Wait for the NN to autoroll
  GenericTestUtils.waitFor(new Supplier<Boolean>() {
    @Override
    public Boolean get() {
      return editLog.getCurSegmentTxId() > startTxId;
    }
  }, 1000, 5000);
  // Transition to standby and make sure the roller stopped
  nn0.transitionToStandby();
  GenericTestUtils.assertNoThreadsMatching(
      ".*" + NameNodeEditLogRoller.class.getSimpleName() + ".*");
}
Example 4
Source File: MiniOzoneClusterImpl.java From hadoop-ozone with Apache License 2.0
/**
 * Waits for SCM to be out of Safe Mode. Many tests can be run iff we are out
 * of Safe mode.
 *
 * @throws TimeoutException
 * @throws InterruptedException
 */
@Override
public void waitTobeOutOfSafeMode()
    throws TimeoutException, InterruptedException {
  GenericTestUtils.waitFor(() -> {
    if (!scm.isInSafeMode()) {
      return true;
    }
    LOG.info("Waiting for cluster to be ready. No datanodes found");
    return false;
  }, 100, 1000 * 45);
}
Example 5
Source File: TestNodeFailure.java From hadoop-ozone with Apache License 2.0
/**
 * Waits until the Pipeline is marked as OPEN.
 * @param pipelineID Id of the pipeline
 */
private void waitForPipelineCreation(final PipelineID pipelineID)
    throws Exception {
  GenericTestUtils.waitFor(() -> {
    try {
      return pipelineManager.getPipeline(pipelineID)
          .getPipelineState().equals(Pipeline.PipelineState.OPEN);
    } catch (PipelineNotFoundException ex) {
      return false;
    }
  }, 1000, 1000 * 60);
}
Example 6
Source File: TestStandbyCheckpoints.java From big-c with Apache License 2.0
@Test(timeout = 300000)
public void testSBNCheckpoints() throws Exception {
  JournalSet standbyJournalSet = NameNodeAdapter.spyOnJournalSet(nn1);

  doEdits(0, 10);
  HATestUtil.waitForStandbyToCatchUp(nn0, nn1);
  // Once the standby catches up, it should notice that it needs to
  // do a checkpoint and save one to its local directories.
  HATestUtil.waitForCheckpoint(cluster, 1, ImmutableList.of(12));

  GenericTestUtils.waitFor(new Supplier<Boolean>() {
    @Override
    public Boolean get() {
      if (tmpOivImgDir.list().length > 0) {
        return true;
      } else {
        return false;
      }
    }
  }, 1000, 60000);

  // It should have saved the oiv image too.
  assertEquals("One file is expected", 1, tmpOivImgDir.list().length);

  // It should also upload it back to the active.
  HATestUtil.waitForCheckpoint(cluster, 0, ImmutableList.of(12));

  // The standby should never try to purge edit logs on shared storage.
  Mockito.verify(standbyJournalSet, Mockito.never())
      .purgeLogsOlderThan(Mockito.anyLong());
}
Example 7
Source File: TestBackupNode.java From hadoop with Apache License 2.0
private void testBNInSync(MiniDFSCluster cluster, final BackupNode backup,
    int testIdx) throws Exception {

  final NameNode nn = cluster.getNameNode();
  final FileSystem fs = cluster.getFileSystem();

  // Do a bunch of namespace operations, make sure they're replicated
  // to the BN.
  for (int i = 0; i < 10; i++) {
    final String src = "/test_" + testIdx + "_" + i;
    LOG.info("Creating " + src + " on NN");
    Path p = new Path(src);
    assertTrue(fs.mkdirs(p));

    GenericTestUtils.waitFor(new Supplier<Boolean>() {
      @Override
      public Boolean get() {
        LOG.info("Checking for " + src + " on BN");
        try {
          boolean hasFile =
              backup.getNamesystem().getFileInfo(src, false) != null;
          boolean txnIdMatch =
              backup.getRpcServer().getTransactionID() ==
              nn.getRpcServer().getTransactionID();
          return hasFile && txnIdMatch;
        } catch (Exception e) {
          throw new RuntimeException(e);
        }
      }
    }, 30, 10000);
  }

  assertStorageDirsMatch(nn, backup);
}
Example 8
Source File: TestRatisPipelineCreateAndDestroy.java From hadoop-ozone with Apache License 2.0
private void waitForPipelines(int numPipelines)
    throws TimeoutException, InterruptedException {
  GenericTestUtils.waitFor(() -> pipelineManager
      .getPipelines(HddsProtos.ReplicationType.RATIS,
          HddsProtos.ReplicationFactor.THREE,
          Pipeline.PipelineState.OPEN)
      .size() >= numPipelines, 100, 40000);
}
Example 9
Source File: DFSTestUtil.java From hadoop with Apache License 2.0
/**
 * Blocks until cache usage hits the expected new value.
 */
public static long verifyExpectedCacheUsage(final long expectedCacheUsed,
    final long expectedBlocks, final FsDatasetSpi<?> fsd) throws Exception {
  GenericTestUtils.waitFor(new Supplier<Boolean>() {
    private int tries = 0;

    @Override
    public Boolean get() {
      long curCacheUsed = fsd.getCacheUsed();
      long curBlocks = fsd.getNumBlocksCached();
      if ((curCacheUsed != expectedCacheUsed) ||
          (curBlocks != expectedBlocks)) {
        if (tries++ > 10) {
          LOG.info("verifyExpectedCacheUsage: have " +
              curCacheUsed + "/" + expectedCacheUsed + " bytes cached; " +
              curBlocks + "/" + expectedBlocks + " blocks cached. " +
              "memlock limit = " +
              NativeIO.POSIX.getCacheManipulator().getMemlockLimit() +
              ". Waiting...");
        }
        return false;
      }
      LOG.info("verifyExpectedCacheUsage: got " +
          curCacheUsed + "/" + expectedCacheUsed + " bytes cached; " +
          curBlocks + "/" + expectedBlocks + " blocks cached. " +
          "memlock limit = " +
          NativeIO.POSIX.getCacheManipulator().getMemlockLimit());
      return true;
    }
  }, 100, 60000);
  return expectedCacheUsed;
}
Example 10
Source File: TestLogAggregationService.java From hadoop with Apache License 2.0
@Test(timeout = 20000)
public void testAddNewTokenSentFromRMForLogAggregation() throws Exception {
  Configuration conf = new YarnConfiguration();
  conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION,
      "kerberos");
  UserGroupInformation.setConfiguration(conf);

  ApplicationId application1 = BuilderUtils.newApplicationId(1234, 1);
  Application mockApp = mock(Application.class);
  when(mockApp.getContainers()).thenReturn(
      new HashMap<ContainerId, Container>());
  this.context.getApplications().put(application1, mockApp);
  @SuppressWarnings("resource")
  LogAggregationService logAggregationService =
      new LogAggregationService(dispatcher, this.context, this.delSrvc,
          super.dirsHandler);
  logAggregationService.init(this.conf);
  logAggregationService.start();
  logAggregationService.handle(new LogHandlerAppStartedEvent(application1,
      this.user, null, ContainerLogsRetentionPolicy.ALL_CONTAINERS,
      this.acls, Records.newRecord(LogAggregationContext.class)));

  // Inject new token for log-aggregation after app log-aggregator init
  Text userText1 = new Text("user1");
  RMDelegationTokenIdentifier dtId1 =
      new RMDelegationTokenIdentifier(userText1, new Text("renewer1"),
          userText1);
  final Token<RMDelegationTokenIdentifier> token1 =
      new Token<RMDelegationTokenIdentifier>(dtId1.getBytes(),
          "password1".getBytes(), dtId1.getKind(), new Text("service1"));
  Credentials credentials = new Credentials();
  credentials.addToken(userText1, token1);
  this.context.getSystemCredentialsForApps().put(application1, credentials);

  logAggregationService.handle(new LogHandlerAppFinishedEvent(application1));

  final UserGroupInformation ugi =
      ((AppLogAggregatorImpl) logAggregationService.getAppLogAggregators()
          .get(application1)).getUgi();

  GenericTestUtils.waitFor(new Supplier<Boolean>() {
    public Boolean get() {
      boolean hasNewToken = false;
      for (Token<?> token : ugi.getCredentials().getAllTokens()) {
        if (token.equals(token1)) {
          hasNewToken = true;
        }
      }
      return hasNewToken;
    }
  }, 1000, 20000);
  logAggregationService.stop();
}
Example 11
Source File: TestSCMSafeModeManager.java From hadoop-ozone with Apache License 2.0
private void checkOpen(int expectedCount) throws Exception {
  GenericTestUtils.waitFor(() -> scmSafeModeManager
      .getOneReplicaPipelineSafeModeRule()
      .getCurrentReportedPipelineCount() == expectedCount,
      1000, 5000);
}
Example 12
Source File: TestCloseContainerByPipeline.java From hadoop-ozone with Apache License 2.0
@Test
public void testQuasiCloseTransitionViaRatis()
    throws IOException, TimeoutException, InterruptedException {

  String keyName = "testQuasiCloseTransitionViaRatis";
  OzoneOutputStream key = objectStore.getVolume("test").getBucket("test")
      .createKey(keyName, 1024, ReplicationType.RATIS,
          ReplicationFactor.ONE, new HashMap<>());
  key.write(keyName.getBytes());
  key.close();

  OmKeyArgs keyArgs =
      new OmKeyArgs.Builder().setVolumeName("test").setBucketName("test")
          .setType(HddsProtos.ReplicationType.RATIS)
          .setFactor(HddsProtos.ReplicationFactor.ONE).setDataSize(1024)
          .setKeyName(keyName)
          .setRefreshPipeline(true)
          .build();

  OmKeyLocationInfo omKeyLocationInfo =
      cluster.getOzoneManager().lookupKey(keyArgs).getKeyLocationVersions()
          .get(0).getBlocksLatestVersionOnly().get(0);

  long containerID = omKeyLocationInfo.getContainerID();
  ContainerInfo container = cluster.getStorageContainerManager()
      .getContainerManager().getContainer(ContainerID.valueof(containerID));
  Pipeline pipeline = cluster.getStorageContainerManager()
      .getPipelineManager().getPipeline(container.getPipelineID());
  List<DatanodeDetails> datanodes = pipeline.getNodes();
  Assert.assertEquals(datanodes.size(), 1);

  DatanodeDetails datanodeDetails = datanodes.get(0);
  Assert
      .assertFalse(isContainerClosed(cluster, containerID, datanodeDetails));

  // close the pipeline
  cluster.getStorageContainerManager()
      .getPipelineManager().finalizeAndDestroyPipeline(pipeline, false);

  // All the containers in OPEN or CLOSING state should transition to
  // QUASI-CLOSED after pipeline close
  GenericTestUtils.waitFor(
      () -> isContainerQuasiClosed(cluster, containerID, datanodeDetails),
      500, 5 * 1000);
  Assert.assertTrue(
      isContainerQuasiClosed(cluster, containerID, datanodeDetails));

  // Send close container command from SCM to datanode with forced flag as
  // true
  cluster.getStorageContainerManager().getScmNodeManager()
      .addDatanodeCommand(datanodeDetails.getUuid(),
          new CloseContainerCommand(containerID, pipeline.getId(), true));
  GenericTestUtils
      .waitFor(() -> isContainerClosed(
          cluster, containerID, datanodeDetails), 500, 5 * 1000);
  Assert.assertTrue(
      isContainerClosed(cluster, containerID, datanodeDetails));
}
Example 13
Source File: TestDeleteContainerHandler.java From hadoop-ozone with Apache License 2.0
@Test(timeout = 60000)
public void testDeleteContainerRequestHandlerOnClosedContainer()
    throws Exception {

  // the easiest way to create an open container is creating a key
  String keyName = UUID.randomUUID().toString();

  // create key
  createKey(keyName);

  // get containerID of the key
  ContainerID containerId = getContainerID(keyName);

  ContainerInfo container = cluster.getStorageContainerManager()
      .getContainerManager().getContainer(containerId);

  Pipeline pipeline = cluster.getStorageContainerManager()
      .getPipelineManager().getPipeline(container.getPipelineID());

  // We need to close the container because delete container only happens
  // on closed containers with force flag set to false.
  HddsDatanodeService hddsDatanodeService =
      cluster.getHddsDatanodes().get(0);

  Assert.assertFalse(isContainerClosed(hddsDatanodeService,
      containerId.getId()));

  DatanodeDetails datanodeDetails =
      hddsDatanodeService.getDatanodeDetails();

  NodeManager nodeManager =
      cluster.getStorageContainerManager().getScmNodeManager();

  // send the order to close the container
  nodeManager.addDatanodeCommand(datanodeDetails.getUuid(),
      new CloseContainerCommand(containerId.getId(), pipeline.getId()));

  GenericTestUtils.waitFor(() ->
      isContainerClosed(hddsDatanodeService, containerId.getId()),
      500, 5 * 1000);

  // double check if it's really closed (waitFor also throws an exception)
  Assert.assertTrue(isContainerClosed(hddsDatanodeService,
      containerId.getId()));

  // Check container exists before sending delete container command
  Assert.assertFalse(isContainerDeleted(hddsDatanodeService,
      containerId.getId()));

  // send delete container to the datanode
  nodeManager.addDatanodeCommand(datanodeDetails.getUuid(),
      new DeleteContainerCommand(containerId.getId(), false));

  GenericTestUtils.waitFor(() ->
      isContainerDeleted(hddsDatanodeService, containerId.getId()),
      500, 5 * 1000);

  Assert.assertTrue(isContainerDeleted(hddsDatanodeService,
      containerId.getId()));
}
Example 14
Source File: TestHealthyPipelineSafeModeRule.java From hadoop-ozone with Apache License 2.0
@Test
public void testHealthyPipelineSafeModeRuleWithMixedPipelines()
    throws Exception {

  String storageDir = GenericTestUtils.getTempPath(
      TestHealthyPipelineSafeModeRule.class.getName() + UUID.randomUUID());

  EventQueue eventQueue = new EventQueue();
  List<ContainerInfo> containers =
      new ArrayList<>(HddsTestUtils.getContainerInfo(1));

  OzoneConfiguration config = new OzoneConfiguration();

  // In Mock Node Manager, first 8 nodes are healthy, next 2 nodes are
  // stale and last one is dead, and this repeats. So for a 12 node, 9
  // healthy, 2 stale and one dead.
  MockNodeManager nodeManager = new MockNodeManager(true, 12);
  config.set(HddsConfigKeys.OZONE_METADATA_DIRS, storageDir);
  // enable pipeline check
  config.setBoolean(
      HddsConfigKeys.HDDS_SCM_SAFEMODE_PIPELINE_AVAILABILITY_CHECK, true);
  config.setBoolean(
      HddsConfigKeys.HDDS_SCM_SAFEMODE_PIPELINE_CREATION, false);

  SCMMetadataStore scmMetadataStore = new SCMMetadataStoreImpl(config);

  try {
    SCMPipelineManager pipelineManager = new SCMPipelineManager(config,
        nodeManager, scmMetadataStore.getPipelineTable(), eventQueue);
    pipelineManager.allowPipelineCreation();

    PipelineProvider mockRatisProvider =
        new MockRatisPipelineProvider(nodeManager,
            pipelineManager.getStateManager(), config, true);
    pipelineManager.setPipelineProvider(HddsProtos.ReplicationType.RATIS,
        mockRatisProvider);

    // Create 3 pipelines
    Pipeline pipeline1 =
        pipelineManager.createPipeline(HddsProtos.ReplicationType.RATIS,
            HddsProtos.ReplicationFactor.ONE);
    Pipeline pipeline2 =
        pipelineManager.createPipeline(HddsProtos.ReplicationType.RATIS,
            HddsProtos.ReplicationFactor.THREE);
    Pipeline pipeline3 =
        pipelineManager.createPipeline(HddsProtos.ReplicationType.RATIS,
            HddsProtos.ReplicationFactor.THREE);

    SCMSafeModeManager scmSafeModeManager = new SCMSafeModeManager(
        config, containers, pipelineManager, eventQueue);

    HealthyPipelineSafeModeRule healthyPipelineSafeModeRule =
        scmSafeModeManager.getHealthyPipelineSafeModeRule();

    // No pipeline event have sent to SCMSafemodeManager
    Assert.assertFalse(healthyPipelineSafeModeRule.validate());

    GenericTestUtils.LogCapturer logCapturer =
        GenericTestUtils.LogCapturer.captureLogs(LoggerFactory.getLogger(
            SCMSafeModeManager.class));

    // fire event with pipeline create status with ratis type and factor 1
    // pipeline, validate() should return false
    firePipelineEvent(pipeline1, eventQueue);
    GenericTestUtils.waitFor(() -> logCapturer.getOutput().contains(
        "reported count is 1"), 1000, 5000);
    Assert.assertFalse(healthyPipelineSafeModeRule.validate());

    firePipelineEvent(pipeline2, eventQueue);
    firePipelineEvent(pipeline3, eventQueue);

    GenericTestUtils.waitFor(() -> healthyPipelineSafeModeRule.validate(),
        1000, 5000);
  } finally {
    scmMetadataStore.getStore().close();
    FileUtil.fullyDelete(new File(storageDir));
  }
}
Example 15
Source File: TestHealthyPipelineSafeModeRule.java From hadoop-ozone with Apache License 2.0
@Test
public void testHealthyPipelineSafeModeRuleWithPipelines() throws Exception {

  String storageDir = GenericTestUtils.getTempPath(
      TestHealthyPipelineSafeModeRule.class.getName() + UUID.randomUUID());

  EventQueue eventQueue = new EventQueue();
  List<ContainerInfo> containers =
      new ArrayList<>(HddsTestUtils.getContainerInfo(1));

  OzoneConfiguration config = new OzoneConfiguration();

  // In Mock Node Manager, first 8 nodes are healthy, next 2 nodes are
  // stale and last one is dead, and this repeats. So for a 12 node, 9
  // healthy, 2 stale and one dead.
  MockNodeManager nodeManager = new MockNodeManager(true, 12);
  config.set(HddsConfigKeys.OZONE_METADATA_DIRS, storageDir);
  // enable pipeline check
  config.setBoolean(
      HddsConfigKeys.HDDS_SCM_SAFEMODE_PIPELINE_AVAILABILITY_CHECK, true);
  config.setBoolean(
      HddsConfigKeys.HDDS_SCM_SAFEMODE_PIPELINE_CREATION, false);

  SCMMetadataStore scmMetadataStore = new SCMMetadataStoreImpl(config);

  try {
    SCMPipelineManager pipelineManager = new SCMPipelineManager(config,
        nodeManager, scmMetadataStore.getPipelineTable(), eventQueue);
    pipelineManager.allowPipelineCreation();

    PipelineProvider mockRatisProvider =
        new MockRatisPipelineProvider(nodeManager,
            pipelineManager.getStateManager(), config, true);
    pipelineManager.setPipelineProvider(HddsProtos.ReplicationType.RATIS,
        mockRatisProvider);

    // Create 3 pipelines
    Pipeline pipeline1 =
        pipelineManager.createPipeline(HddsProtos.ReplicationType.RATIS,
            HddsProtos.ReplicationFactor.THREE);
    Pipeline pipeline2 =
        pipelineManager.createPipeline(HddsProtos.ReplicationType.RATIS,
            HddsProtos.ReplicationFactor.THREE);
    Pipeline pipeline3 =
        pipelineManager.createPipeline(HddsProtos.ReplicationType.RATIS,
            HddsProtos.ReplicationFactor.THREE);

    SCMSafeModeManager scmSafeModeManager = new SCMSafeModeManager(
        config, containers, pipelineManager, eventQueue);

    HealthyPipelineSafeModeRule healthyPipelineSafeModeRule =
        scmSafeModeManager.getHealthyPipelineSafeModeRule();

    // No datanodes have sent pipelinereport from datanode
    Assert.assertFalse(healthyPipelineSafeModeRule.validate());

    // Fire pipeline report from all datanodes in first pipeline, as here we
    // have 3 pipelines, 10% is 0.3, when doing ceil it is 1. So, we should
    // validate should return true after fire pipeline event

    // Here testing with out pipelinereport handler, so not moving created
    // pipelines to allocated state, as pipelines changing to healthy is
    // handled by pipeline report handler. So, leaving pipeline's in pipeline
    // manager in open state for test case simplicity.
    firePipelineEvent(pipeline1, eventQueue);

    GenericTestUtils.waitFor(() -> healthyPipelineSafeModeRule.validate(),
        1000, 5000);
  } finally {
    scmMetadataStore.getStore().close();
    FileUtil.fullyDelete(new File(storageDir));
  }
}
Example 16
Source File: TestKeyPurging.java From hadoop-ozone with Apache License 2.0
@Test(timeout = 30000)
public void testKeysPurgingByKeyDeletingService() throws Exception {
  // Create Volume and Bucket
  String volumeName = UUID.randomUUID().toString();
  String bucketName = UUID.randomUUID().toString();
  store.createVolume(volumeName);
  OzoneVolume volume = store.getVolume(volumeName);
  volume.createBucket(bucketName);
  OzoneBucket bucket = volume.getBucket(bucketName);

  // Create some keys and write data into them
  String keyBase = UUID.randomUUID().toString();
  String keyString = UUID.randomUUID().toString();
  byte[] data = ContainerTestHelper.getFixedLengthString(
      keyString, KEY_SIZE).getBytes(UTF_8);
  List<String> keys = new ArrayList<>(NUM_KEYS);
  for (int i = 1; i <= NUM_KEYS; i++) {
    String keyName = keyBase + "-" + i;
    keys.add(keyName);
    OzoneOutputStream keyStream = TestHelper.createKey(
        keyName, ReplicationType.STAND_ALONE, ReplicationFactor.ONE,
        KEY_SIZE, store, volumeName, bucketName);
    keyStream.write(data);
    keyStream.close();
  }

  // Delete created keys
  for (String key : keys) {
    bucket.deleteKey(key);
  }

  // Verify that KeyDeletingService picks up deleted keys and purges them
  // from DB.
  KeyManager keyManager = om.getKeyManager();
  KeyDeletingService keyDeletingService =
      (KeyDeletingService) keyManager.getDeletingService();

  GenericTestUtils.waitFor(
      () -> keyDeletingService.getDeletedKeyCount().get() >= NUM_KEYS,
      1000, 10000);

  Assert.assertTrue(keyDeletingService.getRunCount().get() > 1);

  GenericTestUtils.waitFor(
      () -> {
        try {
          return keyManager.getPendingDeletionKeys(Integer.MAX_VALUE)
              .size() == 0;
        } catch (IOException e) {
          return false;
        }
      }, 1000, 10000);
}
Example 17
Source File: TestTypedRDBTableStore.java From hadoop-ozone with Apache License 2.0
@Test
public void testTypedTableWithCacheWithFewDeletedOperationType()
    throws Exception {
  int iterCount = 10;
  try (Table<String, String> testTable = createTypedTable(
      "Seven")) {

    for (int x = 0; x < iterCount; x++) {
      String key = Integer.toString(x);
      String value = Integer.toString(x);
      if (x % 2 == 0) {
        testTable.addCacheEntry(new CacheKey<>(key),
            new CacheValue<>(Optional.of(value), x));
      } else {
        testTable.addCacheEntry(new CacheKey<>(key),
            new CacheValue<>(Optional.absent(), x));
      }
    }

    // As we have added to cache, so get should return value even if it
    // does not exist in DB.
    for (int x = 0; x < iterCount; x++) {
      if (x % 2 == 0) {
        Assert.assertEquals(Integer.toString(x),
            testTable.get(Integer.toString(x)));
      } else {
        Assert.assertNull(testTable.get(Integer.toString(x)));
      }
    }

    ArrayList<Long> epochs = new ArrayList<>();
    for (long i = 0; i <= 5L; i++) {
      epochs.add(i);
    }
    testTable.cleanupCache(epochs);

    GenericTestUtils.waitFor(() ->
        ((TypedTable<String, String>) testTable).getCache().size() == 4,
        100, 5000);

    // Check remaining values
    for (int x = 6; x < iterCount; x++) {
      if (x % 2 == 0) {
        Assert.assertEquals(Integer.toString(x),
            testTable.get(Integer.toString(x)));
      } else {
        Assert.assertNull(testTable.get(Integer.toString(x)));
      }
    }
  }
}
Example 18
Source File: TestSafeMode.java From hadoop with Apache License 2.0
/**
 * Test that the NN initializes its under-replicated blocks queue
 * before it is ready to exit safemode (HDFS-1476)
 */
@Test(timeout = 45000)
public void testInitializeReplQueuesEarly() throws Exception {
  LOG.info("Starting testInitializeReplQueuesEarly");
  // Spray the blocks around the cluster when we add DNs instead of
  // concentrating all blocks on the first node.
  BlockManagerTestUtil.setWritingPrefersLocalNode(
      cluster.getNamesystem().getBlockManager(), false);

  cluster.startDataNodes(conf, 2, true, StartupOption.REGULAR, null);
  cluster.waitActive();

  LOG.info("Creating files");
  DFSTestUtil.createFile(fs, TEST_PATH, 15 * BLOCK_SIZE, (short) 1, 1L);

  LOG.info("Stopping all DataNodes");
  List<DataNodeProperties> dnprops = Lists.newLinkedList();
  dnprops.add(cluster.stopDataNode(0));
  dnprops.add(cluster.stopDataNode(0));
  dnprops.add(cluster.stopDataNode(0));

  cluster.getConfiguration(0).setFloat(
      DFSConfigKeys.DFS_NAMENODE_REPL_QUEUE_THRESHOLD_PCT_KEY, 1f / 15f);

  LOG.info("Restarting NameNode");
  cluster.restartNameNode();
  final NameNode nn = cluster.getNameNode();

  String status = nn.getNamesystem().getSafemode();
  assertEquals("Safe mode is ON. The reported blocks 0 needs additional "
      + "15 blocks to reach the threshold 0.9990 of total blocks 15."
      + NEWLINE
      + "The number of live datanodes 0 has reached the minimum number 0. "
      + "Safe mode will be turned off automatically once the thresholds "
      + "have been reached.", status);
  assertFalse("Mis-replicated block queues should not be initialized "
      + "until threshold is crossed",
      NameNodeAdapter.safeModeInitializedReplQueues(nn));

  LOG.info("Restarting one DataNode");
  cluster.restartDataNode(dnprops.remove(0));

  // Wait for block reports from all attached storages of
  // the restarted DN to come in.
  GenericTestUtils.waitFor(new Supplier<Boolean>() {
    @Override
    public Boolean get() {
      return getLongCounter("StorageBlockReportOps",
          getMetrics(NN_METRICS)) == cluster.getStoragesPerDatanode();
    }
  }, 10, 10000);

  final int safe = NameNodeAdapter.getSafeModeSafeBlocks(nn);
  assertTrue("Expected first block report to make some blocks safe.",
      safe > 0);
  assertTrue("Did not expect first block report to make all blocks safe.",
      safe < 15);

  assertTrue(NameNodeAdapter.safeModeInitializedReplQueues(nn));

  // Ensure that UnderReplicatedBlocks goes up to 15 - safe. Misreplicated
  // blocks are processed asynchronously so this may take a few seconds.
  // Failure here will manifest as a test timeout.
  BlockManagerTestUtil.updateState(nn.getNamesystem().getBlockManager());
  long underReplicatedBlocks =
      nn.getNamesystem().getUnderReplicatedBlocks();
  while (underReplicatedBlocks != (15 - safe)) {
    LOG.info("UnderReplicatedBlocks expected=" + (15 - safe)
        + ", actual=" + underReplicatedBlocks);
    Thread.sleep(100);
    BlockManagerTestUtil.updateState(nn.getNamesystem().getBlockManager());
    underReplicatedBlocks = nn.getNamesystem().getUnderReplicatedBlocks();
  }

  cluster.restartDataNodes();
}
Example 19
Source File: TestCacheDirectives.java From hadoop with Apache License 2.0
/**
 * Wait for the NameNode to have an expected number of cached blocks
 * and replicas.
 * @param nn NameNode
 * @param expectedCachedBlocks if -1, treat as wildcard
 * @param expectedCachedReplicas if -1, treat as wildcard
 * @throws Exception
 */
private static void waitForCachedBlocks(NameNode nn,
    final int expectedCachedBlocks, final int expectedCachedReplicas,
    final String logString) throws Exception {
  final FSNamesystem namesystem = nn.getNamesystem();
  final CacheManager cacheManager = namesystem.getCacheManager();
  LOG.info("Waiting for " + expectedCachedBlocks + " blocks with " +
      expectedCachedReplicas + " replicas.");
  GenericTestUtils.waitFor(new Supplier<Boolean>() {
    @Override
    public Boolean get() {
      int numCachedBlocks = 0, numCachedReplicas = 0;
      namesystem.readLock();
      try {
        GSet<CachedBlock, CachedBlock> cachedBlocks =
            cacheManager.getCachedBlocks();
        if (cachedBlocks != null) {
          for (Iterator<CachedBlock> iter = cachedBlocks.iterator();
              iter.hasNext(); ) {
            CachedBlock cachedBlock = iter.next();
            numCachedBlocks++;
            numCachedReplicas +=
                cachedBlock.getDatanodes(Type.CACHED).size();
          }
        }
      } finally {
        namesystem.readUnlock();
      }

      LOG.info(logString + " cached blocks: have " + numCachedBlocks +
          " / " + expectedCachedBlocks + ". " +
          "cached replicas: have " + numCachedReplicas +
          " / " + expectedCachedReplicas);

      if (expectedCachedBlocks == -1 ||
          numCachedBlocks == expectedCachedBlocks) {
        if (expectedCachedReplicas == -1 ||
            numCachedReplicas == expectedCachedReplicas) {
          return true;
        }
      }
      return false;
    }
  }, 500, 60000);
}