org.apache.hadoop.test.GenericTestUtils Java Examples
The following examples show how to use
org.apache.hadoop.test.GenericTestUtils.
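Most of the examples below lean on a handful of GenericTestUtils helpers: getMethodName() to derive per-test file names, waitFor(...) to poll a condition until it holds or a timeout elapses, assertExceptionContains(...) to check an exception message, getTestDir(...) for scratch directories, and LogCapturer to capture log output. The following is a minimal, self-contained sketch of the first three; the class name, method name, and toy condition are illustrative only, and note that waitFor takes a Guava Supplier in older branches (Hadoop 2.x / big-c) and a java.util.function Supplier in newer ones — a lambda satisfies either.

import java.util.concurrent.TimeoutException;
import java.util.concurrent.atomic.AtomicInteger;

import org.apache.hadoop.test.GenericTestUtils;
import org.junit.Assert;
import org.junit.Test;

public class GenericTestUtilsSketch {

  @Test
  public void usesCoreHelpers() throws TimeoutException, InterruptedException {
    // Derive the calling test method's name, handy for building unique
    // paths like the "/<METHOD_NAME>.dat" files used in the examples below.
    String methodName = GenericTestUtils.getMethodName();
    Assert.assertEquals("usesCoreHelpers", methodName);

    // Poll a condition every 100 ms until it holds, giving up with a
    // TimeoutException after 10 seconds.
    AtomicInteger attempts = new AtomicInteger();
    GenericTestUtils.waitFor(() -> attempts.incrementAndGet() >= 3, 100, 10000);

    // Assert that a thrown exception's message contains an expected fragment.
    try {
      throw new IllegalArgumentException("Negative skip length: -3");
    } catch (IllegalArgumentException e) {
      GenericTestUtils.assertExceptionContains("Negative skip length", e);
    }
  }
}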
Example #1
Source File: BlockReportTestBase.java From big-c with Apache License 2.0
/**
 * Test writes a file and closes it.
 * Block report is generated with a bad GS for a single block.
 * Block report is forced and the check for # of corrupted blocks is performed.
 *
 * @throws IOException in case of an error
 */
@Test(timeout=300000)
public void blockReport_03() throws IOException {
  final String METHOD_NAME = GenericTestUtils.getMethodName();
  Path filePath = new Path("/" + METHOD_NAME + ".dat");
  writeFile(METHOD_NAME, FILE_SIZE, filePath);

  // all blocks belong to the same file, hence same BP
  DataNode dn = cluster.getDataNodes().get(DN_N0);
  String poolId = cluster.getNamesystem().getBlockPoolId();
  DatanodeRegistration dnR = dn.getDNRegistrationForBP(poolId);
  StorageBlockReport[] reports = getBlockReports(dn, poolId, true, false);
  sendBlockReports(dnR, poolId, reports);
  printStats();

  assertThat("Wrong number of corrupt blocks",
      cluster.getNamesystem().getCorruptReplicaBlocks(), is(1L));
  assertThat("Wrong number of PendingDeletion blocks",
      cluster.getNamesystem().getPendingDeletionBlocks(), is(0L));
}
Example #2
Source File: TestOpensslCipher.java From big-c with Apache License 2.0
@Test(timeout=120000)
public void testDoFinalArguments() throws Exception {
  Assume.assumeTrue(OpensslCipher.getLoadingFailureReason() == null);
  OpensslCipher cipher = OpensslCipher.getInstance("AES/CTR/NoPadding");
  Assert.assertTrue(cipher != null);

  cipher.init(OpensslCipher.ENCRYPT_MODE, key, iv);

  // Require direct buffer
  ByteBuffer output = ByteBuffer.allocate(1024);

  try {
    cipher.doFinal(output);
    Assert.fail("Output buffer should be direct buffer.");
  } catch (IllegalArgumentException e) {
    GenericTestUtils.assertExceptionContains(
        "Direct buffer is required", e);
  }
}
Example #3
Source File: TestLazyPersistFiles.java From hadoop with Apache License 2.0
@Test
public void testLazyPersistBlocksAreSaved()
    throws IOException, InterruptedException {
  startUpCluster(true, -1);
  final String METHOD_NAME = GenericTestUtils.getMethodName();
  Path path = new Path("/" + METHOD_NAME + ".dat");

  // Create a test file
  makeTestFile(path, BLOCK_SIZE * 10, true);
  LocatedBlocks locatedBlocks =
      ensureFileReplicasOnStorageType(path, RAM_DISK);

  // Sleep for a short time to allow the lazy writer thread to do its job
  Thread.sleep(6 * LAZY_WRITER_INTERVAL_SEC * 1000);

  LOG.info("Verifying copy was saved to lazyPersist/");

  // Make sure that there is a saved copy of the replica on persistent
  // storage.
  ensureLazyPersistBlocksAreSaved(locatedBlocks);
}
Example #4
Source File: TestZKFailoverController.java From hadoop with Apache License 2.0
@Test(timeout=15000)
public void testGracefulFailoverToUnhealthy() throws Exception {
  try {
    cluster.start();

    cluster.waitForActiveLockHolder(0);

    // Mark it unhealthy, wait for it to exit election
    cluster.setHealthy(1, false);
    cluster.waitForElectorState(1, ActiveStandbyElector.State.INIT);

    // Ask for failover, it should fail, because it's unhealthy
    try {
      cluster.getService(1).getZKFCProxy(conf, 5000).gracefulFailover();
      fail("Did not fail to graceful failover to unhealthy service!");
    } catch (ServiceFailedException sfe) {
      GenericTestUtils.assertExceptionContains(
          cluster.getService(1).toString() +
          " is not currently healthy.", sfe);
    }
  } finally {
    cluster.stop();
  }
}
Example #5
Source File: TestKeyDeletingService.java From hadoop-ozone with Apache License 2.0
/**
 * In this test, we create a bunch of keys and delete them. Then we start the
 * KeyDeletingService and pass an SCMClient which does not fail. We make sure
 * that all the keys that we deleted are picked up and deleted by
 * OzoneManager.
 *
 * @throws IOException - on Failure.
 */
@Test(timeout = 30000)
public void checkIfDeleteServiceisDeletingKeys()
    throws IOException, TimeoutException, InterruptedException {
  OzoneConfiguration conf = createConfAndInitValues();
  OmMetadataManagerImpl metaMgr = new OmMetadataManagerImpl(conf);
  KeyManager keyManager =
      new KeyManagerImpl(
          new ScmBlockLocationTestingClient(null, null, 0),
          metaMgr, conf, UUID.randomUUID().toString(), null);
  keyManager.start(conf);
  final int keyCount = 100;
  createAndDeleteKeys(keyManager, keyCount, 1);
  KeyDeletingService keyDeletingService =
      (KeyDeletingService) keyManager.getDeletingService();
  GenericTestUtils.waitFor(
      () -> keyDeletingService.getDeletedKeyCount().get() >= keyCount,
      1000, 10000);
  Assert.assertTrue(keyDeletingService.getRunCount().get() > 1);
  Assert.assertEquals(
      keyManager.getPendingDeletionKeys(Integer.MAX_VALUE).size(), 0);
}
Example #6
Source File: TestBlockRecovery.java From big-c with Apache License 2.0
/**
 * BlockRecoveryFI_09. some/all DNs failed to update replicas.
 *
 * @throws IOException in case of an error
 */
@Test
public void testFailedReplicaUpdate() throws IOException {
  if (LOG.isDebugEnabled()) {
    LOG.debug("Running " + GenericTestUtils.getMethodName());
  }
  DataNode spyDN = spy(dn);
  doThrow(new IOException()).when(spyDN).updateReplicaUnderRecovery(
      block, RECOVERY_ID, BLOCK_ID, block.getNumBytes());
  try {
    spyDN.syncBlock(rBlock, initBlockRecords(spyDN));
    fail("Sync should fail");
  } catch (IOException e) {
    e.getMessage().startsWith("Cannot recover ");
  }
}
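Note that the catch block above only calls startsWith on the message and discards the boolean result, so the expected message is never actually verified. The fragment below is a hedged sketch (not standalone code) of how that try/catch could be rewritten as a drop-in replacement, reusing spyDN and rBlock from the example above, so that GenericTestUtils asserts on the message; assertExceptionContains checks for containment rather than a prefix.

try {
  spyDN.syncBlock(rBlock, initBlockRecords(spyDN));
  fail("Sync should fail");
} catch (IOException e) {
  // Actually assert on the message instead of ignoring the boolean result.
  GenericTestUtils.assertExceptionContains("Cannot recover ", e);
}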
Example #7
Source File: TestMetadataStore.java From hadoop-ozone with Apache License 2.0
@Test
public void testDestroyDB() throws IOException {
  // create a new DB to test db destroy
  OzoneConfiguration conf = new OzoneConfiguration();
  conf.set(OzoneConfigKeys.OZONE_METADATA_STORE_IMPL, storeImpl);

  File dbDir = GenericTestUtils.getTestDir(getClass().getSimpleName()
      + "-" + storeImpl.toLowerCase() + "-toDestroy");

  MetadataStore dbStore = MetadataStoreBuilder.newBuilder()
      .setConf(conf)
      .setCreateIfMissing(true)
      .setDbFile(dbDir)
      .build();

  dbStore.put(getBytes("key1"), getBytes("value1"));
  dbStore.put(getBytes("key2"), getBytes("value2"));

  assertFalse(dbStore.isEmpty());
  assertTrue(dbDir.exists());
  assertTrue(dbDir.listFiles().length > 0);

  dbStore.destroy();

  assertFalse(dbDir.exists());
}
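The scratch directory in this example comes from GenericTestUtils.getTestDir(name). A small hedged sketch of that call in isolation is shown below; the class name is illustrative, and the base directory is typically resolved from the test.build.data system property with a build-local default, so the exact path depends on the build setup.

import java.io.File;

import org.apache.hadoop.test.GenericTestUtils;

public class TestDirSketch {
  public static void main(String[] args) {
    // Resolve a named scratch directory under the build's test data
    // directory; the caller decides whether and when to create it.
    File dbDir = GenericTestUtils.getTestDir("metadata-store-toDestroy");
    System.out.println(dbDir.getAbsolutePath());
  }
}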
Example #8
Source File: TestPermissionSymlinks.java From big-c with Apache License 2.0
private void doDeleteLinkParentNotWritable() throws Exception {
  // Try to delete where the symlink's parent dir is not writable
  try {
    user.doAs(new PrivilegedExceptionAction<Object>() {
      @Override
      public Object run() throws IOException {
        FileContext myfc = FileContext.getFileContext(conf);
        myfc.delete(link, false);
        return null;
      }
    });
    fail("Deleted symlink without write permissions on parent!");
  } catch (AccessControlException e) {
    GenericTestUtils.assertExceptionContains("Permission denied", e);
  }
}
Example #9
Source File: TestFiPipelines.java From big-c with Apache License 2.0
/**
 * Test initiates and sets actions created by injection framework. The actions
 * work with both aspects of sending acknowledgment packets in a pipeline.
 * Creates and closes a file of certain length < packet size.
 * Injected actions will check if the number of visible bytes at datanodes
 * equals the number of acknowledged bytes.
 *
 * @throws IOException in case of an error
 */
@Test
public void pipeline_04() throws IOException {
  final String METHOD_NAME = GenericTestUtils.getMethodName();
  if (LOG.isDebugEnabled()) {
    LOG.debug("Running " + METHOD_NAME);
  }

  final PipelinesTestUtil.PipelinesTest pipst =
      (PipelinesTestUtil.PipelinesTest) PipelinesTestUtil.initTest();

  pipst.fiCallSetNumBytes.set(
      new PipelinesTestUtil.ReceivedCheckAction(METHOD_NAME));
  pipst.fiCallSetBytesAcked.set(
      new PipelinesTestUtil.AckedCheckAction(METHOD_NAME));

  Path filePath = new Path("/" + METHOD_NAME + ".dat");
  FSDataOutputStream fsOut = fs.create(filePath);
  TestPipelines.writeData(fsOut, 2);
  fs.close();
}
Example #10
Source File: TestLazyPersistFiles.java From big-c with Apache License 2.0
/**
 * Delete lazy-persist file that has not been persisted to disk.
 * Memory is freed up and file is gone.
 * @throws IOException
 */
@Test
public void testDeleteBeforePersist() throws Exception {
  startUpCluster(true, -1);
  final String METHOD_NAME = GenericTestUtils.getMethodName();
  FsDatasetTestUtil.stopLazyWriter(cluster.getDataNodes().get(0));

  Path path = new Path("/" + METHOD_NAME + ".dat");
  makeTestFile(path, BLOCK_SIZE, true);
  LocatedBlocks locatedBlocks =
      ensureFileReplicasOnStorageType(path, RAM_DISK);

  // Delete before persist
  client.delete(path.toString(), false);
  Assert.assertFalse(fs.exists(path));

  assertThat(verifyDeletedBlocks(locatedBlocks), is(true));

  verifyRamDiskJMXMetric("RamDiskBlocksDeletedBeforeLazyPersisted", 1);
}
Example #11
Source File: TestEditLogJournalFailures.java From big-c with Apache License 2.0
@Test
public void testAllEditsDirFailOnWrite() throws IOException {
  assertTrue(doAnEdit());
  // Invalidate both edits journals.
  invalidateEditsDirAtIndex(0, true, true);
  invalidateEditsDirAtIndex(1, true, true);
  // The NN has not terminated (no ExitException thrown)
  try {
    doAnEdit();
    fail("The previous edit could not be synced to any persistent storage, "
        + " should have halted the NN");
  } catch (RemoteException re) {
    assertTrue(re.getClassName().contains("ExitException"));
    GenericTestUtils.assertExceptionContains(
        "Could not sync enough journals to persistent storage due to " +
        "No journals available to flush. " +
        "Unsynced transactions: 1", re);
  }
}
Example #12
Source File: TestBlockRecovery.java From hadoop with Apache License 2.0
/**
 * BlockRecoveryFI_09. some/all DNs failed to update replicas.
 *
 * @throws IOException in case of an error
 */
@Test
public void testFailedReplicaUpdate() throws IOException {
  if (LOG.isDebugEnabled()) {
    LOG.debug("Running " + GenericTestUtils.getMethodName());
  }
  DataNode spyDN = spy(dn);
  doThrow(new IOException()).when(spyDN).updateReplicaUnderRecovery(
      block, RECOVERY_ID, BLOCK_ID, block.getNumBytes());
  try {
    spyDN.syncBlock(rBlock, initBlockRecords(spyDN));
    fail("Sync should fail");
  } catch (IOException e) {
    e.getMessage().startsWith("Cannot recover ");
  }
}
Example #13
Source File: TestDatanodeProtocolRetryPolicy.java From hadoop with Apache License 2.0
private void waitForBlockReport(
    final DatanodeProtocolClientSideTranslatorPB mockNN) throws Exception {
  GenericTestUtils.waitFor(new Supplier<Boolean>() {
    @Override
    public Boolean get() {
      try {
        Mockito.verify(mockNN).blockReport(
            Mockito.eq(datanodeRegistration),
            Mockito.eq(POOL_ID),
            Mockito.<StorageBlockReport[]>anyObject(),
            Mockito.<BlockReportContext>anyObject());
        return true;
      } catch (Throwable t) {
        LOG.info("waiting on block report: " + t.getMessage());
        return false;
      }
    }
  }, 500, 100000);
}
Example #14
Source File: TestSCMSafeModeManager.java From hadoop-ozone with Apache License 2.0
@Test
public void testFailWithIncorrectValueForHealthyPipelinePercent()
    throws Exception {
  try {
    OzoneConfiguration conf = createConf(100, 0.9);
    MockNodeManager mockNodeManager = new MockNodeManager(true, 10);
    PipelineManager pipelineManager = new SCMPipelineManager(conf,
        mockNodeManager, scmMetadataStore.getPipelineTable(), queue);
    scmSafeModeManager = new SCMSafeModeManager(
        conf, containers, pipelineManager, queue);
    fail("testFailWithIncorrectValueForHealthyPipelinePercent");
  } catch (IllegalArgumentException ex) {
    GenericTestUtils.assertExceptionContains("value should be >= 0.0 and <="
        + " 1.0", ex);
  }
}
Example #15
Source File: TestDatanodeStateMachine.java From hadoop-ozone with Apache License 2.0
/**
 * Assert that starting statemachine executes the Init State.
 */
@Test
public void testStartStopDatanodeStateMachine()
    throws IOException, InterruptedException, TimeoutException {
  try (DatanodeStateMachine stateMachine =
      new DatanodeStateMachine(getNewDatanodeDetails(), conf, null, null)) {
    stateMachine.startDaemon();
    SCMConnectionManager connectionManager =
        stateMachine.getConnectionManager();
    GenericTestUtils.waitFor(
        () -> {
          int size = connectionManager.getValues().size();
          LOG.info("connectionManager.getValues().size() is {}", size);
          return size == 1;
        }, 1000, 30000);

    stateMachine.stopDaemon();
    assertTrue(stateMachine.isDaemonStopped());
  }
}
Example #16
Source File: TestEditLog.java From hadoop with Apache License 2.0
@Test
public void testFailedOpen() throws Exception {
  File logDir = new File(TEST_DIR, "testFailedOpen");
  logDir.mkdirs();
  FSEditLog log = FSImageTestUtil.createStandaloneEditLog(logDir);
  try {
    FileUtil.setWritable(logDir, false);
    log.openForWrite();
    fail("Did no throw exception on only having a bad dir");
  } catch (IOException ioe) {
    GenericTestUtils.assertExceptionContains(
        "too few journals successfully started", ioe);
  } finally {
    FileUtil.setWritable(logDir, true);
    log.close();
  }
}
Example #17
Source File: TestBPOfferService.java From big-c with Apache License 2.0
private void waitForOneToFail(final BPOfferService bpos)
    throws Exception {
  GenericTestUtils.waitFor(new Supplier<Boolean>() {
    @Override
    public Boolean get() {
      List<BPServiceActor> actors = bpos.getBPServiceActors();
      int failedcount = 0;
      for (BPServiceActor actor : actors) {
        if (!actor.isAlive()) {
          failedcount++;
        }
      }
      return failedcount == 1;
    }
  }, 100, 10000);
}
Example #18
Source File: TestBPOfferService.java From big-c with Apache License 2.0
private ReceivedDeletedBlockInfo[] waitForBlockReceived(
    final ExtendedBlock fakeBlock,
    final DatanodeProtocolClientSideTranslatorPB mockNN) throws Exception {
  final String fakeBlockPoolId = fakeBlock.getBlockPoolId();
  final ArgumentCaptor<StorageReceivedDeletedBlocks[]> captor =
      ArgumentCaptor.forClass(StorageReceivedDeletedBlocks[].class);
  GenericTestUtils.waitFor(new Supplier<Boolean>() {
    @Override
    public Boolean get() {
      try {
        Mockito.verify(mockNN).blockReceivedAndDeleted(
            Mockito.<DatanodeRegistration>anyObject(),
            Mockito.eq(fakeBlockPoolId),
            captor.capture());
        return true;
      } catch (Throwable t) {
        return false;
      }
    }
  }, 100, 10000);
  return captor.getValue()[0].getBlocks();
}
Example #19
Source File: TestServletFilter.java From big-c with Apache License 2.0
/**
 * Similar to the above test case, except that it uses a different API to add
 * the filter. Regression test for HADOOP-8786.
 */
@Test
public void testContextSpecificServletFilterWhenInitThrowsException()
    throws Exception {
  Configuration conf = new Configuration();
  HttpServer2 http = createTestServer(conf);
  HttpServer2.defineFilter(http.webAppContext,
      "ErrorFilter", ErrorFilter.class.getName(),
      null, null);
  try {
    http.start();
    fail("expecting exception");
  } catch (IOException e) {
    GenericTestUtils.assertExceptionContains(
        "Unable to initialize WebAppContext", e);
  }
}
Example #20
Source File: TestMover.java From big-c with Apache License 2.0
/**
 * Test Mover Cli by specifying a list of files/directories using option "-p".
 * There is only one namenode (and hence name service) specified in the conf.
 */
@Test
public void testMoverCli() throws Exception {
  final MiniDFSCluster cluster = new MiniDFSCluster
      .Builder(new HdfsConfiguration()).numDataNodes(0).build();
  try {
    final Configuration conf = cluster.getConfiguration(0);
    try {
      Mover.Cli.getNameNodePathsToMove(conf, "-p", "/foo", "bar");
      Assert.fail("Expected exception for illegal path bar");
    } catch (IllegalArgumentException e) {
      GenericTestUtils.assertExceptionContains("bar is not absolute", e);
    }

    Map<URI, List<Path>> movePaths = Mover.Cli.getNameNodePathsToMove(conf);
    Collection<URI> namenodes = DFSUtil.getNsServiceRpcUris(conf);
    Assert.assertEquals(1, namenodes.size());
    Assert.assertEquals(1, movePaths.size());
    URI nn = namenodes.iterator().next();
    Assert.assertTrue(movePaths.containsKey(nn));
    Assert.assertNull(movePaths.get(nn));

    movePaths = Mover.Cli.getNameNodePathsToMove(conf, "-p", "/foo", "/bar");
    namenodes = DFSUtil.getNsServiceRpcUris(conf);
    Assert.assertEquals(1, movePaths.size());
    nn = namenodes.iterator().next();
    Assert.assertTrue(movePaths.containsKey(nn));
    checkMovePaths(movePaths.get(nn), new Path("/foo"), new Path("/bar"));
  } finally {
    cluster.shutdown();
  }
}
Example #21
Source File: TestEncryptedTransfer.java From big-c with Apache License 2.0
private void testEncryptedWrite(int numDns) throws IOException {
  MiniDFSCluster cluster = null;
  try {
    Configuration conf = new Configuration();
    setEncryptionConfigKeys(conf);

    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDns).build();

    FileSystem fs = getFileSystem(conf);

    LogCapturer logs = GenericTestUtils.LogCapturer.captureLogs(
        LogFactory.getLog(SaslDataTransferServer.class));
    LogCapturer logs1 = GenericTestUtils.LogCapturer.captureLogs(
        LogFactory.getLog(DataTransferSaslUtil.class));
    try {
      writeTestDataToFile(fs);
    } finally {
      logs.stopCapturing();
      logs1.stopCapturing();
    }
    assertEquals(PLAIN_TEXT, DFSTestUtil.readFile(fs, TEST_PATH));
    fs.close();

    if (resolverClazz == null) {
      // Test client and server negotiate cipher option
      GenericTestUtils.assertDoesNotMatch(logs.getOutput(),
          "Server using cipher suite");
      // Check the IOStreamPair
      GenericTestUtils.assertDoesNotMatch(logs1.getOutput(),
          "Creating IOStreamPair of CryptoInputStream and CryptoOutputStream.");
    }
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
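The pattern above — capture, exercise the code under test, stop capturing, then assert on getOutput() — is the usual way LogCapturer is used. Below is a minimal hedged sketch in isolation; the class name is illustrative, and depending on the Hadoop version captureLogs accepts a commons-logging Log (as in the example above) or an slf4j Logger (as in the Ozone examples further down).

import org.apache.hadoop.test.GenericTestUtils.LogCapturer;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class LogCapturerSketch {
  private static final Logger LOG =
      LoggerFactory.getLogger(LogCapturerSketch.class);

  public static void main(String[] args) {
    // Attach the capturer, run the code under test, then stop capturing
    // before inspecting what was logged.
    LogCapturer logs = LogCapturer.captureLogs(LOG);
    try {
      LOG.info("Server using cipher suite XYZ");
    } finally {
      logs.stopCapturing();
    }
    System.out.println(logs.getOutput().contains("cipher suite")); // true
  }
}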
Example #22
Source File: TestSecondaryNameNodeUpgrade.java From big-c with Apache License 2.0
@Test
public void testChangeNsIDFails() throws IOException {
  try {
    doIt(ImmutableMap.of("namespaceID", "2"));
    Assert.fail("Should throw InconsistentFSStateException");
  } catch (IOException e) {
    GenericTestUtils.assertExceptionContains(
        "Inconsistent checkpoint fields", e);
    System.out.println("Correctly failed with inconsistent namespaceID: " + e);
  }
}
Example #23
Source File: CryptoStreamsTestBase.java From hadoop with Apache License 2.0
/** Test skip. */
@Test(timeout=120000)
public void testSkip() throws Exception {
  OutputStream out = getOutputStream(defaultBufferSize);
  writeData(out);

  // Default buffer size
  InputStream in = getInputStream(defaultBufferSize);
  byte[] result = new byte[dataLen];
  int n1 = readAll(in, result, 0, dataLen / 3);
  Assert.assertEquals(n1, ((Seekable) in).getPos());

  long skipped = in.skip(dataLen / 3);
  int n2 = readAll(in, result, 0, dataLen);
  Assert.assertEquals(dataLen, n1 + skipped + n2);

  byte[] readData = new byte[n2];
  System.arraycopy(result, 0, readData, 0, n2);
  byte[] expectedData = new byte[n2];
  System.arraycopy(data, dataLen - n2, expectedData, 0, n2);
  Assert.assertArrayEquals(readData, expectedData);

  try {
    skipped = in.skip(-3);
    Assert.fail("Skip Negative length should fail.");
  } catch (IllegalArgumentException e) {
    GenericTestUtils.assertExceptionContains("Negative skip length", e);
  }

  // Skip after EOF
  skipped = in.skip(3);
  Assert.assertEquals(skipped, 0);

  in.close();
}
Example #24
Source File: TestQuorumJournalManager.java From hadoop with Apache License 2.0
private void tryRecoveryExpectingFailure() throws IOException {
  try {
    QJMTestUtil.recoverAndReturnLastTxn(qjm);
    fail("Expected to fail recovery");
  } catch (QuorumException qe) {
    GenericTestUtils.assertExceptionContains("Injected", qe);
  } finally {
    qjm.close();
  }
}
Example #25
Source File: TestDnRespectsBlockReportSplitThreshold.java From big-c with Apache License 2.0
/**
 * Test that if splitThreshold is zero, then we always get a separate
 * call per storage.
 */
@Test(timeout=300000)
public void testAlwaysSplit() throws IOException, InterruptedException {
  startUpCluster(0);
  NameNode nn = cluster.getNameNode();
  DataNode dn = cluster.getDataNodes().get(0);

  // Create a file with a few blocks.
  createFile(GenericTestUtils.getMethodName(), BLOCKS_IN_FILE);

  // Insert a spy object for the NN RPC.
  DatanodeProtocolClientSideTranslatorPB nnSpy =
      DataNodeTestUtils.spyOnBposToNN(dn, nn);

  // Trigger a block report so there is an interaction with the spy
  // object.
  DataNodeTestUtils.triggerBlockReport(dn);

  ArgumentCaptor<StorageBlockReport[]> captor =
      ArgumentCaptor.forClass(StorageBlockReport[].class);

  Mockito.verify(nnSpy, times(cluster.getStoragesPerDatanode())).blockReport(
      any(DatanodeRegistration.class),
      anyString(),
      captor.capture(),
      Mockito.<BlockReportContext>anyObject());

  verifyCapturedArguments(captor, 1, BLOCKS_IN_FILE);
}
Example #26
Source File: TestLazyPersistFiles.java From big-c with Apache License 2.0
/**
 * RamDisk eviction should not happen on blocks that are not yet
 * persisted on disk.
 * @throws IOException
 * @throws InterruptedException
 */
@Test
public void testRamDiskEvictionBeforePersist()
    throws IOException, InterruptedException {
  startUpCluster(true, 1);
  final String METHOD_NAME = GenericTestUtils.getMethodName();
  Path path1 = new Path("/" + METHOD_NAME + ".01.dat");
  Path path2 = new Path("/" + METHOD_NAME + ".02.dat");
  final int SEED = 0XFADED;

  // Stop lazy writer to ensure block for path1 is not persisted to disk.
  FsDatasetTestUtil.stopLazyWriter(cluster.getDataNodes().get(0));

  makeRandomTestFile(path1, BLOCK_SIZE, true, SEED);
  ensureFileReplicasOnStorageType(path1, RAM_DISK);

  // Create second file with a replica on RAM_DISK.
  makeTestFile(path2, BLOCK_SIZE, true);

  // Eviction should not happen for block of the first file that is not
  // persisted yet.
  ensureFileReplicasOnStorageType(path1, RAM_DISK);
  ensureFileReplicasOnStorageType(path2, DEFAULT);

  assert(fs.exists(path1));
  assert(fs.exists(path2));
  assertTrue(verifyReadRandomFile(path1, BLOCK_SIZE, SEED));
}
Example #27
Source File: TestOneReplicaPipelineSafeModeRule.java From hadoop-ozone with Apache License 2.0
@Test
public void testOneReplicaPipelineRule() throws Exception {

  // With 30 nodes, we can create 7 pipelines with replication factor 3.
  // (This is because in node manager, for every 10 nodes, 7 nodes are
  // healthy, 2 are stale and one is dead.)
  int nodes = 30;
  int pipelineFactorThreeCount = 7;
  int pipelineCountOne = 0;
  setup(nodes, pipelineFactorThreeCount, pipelineCountOne);

  GenericTestUtils.LogCapturer logCapturer =
      GenericTestUtils.LogCapturer.captureLogs(
          LoggerFactory.getLogger(SCMSafeModeManager.class));

  List<Pipeline> pipelines = pipelineManager.getPipelines();
  for (int i = 0; i < pipelineFactorThreeCount - 1; i++) {
    firePipelineEvent(pipelines.get(i));
  }

  // As 90% of 7 with ceil is 7, if we send 6 pipeline reports, rule
  // validate should still be false.
  GenericTestUtils.waitFor(() -> logCapturer.getOutput().contains(
      "reported count is 6"), 1000, 5000);
  Assert.assertFalse(rule.validate());

  // Fire last pipeline event from datanode.
  firePipelineEvent(pipelines.get(pipelineFactorThreeCount - 1));

  GenericTestUtils.waitFor(() -> rule.validate(), 1000, 5000);
}
Example #28
Source File: TestBlockScanner.java From big-c with Apache License 2.0
@Test(timeout=120000)
public void testCorruptBlockHandling() throws Exception {
  Configuration conf = new Configuration();
  conf.setLong(DFS_DATANODE_SCAN_PERIOD_HOURS_KEY, 100L);
  conf.set(INTERNAL_VOLUME_SCANNER_SCAN_RESULT_HANDLER,
      TestScanResultHandler.class.getName());
  final TestContext ctx = new TestContext(conf, 1);
  final int NUM_EXPECTED_BLOCKS = 5;
  final int CORRUPT_INDEX = 3;
  ctx.createFiles(0, NUM_EXPECTED_BLOCKS, 4);
  ExtendedBlock badBlock = ctx.getFileBlock(0, CORRUPT_INDEX);
  ctx.cluster.corruptBlockOnDataNodes(badBlock);
  final TestScanResultHandler.Info info =
      TestScanResultHandler.getInfo(ctx.volumes.get(0));
  synchronized (info) {
    info.shouldRun = true;
    info.notify();
  }
  GenericTestUtils.waitFor(new Supplier<Boolean>() {
    @Override
    public Boolean get() {
      synchronized (info) {
        return info.blocksScanned == NUM_EXPECTED_BLOCKS;
      }
    }
  }, 3, 30000);
  synchronized (info) {
    assertTrue(info.badBlocks.contains(badBlock));
    for (int i = 0; i < NUM_EXPECTED_BLOCKS; i++) {
      if (i != CORRUPT_INDEX) {
        ExtendedBlock block = ctx.getFileBlock(0, i);
        assertTrue(info.goodBlocks.contains(block));
      }
    }
  }
  ctx.close();
}
Example #29
Source File: TestSCMContainerManagerMetrics.java From hadoop-ozone with Apache License 2.0
@Test
public void testReportProcessingMetrics() throws Exception {
  String volumeName = "vol1";
  String bucketName = "bucket1";
  String key = "key1";

  MetricsRecordBuilder metrics =
      getMetrics(SCMContainerManagerMetrics.class.getSimpleName());
  Assert.assertEquals(getLongCounter(
      "NumContainerReportsProcessedSuccessful", metrics), 1);

  // Create key should create container on DN.
  cluster.getRpcClient().getObjectStore().getClientProxy()
      .createVolume(volumeName);
  cluster.getRpcClient().getObjectStore().getClientProxy()
      .createBucket(volumeName, bucketName);
  OzoneOutputStream ozoneOutputStream = cluster.getRpcClient()
      .getObjectStore().getClientProxy().createKey(volumeName, bucketName,
          key, 0, ReplicationType.RATIS, ReplicationFactor.ONE,
          new HashMap<>());

  String data = "file data";
  ozoneOutputStream.write(data.getBytes(), 0, data.length());
  ozoneOutputStream.close();

  GenericTestUtils.waitFor(() -> {
    final MetricsRecordBuilder scmMetrics =
        getMetrics(SCMContainerManagerMetrics.class.getSimpleName());
    return getLongCounter("NumICRReportsProcessedSuccessful",
        scmMetrics) == 1;
  }, 1000, 500000);
}
Example #30
Source File: TestQueryNode.java From hadoop-ozone with Apache License 2.0
@Test(timeout = 10 * 1000L)
public void testStaleNodesCount() throws Exception {
  cluster.shutdownHddsDatanode(0);
  cluster.shutdownHddsDatanode(1);

  GenericTestUtils.waitFor(() ->
          cluster.getStorageContainerManager().getNodeCount(STALE) == 2,
      100, 4 * 1000);

  int nodeCount = scmClient.queryNode(STALE,
      HddsProtos.QueryScope.CLUSTER, "").size();
  assertEquals("Mismatch of expected nodes count", 2, nodeCount);

  GenericTestUtils.waitFor(() ->
          cluster.getStorageContainerManager().getNodeCount(DEAD) == 2,
      100, 4 * 1000);

  // Assert that we don't find any stale nodes.
  nodeCount = scmClient.queryNode(STALE,
      HddsProtos.QueryScope.CLUSTER, "").size();
  assertEquals("Mismatch of expected nodes count", 0, nodeCount);

  // Assert that we find the expected number of dead nodes.
  nodeCount = scmClient.queryNode(DEAD,
      HddsProtos.QueryScope.CLUSTER, "").size();
  assertEquals("Mismatch of expected nodes count", 2, nodeCount);
}