Java Code Examples for org.apache.hadoop.test.GenericTestUtils#assertExceptionContains()
The following examples show how to use org.apache.hadoop.test.GenericTestUtils#assertExceptionContains(). Each example is taken from the project and source file noted in its header.
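All of the examples below follow the same basic pattern: run an operation that is expected to fail, call fail() if it completes, and hand the caught exception to GenericTestUtils.assertExceptionContains() together with a substring that must appear in the exception message. Below is a minimal, self-contained sketch of that pattern; the test class and the failing helper method are illustrative, and only the assertion call itself is the real Hadoop test API.

import java.io.IOException;

import org.apache.hadoop.test.GenericTestUtils;
import org.junit.Test;

import static org.junit.Assert.fail;

public class AssertExceptionContainsExample {

  // Hypothetical helper standing in for the operation under test.
  private void operationThatShouldFail() throws IOException {
    throw new IOException("Could not sync enough journals to persistent storage");
  }

  @Test
  public void testExpectedFailure() throws Exception {
    try {
      operationThatShouldFail();
      fail("Expected an IOException");
    } catch (IOException ioe) {
      // Passes if the exception's string form contains the expected
      // substring; otherwise the assertion fails with the original
      // exception attached for easier debugging.
      GenericTestUtils.assertExceptionContains(
          "Could not sync enough journals", ioe);
    }
  }
}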
Example 1
Source File: TestOpensslCipher.java From hadoop with Apache License 2.0
@Test(timeout=120000)
public void testDoFinalArguments() throws Exception {
  Assume.assumeTrue(OpensslCipher.getLoadingFailureReason() == null);
  OpensslCipher cipher = OpensslCipher.getInstance("AES/CTR/NoPadding");
  Assert.assertTrue(cipher != null);

  cipher.init(OpensslCipher.ENCRYPT_MODE, key, iv);

  // Require direct buffer
  ByteBuffer output = ByteBuffer.allocate(1024);
  try {
    cipher.doFinal(output);
    Assert.fail("Output buffer should be direct buffer.");
  } catch (IllegalArgumentException e) {
    GenericTestUtils.assertExceptionContains(
        "Direct buffer is required", e);
  }
}
Example 2
Source File: TestOzoneRpcClientAbstract.java From hadoop-ozone with Apache License 2.0
@Test
public void testListPartsInvalidPartMarker() throws Exception {
  try {
    String volumeName = UUID.randomUUID().toString();
    String bucketName = UUID.randomUUID().toString();
    String keyName = UUID.randomUUID().toString();

    store.createVolume(volumeName);
    OzoneVolume volume = store.getVolume(volumeName);
    volume.createBucket(bucketName);
    OzoneBucket bucket = volume.getBucket(bucketName);

    OzoneMultipartUploadPartListParts ozoneMultipartUploadPartListParts =
        bucket.listParts(keyName, "random", -1, 2);
  } catch (IllegalArgumentException ex) {
    GenericTestUtils.assertExceptionContains("Should be greater than or " +
        "equal to zero", ex);
  }
}
Example 3
Source File: TestQuorumJournalManager.java From big-c with Apache License 2.0
/**
 * Regression test for HDFS-3891: selectInputStreams should throw
 * an exception when a majority of journalnodes have crashed.
 */
@Test
public void testSelectInputStreamsMajorityDown() throws Exception {
  // Shut down all of the JNs.
  cluster.shutdown();

  List<EditLogInputStream> streams = Lists.newArrayList();
  try {
    qjm.selectInputStreams(streams, 0, false);
    fail("Did not throw IOE");
  } catch (QuorumException ioe) {
    GenericTestUtils.assertExceptionContains(
        "Got too many exceptions", ioe);
    assertTrue(streams.isEmpty());
  }
}
Example 4
Source File: TestEditLog.java From big-c with Apache License 2.0
@Test
public void testFailedOpen() throws Exception {
  File logDir = new File(TEST_DIR, "testFailedOpen");
  logDir.mkdirs();
  FSEditLog log = FSImageTestUtil.createStandaloneEditLog(logDir);
  try {
    FileUtil.setWritable(logDir, false);
    log.openForWrite();
    fail("Did not throw exception on only having a bad dir");
  } catch (IOException ioe) {
    GenericTestUtils.assertExceptionContains(
        "too few journals successfully started", ioe);
  } finally {
    FileUtil.setWritable(logDir, true);
    log.close();
  }
}
Example 5
Source File: TestZKFailoverController.java From hadoop with Apache License 2.0
@Test(timeout=15000)
public void testGracefulFailoverToUnhealthy() throws Exception {
  try {
    cluster.start();

    cluster.waitForActiveLockHolder(0);

    // Mark it unhealthy, wait for it to exit election
    cluster.setHealthy(1, false);
    cluster.waitForElectorState(1, ActiveStandbyElector.State.INIT);

    // Ask for failover, it should fail, because it's unhealthy
    try {
      cluster.getService(1).getZKFCProxy(conf, 5000).gracefulFailover();
      fail("Did not fail to graceful failover to unhealthy service!");
    } catch (ServiceFailedException sfe) {
      GenericTestUtils.assertExceptionContains(
          cluster.getService(1).toString() +
          " is not currently healthy.", sfe);
    }
  } finally {
    cluster.stop();
  }
}
Example 6
Source File: TestPermissionSymlinks.java From hadoop with Apache License 2.0
private void doDeleteLinkParentNotWritable() throws Exception {
  // Try to delete where the symlink's parent dir is not writable
  try {
    user.doAs(new PrivilegedExceptionAction<Object>() {
      @Override
      public Object run() throws IOException {
        FileContext myfc = FileContext.getFileContext(conf);
        myfc.delete(link, false);
        return null;
      }
    });
    fail("Deleted symlink without write permissions on parent!");
  } catch (AccessControlException e) {
    GenericTestUtils.assertExceptionContains("Permission denied", e);
  }
}
Example 7
Source File: TestEditLogJournalFailures.java From big-c with Apache License 2.0
@Test
public void testAllEditsDirsFailOnFlush() throws IOException {
  assertTrue(doAnEdit());
  // Invalidate both edits journals.
  invalidateEditsDirAtIndex(0, true, false);
  invalidateEditsDirAtIndex(1, true, false);
  // The NN has not terminated (no ExitException thrown)
  try {
    doAnEdit();
    fail("The previous edit could not be synced to any persistent storage, " +
        "should have halted the NN");
  } catch (RemoteException re) {
    assertTrue(re.getClassName().contains("ExitException"));
    GenericTestUtils.assertExceptionContains(
        "Could not sync enough journals to persistent storage. " +
        "Unsynced transactions: 1", re);
  }
}
Example 8
Source File: TestVirtualHostStyleFilter.java From hadoop-ozone with Apache License 2.0
@Test
public void testIncorrectVirtualHostStyle() throws Exception {

  VirtualHostStyleFilter virtualHostStyleFilter =
      new VirtualHostStyleFilter();
  virtualHostStyleFilter.setConfiguration(conf);

  ContainerRequest containerRequest = createContainerRequest("mybucket" +
      "localhost:9878", null, null, true);
  try {
    virtualHostStyleFilter.filter(containerRequest);
    fail("testIncorrectVirtualHostStyle failed");
  } catch (InvalidRequestException ex) {
    GenericTestUtils.assertExceptionContains("invalid format", ex);
  }
}
Example 9
Source File: TestCheckpoint.java From big-c with Apache License 2.0
/**
 * Assert that the given storage directory can't be locked, because
 * it's already locked.
 */
private static void assertLockFails(StorageDirectory sd) {
  try {
    sd.lock();
    // If the above line didn't throw an exception, then
    // locking must not be supported
    assertFalse(sd.isLockSupported());
  } catch (IOException ioe) {
    GenericTestUtils.assertExceptionContains("already locked", ioe);
  }
}
Example 10
Source File: TestQJMWithFaults.java From big-c with Apache License 2.0
private void checkException(Throwable t) {
  GenericTestUtils.assertExceptionContains("Injected", t);
  if (t.toString().contains("AssertionError")) {
    throw new RuntimeException("Should never see AssertionError in fault test!",
        t);
  }
}
Example 11
Source File: TestSecondaryNameNodeUpgrade.java From big-c with Apache License 2.0
@Test
public void testChangeNsIDFails() throws IOException {
  try {
    doIt(ImmutableMap.of("namespaceID", "2"));
    Assert.fail("Should throw InconsistentFSStateException");
  } catch (IOException e) {
    GenericTestUtils.assertExceptionContains("Inconsistent checkpoint fields", e);
    System.out.println("Correctly failed with inconsistent namespaceID: " + e);
  }
}
Example 12
Source File: TestWebHDFS.java From hadoop with Apache License 2.0
/**
 * Test snapshot creation through WebHdfs
 */
@Test
public void testWebHdfsCreateSnapshot() throws Exception {
  MiniDFSCluster cluster = null;
  final Configuration conf = WebHdfsTestUtil.createConf();
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
    cluster.waitActive();
    final DistributedFileSystem dfs = cluster.getFileSystem();
    final FileSystem webHdfs = WebHdfsTestUtil.getWebHdfsFileSystem(conf,
        WebHdfsFileSystem.SCHEME);

    final Path foo = new Path("/foo");
    dfs.mkdirs(foo);

    try {
      webHdfs.createSnapshot(foo);
      fail("Cannot create snapshot on a non-snapshottable directory");
    } catch (Exception e) {
      GenericTestUtils.assertExceptionContains(
          "Directory is not a snapshottable directory", e);
    }

    // allow snapshots on /foo
    dfs.allowSnapshot(foo);
    // create snapshots on foo using WebHdfs
    webHdfs.createSnapshot(foo, "s1");
    // create snapshot without specifying name
    final Path spath = webHdfs.createSnapshot(foo, null);

    Assert.assertTrue(webHdfs.exists(spath));
    final Path s1path = SnapshotTestHelper.getSnapshotRoot(foo, "s1");
    Assert.assertTrue(webHdfs.exists(s1path));
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
Example 13
Source File: TestMover.java From big-c with Apache License 2.0
/**
 * Test Mover Cli by specifying a list of files/directories using option "-p".
 * There is only one namenode (and hence name service) specified in the conf.
 */
@Test
public void testMoverCli() throws Exception {
  final MiniDFSCluster cluster = new MiniDFSCluster
      .Builder(new HdfsConfiguration()).numDataNodes(0).build();
  try {
    final Configuration conf = cluster.getConfiguration(0);
    try {
      Mover.Cli.getNameNodePathsToMove(conf, "-p", "/foo", "bar");
      Assert.fail("Expected exception for illegal path bar");
    } catch (IllegalArgumentException e) {
      GenericTestUtils.assertExceptionContains("bar is not absolute", e);
    }

    Map<URI, List<Path>> movePaths = Mover.Cli.getNameNodePathsToMove(conf);
    Collection<URI> namenodes = DFSUtil.getNsServiceRpcUris(conf);
    Assert.assertEquals(1, namenodes.size());
    Assert.assertEquals(1, movePaths.size());
    URI nn = namenodes.iterator().next();
    Assert.assertTrue(movePaths.containsKey(nn));
    Assert.assertNull(movePaths.get(nn));

    movePaths = Mover.Cli.getNameNodePathsToMove(conf, "-p", "/foo", "/bar");
    namenodes = DFSUtil.getNsServiceRpcUris(conf);
    Assert.assertEquals(1, movePaths.size());
    nn = namenodes.iterator().next();
    Assert.assertTrue(movePaths.containsKey(nn));
    checkMovePaths(movePaths.get(nn), new Path("/foo"), new Path("/bar"));
  } finally {
    cluster.shutdown();
  }
}
Example 14
Source File: TestEditLog.java From big-c with Apache License 2.0
/**
 * Test loading an editlog with gaps. A single editlog directory
 * is set up. One of the edit log files is deleted. This should
 * fail when selecting the input streams as it will not be able
 * to select enough streams to load up to 4*TXNS_PER_ROLL.
 * There should be 4*TXNS_PER_ROLL transactions as we rolled 3
 * times.
 */
@Test
public void testLoadingWithGaps() throws IOException {
  File f1 = new File(TEST_DIR + "/gaptest0");
  List<URI> editUris = ImmutableList.of(f1.toURI());
  NNStorage storage = setupEdits(editUris, 3);

  final long startGapTxId = 1*TXNS_PER_ROLL + 1;
  final long endGapTxId = 2*TXNS_PER_ROLL;

  File[] files = new File(f1, "current").listFiles(new FilenameFilter() {
      @Override
      public boolean accept(File dir, String name) {
        if (name.startsWith(NNStorage.getFinalizedEditsFileName(startGapTxId,
                                endGapTxId))) {
          return true;
        }
        return false;
      }
    });
  assertEquals(1, files.length);
  assertTrue(files[0].delete());

  FSEditLog editlog = getFSEditLog(storage);
  editlog.initJournalsForWrite();
  long startTxId = 1;
  try {
    editlog.selectInputStreams(startTxId, 4*TXNS_PER_ROLL);
    fail("Should have thrown exception");
  } catch (IOException ioe) {
    GenericTestUtils.assertExceptionContains(
        "Gap in transactions. Expected to be able to read up until " +
        "at least txid 40 but unable to find any edit logs containing " +
        "txid 11", ioe);
  }
}
Example 15
Source File: TestCheckpoint.java From hadoop with Apache License 2.0
/**
 * Test case where the NN is configured with a name-only and an edits-only
 * dir, with storage-restore turned on. In this case, if the name-only dir
 * disappears and comes back, a new checkpoint after it has been restored
 * should function correctly.
 * @throws Exception
 */
@Test
public void testCheckpointWithSeparateDirsAfterNameFails() throws Exception {
  MiniDFSCluster cluster = null;
  SecondaryNameNode secondary = null;
  File currentDir = null;

  Configuration conf = new HdfsConfiguration();

  File base_dir = new File(MiniDFSCluster.getBaseDirectory());
  conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_RESTORE_KEY, true);
  conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
      MiniDFSCluster.getBaseDirectory() + "/name-only");
  conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY,
      MiniDFSCluster.getBaseDirectory() + "/edits-only");
  conf.set(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_DIR_KEY,
      fileAsURI(new File(base_dir, "namesecondary1")).toString());

  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).format(true)
        .manageNameDfsDirs(false).build();

    secondary = startSecondaryNameNode(conf);

    // Checkpoint once
    secondary.doCheckpoint();

    // Now primary NN experiences failure of its only name dir -- fake by
    // setting its current dir to a-x permissions
    NamenodeProtocols nn = cluster.getNameNodeRpc();
    NNStorage storage = cluster.getNameNode().getFSImage().getStorage();
    StorageDirectory sd0 = storage.getStorageDir(0);
    assertEquals(NameNodeDirType.IMAGE, sd0.getStorageDirType());
    currentDir = sd0.getCurrentDir();
    assertEquals(0, FileUtil.chmod(currentDir.getAbsolutePath(), "000"));

    // Try to upload checkpoint -- this should fail since there are no
    // valid storage dirs
    try {
      secondary.doCheckpoint();
      fail("Did not fail to checkpoint when there are no valid storage dirs");
    } catch (IOException ioe) {
      GenericTestUtils.assertExceptionContains(
          "No targets in destination storage", ioe);
    }

    // Restore the good dir
    assertEquals(0, FileUtil.chmod(currentDir.getAbsolutePath(), "755"));
    nn.restoreFailedStorage("true");
    nn.rollEditLog();

    // Checkpoint again -- this should upload to the restored name dir
    secondary.doCheckpoint();

    assertNNHasCheckpoints(cluster, ImmutableList.of(8));
    assertParallelFilesInvariant(cluster, ImmutableList.of(secondary));
  } finally {
    if (currentDir != null) {
      FileUtil.chmod(currentDir.getAbsolutePath(), "755");
    }
    cleanup(secondary);
    secondary = null;
    cleanup(cluster);
    cluster = null;
  }
}
Example 16
Source File: TestRenameWithSnapshots.java From hadoop with Apache License 2.0
/**
 * Test the rename undo when removing dst node fails
 */
@Test
public void testRenameUndo_6() throws Exception {
  final Path test = new Path("/test");
  final Path dir1 = new Path(test, "dir1");
  final Path dir2 = new Path(test, "dir2");
  final Path sub_dir2 = new Path(dir2, "subdir");
  final Path subsub_dir2 = new Path(sub_dir2, "subdir");
  hdfs.mkdirs(dir1);
  hdfs.mkdirs(subsub_dir2);

  final Path foo = new Path(dir1, "foo");
  hdfs.mkdirs(foo);

  SnapshotTestHelper.createSnapshot(hdfs, dir1, "s1");
  SnapshotTestHelper.createSnapshot(hdfs, dir2, "s2");

  // set ns quota of dir2 to 4, so the current remaining is 1 (already has
  // dir2, sub_dir2, and subsub_dir2)
  hdfs.setQuota(dir2, 4, Long.MAX_VALUE - 1);
  FSDirectory fsdir2 = Mockito.spy(fsdir);
  Mockito.doThrow(new RuntimeException("fake exception")).when(fsdir2)
      .removeLastINode((INodesInPath) Mockito.anyObject());
  Whitebox.setInternalState(fsn, "dir", fsdir2);
  // rename /test/dir1/foo to /test/dir2/sub_dir2/subsub_dir2.
  // FSDirectory#verifyQuota4Rename will pass since foo is only counted
  // as 1 in NS quota. However, the rename operation will fail when removing
  // subsub_dir2.
  try {
    hdfs.rename(foo, subsub_dir2, Rename.OVERWRITE);
    fail("Expect QuotaExceedException");
  } catch (Exception e) {
    String msg = "fake exception";
    GenericTestUtils.assertExceptionContains(msg, e);
  }

  // check the undo
  assertTrue(hdfs.exists(foo));
  INodeDirectory dir1Node = fsdir2.getINode4Write(dir1.toString())
      .asDirectory();
  List<INode> childrenList = ReadOnlyList.Util.asList(dir1Node
      .getChildrenList(Snapshot.CURRENT_STATE_ID));
  assertEquals(1, childrenList.size());
  INode fooNode = childrenList.get(0);
  assertTrue(fooNode.asDirectory().isWithSnapshot());
  assertSame(dir1Node, fooNode.getParent());
  List<DirectoryDiff> diffList = dir1Node
      .getDiffs().asList();
  assertEquals(1, diffList.size());
  DirectoryDiff diff = diffList.get(0);
  assertTrue(diff.getChildrenDiff().getList(ListType.CREATED).isEmpty());
  assertTrue(diff.getChildrenDiff().getList(ListType.DELETED).isEmpty());

  // check dir2
  INodeDirectory dir2Node = fsdir2.getINode4Write(dir2.toString()).asDirectory();
  assertTrue(dir2Node.isSnapshottable());
  QuotaCounts counts = dir2Node.computeQuotaUsage(
      fsdir.getBlockStoragePolicySuite());
  assertEquals(3, counts.getNameSpace());
  assertEquals(0, counts.getStorageSpace());
  childrenList = ReadOnlyList.Util.asList(dir2Node.asDirectory()
      .getChildrenList(Snapshot.CURRENT_STATE_ID));
  assertEquals(1, childrenList.size());
  INode subdir2Node = childrenList.get(0);
  assertSame(dir2Node, subdir2Node.getParent());
  assertSame(subdir2Node, fsdir2.getINode4Write(sub_dir2.toString()));
  INode subsubdir2Node = fsdir2.getINode4Write(subsub_dir2.toString());
  assertTrue(subsubdir2Node.getClass() == INodeDirectory.class);
  assertSame(subdir2Node, subsubdir2Node.getParent());

  diffList = (dir2Node).getDiffs().asList();
  assertEquals(1, diffList.size());
  diff = diffList.get(0);
  assertTrue(diff.getChildrenDiff().getList(ListType.CREATED).isEmpty());
  assertTrue(diff.getChildrenDiff().getList(ListType.DELETED).isEmpty());
}
Example 17
Source File: TestRenameWithSnapshots.java From big-c with Apache License 2.0
/**
 * Test undo where dst node being overwritten is a reference node
 */
@Test
public void testRenameUndo_4() throws Exception {
  final Path sdir1 = new Path("/dir1");
  final Path sdir2 = new Path("/dir2");
  final Path sdir3 = new Path("/dir3");
  hdfs.mkdirs(sdir1);
  hdfs.mkdirs(sdir2);
  hdfs.mkdirs(sdir3);

  final Path foo = new Path(sdir1, "foo");
  final Path bar = new Path(foo, "bar");
  DFSTestUtil.createFile(hdfs, bar, BLOCKSIZE, REPL, SEED);

  final Path foo2 = new Path(sdir2, "foo2");
  hdfs.mkdirs(foo2);

  SnapshotTestHelper.createSnapshot(hdfs, sdir1, "s1");
  SnapshotTestHelper.createSnapshot(hdfs, sdir2, "s2");

  // rename foo2 to foo3, so that foo3 will be a reference node
  final Path foo3 = new Path(sdir3, "foo3");
  hdfs.rename(foo2, foo3);

  INode foo3Node = fsdir.getINode4Write(foo3.toString());
  assertTrue(foo3Node.isReference());

  INodeDirectory dir3 = fsdir.getINode4Write(sdir3.toString()).asDirectory();
  INodeDirectory mockDir3 = spy(dir3);
  // fail the rename but succeed in undo
  doReturn(false).when(mockDir3).addChild((INode) Mockito.isNull(),
      anyBoolean(), Mockito.anyInt());
  Mockito.when(mockDir3.addChild((INode) Mockito.isNotNull(), anyBoolean(),
      Mockito.anyInt())).thenReturn(false).thenCallRealMethod();
  INodeDirectory root = fsdir.getINode4Write("/").asDirectory();
  root.replaceChild(dir3, mockDir3, fsdir.getINodeMap());
  foo3Node.setParent(mockDir3);

  try {
    hdfs.rename(foo, foo3, Rename.OVERWRITE);
    fail("the rename from " + foo + " to " + foo3 + " should fail");
  } catch (IOException e) {
    GenericTestUtils.assertExceptionContains("rename from " + foo + " to " +
        foo3 + " failed.", e);
  }

  // make sure the undo is correct
  final INode foo3Node_undo = fsdir.getINode4Write(foo3.toString());
  assertSame(foo3Node, foo3Node_undo);
  INodeReference.WithCount foo3_wc = (WithCount) foo3Node.asReference()
      .getReferredINode();
  assertEquals(2, foo3_wc.getReferenceCount());
  assertSame(foo3Node, foo3_wc.getParentReference());
}
Example 18
Source File: TestHAStateTransitions.java From hadoop with Apache License 2.0
/**
 * Test which takes a single node and flip flops between
 * active and standby mode, making sure it doesn't
 * double-play any edits.
 */
@Test(timeout = 300000)
public void testTransitionActiveToStandby() throws Exception {
  Configuration conf = new Configuration();
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
      .nnTopology(MiniDFSNNTopology.simpleHATopology())
      .numDataNodes(1)
      .build();
  try {
    cluster.waitActive();
    cluster.transitionToActive(0);
    FileSystem fs = cluster.getFileSystem(0);

    fs.mkdirs(TEST_DIR);
    cluster.transitionToStandby(0);
    try {
      fs.mkdirs(new Path("/x"));
      fail("Didn't throw trying to mutate FS in standby state");
    } catch (Throwable t) {
      GenericTestUtils.assertExceptionContains(
          "Operation category WRITE is not supported", t);
    }
    cluster.transitionToActive(0);

    // Create a file, then delete the whole directory recursively.
    DFSTestUtil.createFile(fs, new Path(TEST_DIR, "foo"), 10, (short)1, 1L);
    fs.delete(TEST_DIR, true);

    // Now if the standby tries to replay the last segment that it just
    // wrote as active, it would fail since it's trying to create a file
    // in a non-existent directory.
    cluster.transitionToStandby(0);
    cluster.transitionToActive(0);

    assertFalse(fs.exists(TEST_DIR));
  } finally {
    cluster.shutdown();
  }
}
Example 19
Source File: TestOzoneManagerHAWithData.java From hadoop-ozone with Apache License 2.0
private void createKeyTest(boolean checkSuccess) throws Exception {
  String userName = "user" + RandomStringUtils.randomNumeric(5);
  String adminName = "admin" + RandomStringUtils.randomNumeric(5);
  String volumeName = "volume" + RandomStringUtils.randomNumeric(5);

  VolumeArgs createVolumeArgs = VolumeArgs.newBuilder()
      .setOwner(userName)
      .setAdmin(adminName)
      .build();

  try {
    getObjectStore().createVolume(volumeName, createVolumeArgs);

    OzoneVolume retVolumeinfo = getObjectStore().getVolume(volumeName);

    Assert.assertTrue(retVolumeinfo.getName().equals(volumeName));
    Assert.assertTrue(retVolumeinfo.getOwner().equals(userName));
    Assert.assertTrue(retVolumeinfo.getAdmin().equals(adminName));

    String bucketName = UUID.randomUUID().toString();
    String keyName = UUID.randomUUID().toString();

    retVolumeinfo.createBucket(bucketName);
    OzoneBucket ozoneBucket = retVolumeinfo.getBucket(bucketName);

    Assert.assertTrue(ozoneBucket.getName().equals(bucketName));
    Assert.assertTrue(ozoneBucket.getVolumeName().equals(volumeName));

    String value = "random data";
    OzoneOutputStream ozoneOutputStream = ozoneBucket.createKey(keyName,
        value.length(), ReplicationType.STAND_ALONE,
        ReplicationFactor.ONE, new HashMap<>());
    ozoneOutputStream.write(value.getBytes(), 0, value.length());
    ozoneOutputStream.close();

    OzoneInputStream ozoneInputStream = ozoneBucket.readKey(keyName);

    byte[] fileContent = new byte[value.getBytes().length];
    ozoneInputStream.read(fileContent);
    Assert.assertEquals(value, new String(fileContent));
  } catch (ConnectException | RemoteException e) {
    if (!checkSuccess) {
      // If the last OM to be tried by the RetryProxy is down, we would get
      // ConnectException. Otherwise, we would get a RemoteException from the
      // last running OM as it would fail to get a quorum.
      if (e instanceof RemoteException) {
        GenericTestUtils.assertExceptionContains(
            "OMNotLeaderException", e);
      }
    } else {
      throw e;
    }
  }
}
Example 20
Source File: TestCheckpoint.java From hadoop with Apache License 2.0
/**
 * Run a test where the 2NN runs into some kind of error when
 * sending the checkpoint back to the NN.
 * @param exceptionSubstring an expected substring of the triggered exception
 */
private void doSendFailTest(String exceptionSubstring) throws IOException {
  Configuration conf = new HdfsConfiguration();
  Path file1 = new Path("checkpoint-doSendFailTest-doSendFailTest.dat");
  MiniDFSCluster cluster = null;
  FileSystem fileSys = null;
  SecondaryNameNode secondary = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes)
        .build();
    cluster.waitActive();
    fileSys = cluster.getFileSystem();
    assertTrue(!fileSys.exists(file1));
    //
    // Make the checkpoint fail after rolling the edit log.
    //
    secondary = startSecondaryNameNode(conf);

    try {
      secondary.doCheckpoint();  // this should fail
      fail("Did not get expected exception");
    } catch (IOException e) {
      // We only sent part of the image. Have to trigger this exception
      GenericTestUtils.assertExceptionContains(exceptionSubstring, e);
    }
    Mockito.reset(faultInjector);
    // Make sure there are no temporary files left around.
    checkTempImages(cluster.getNameNode().getFSImage().getStorage());
    checkTempImages(secondary.getFSImage().getStorage());
    secondary.shutdown(); // secondary namenode crash!
    secondary = null;

    // start new instance of secondary and verify that
    // a new rollEditLog succeeds in spite of the fact that we had
    // a partially failed checkpoint previously.
    //
    secondary = startSecondaryNameNode(conf);
    secondary.doCheckpoint();  // this should work correctly

    //
    // Create a new file
    //
    writeFile(fileSys, file1, replication);
    checkFile(fileSys, file1, replication);
  } finally {
    fileSys.close();
    cleanup(secondary);
    secondary = null;
    cleanup(cluster);
    cluster = null;
  }
}