org.apache.hadoop.test.GenericTestUtils.LogCapturer Java Examples
The following examples show how to use
org.apache.hadoop.test.GenericTestUtils.LogCapturer.
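Most of the examples below follow the same pattern: attach a LogCapturer to the logger of the class under test, exercise the code path that is expected to log, assert on logs.getOutput(), and finally call stopCapturing(). The following is a minimal, self-contained sketch of that pattern; MyService and its warning message are invented for illustration only, and the sketch assumes the captureLogs overload that accepts an slf4j Logger (the same overload the Ozone examples below use).

// Minimal sketch of the LogCapturer pattern used throughout these examples.
// MyService and its warning text are hypothetical, not from Hadoop or Ozone.
import static org.junit.Assert.assertTrue;

import org.apache.hadoop.test.GenericTestUtils.LogCapturer;
import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class TestMyServiceLogging {

  /** Hypothetical class under test that emits a warning. */
  static class MyService {
    static final Logger LOG = LoggerFactory.getLogger(MyService.class);

    void doSomethingThatWarns() {
      LOG.warn("resource does not exist");
    }
  }

  @Test
  public void testWarningIsLogged() {
    // Attach a capturer to the logger of the class under test.
    LogCapturer logs = LogCapturer.captureLogs(MyService.LOG);
    try {
      // Exercise the code path that is expected to log.
      new MyService().doSomethingThatWarns();

      // Assert on the captured output instead of a return value.
      assertTrue("expected warning was not logged",
          logs.getOutput().contains("resource does not exist"));
    } finally {
      // Detach the capturer so later tests start with a clean slate.
      logs.stopCapturing();
    }
  }
}

Several of the Ozone examples also call clearOutput() between steps to reset the captured buffer before making a new assertion.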
Example #1
Source File: TestVolumeSet.java From hadoop-ozone with Apache License 2.0
@Test
public void testRemoveVolume() throws Exception {

  assertEquals(2, volumeSet.getVolumesList().size());

  // Remove a volume from VolumeSet
  volumeSet.removeVolume(volume1);
  assertEquals(1, volumeSet.getVolumesList().size());

  // Attempting to remove a volume which does not exist in VolumeSet should
  // log a warning.
  LogCapturer logs = LogCapturer.captureLogs(
      LogFactory.getLog(MutableVolumeSet.class));

  volumeSet.removeVolume(volume1);
  assertEquals(1, volumeSet.getVolumesList().size());

  String expectedLogMessage = "Volume : " +
      HddsVolumeUtil.getHddsRoot(volume1) + " does not exist in VolumeSet";
  assertTrue("Log output does not contain expected log message: "
      + expectedLogMessage, logs.getOutput().contains(expectedLogMessage));
}
Example #2
Source File: TestSecureOzoneCluster.java From hadoop-ozone with Apache License 2.0
/**
 * Tests the secure om Initialization success.
 */
@Test
public void testSecureOmInitializationSuccess() throws Exception {
  initSCM();
  // Create a secure SCM instance as om client will connect to it
  scm = StorageContainerManager.createSCM(conf);
  LogCapturer logs = LogCapturer.captureLogs(OzoneManager.getLogger());
  GenericTestUtils.setLogLevel(OzoneManager.getLogger(), INFO);

  setupOm(conf);
  try {
    om.start();
  } catch (Exception ex) {
    // Expects timeout failure from scmClient in om but om user login via
    // kerberos should succeed.
    assertTrue(logs.getOutput().contains("Ozone Manager login successful"));
  }
}
Example #3
Source File: TestCheckpoint.java From big-c with Apache License 2.0
/**
 * Test that, an attempt to lock a storage that is already locked by nodename,
 * logs error message that includes JVM name of the namenode that locked it.
 */
@Test
public void testStorageAlreadyLockedErrorMessage() throws Exception {
  Configuration conf = new HdfsConfiguration();
  MiniDFSCluster cluster = null;
  StorageDirectory savedSd = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
    NNStorage storage = cluster.getNameNode().getFSImage().getStorage();
    for (StorageDirectory sd : storage.dirIterable(null)) {
      assertLockFails(sd);
      savedSd = sd;
    }

    LogCapturer logs = GenericTestUtils.LogCapturer.captureLogs(
        LogFactory.getLog(Storage.class));
    try {
      // try to lock the storage that's already locked
      savedSd.lock();
      fail("Namenode should not be able to lock a storage" +
          " that is already locked");
    } catch (IOException ioe) {
      // cannot read lock file on Windows, so message cannot get JVM name
      String lockingJvmName = Path.WINDOWS ? "" :
          " " + ManagementFactory.getRuntimeMXBean().getName();
      String expectedLogMessage = "It appears that another node "
          + lockingJvmName + " has already locked the storage directory";
      assertTrue("Log output does not contain expected log message: "
          + expectedLogMessage,
          logs.getOutput().contains(expectedLogMessage));
    }
  } finally {
    cleanup(cluster);
    cluster = null;
  }
}
Example #4
Source File: TestBootstrapStandby.java From big-c with Apache License 2.0
/**
 * Test for the case where the shared edits dir doesn't have
 * all of the recent edit logs.
 */
@Test
public void testSharedEditsMissingLogs() throws Exception {
  removeStandbyNameDirs();

  CheckpointSignature sig = nn0.getRpcServer().rollEditLog();
  assertEquals(3, sig.getCurSegmentTxId());

  // Should have created edits_1-2 in shared edits dir
  URI editsUri = cluster.getSharedEditsDir(0, 1);
  File editsDir = new File(editsUri);
  File editsSegment = new File(new File(editsDir, "current"),
      NNStorage.getFinalizedEditsFileName(1, 2));
  GenericTestUtils.assertExists(editsSegment);

  // Delete the segment.
  assertTrue(editsSegment.delete());

  // Trying to bootstrap standby should now fail since the edit
  // logs aren't available in the shared dir.
  LogCapturer logs = GenericTestUtils.LogCapturer.captureLogs(
      LogFactory.getLog(BootstrapStandby.class));
  try {
    int rc = BootstrapStandby.run(
        new String[]{"-force"},
        cluster.getConfiguration(1));
    assertEquals(BootstrapStandby.ERR_CODE_LOGS_UNAVAILABLE, rc);
  } finally {
    logs.stopCapturing();
  }
  GenericTestUtils.assertMatches(logs.getOutput(),
      "FATAL.*Unable to read transaction ids 1-3 from the configured shared");
}
Example #5
Source File: TestEncryptedTransfer.java From big-c with Apache License 2.0
private void testEncryptedWrite(int numDns) throws IOException {
  MiniDFSCluster cluster = null;
  try {
    Configuration conf = new Configuration();
    setEncryptionConfigKeys(conf);

    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDns).build();

    FileSystem fs = getFileSystem(conf);

    LogCapturer logs = GenericTestUtils.LogCapturer.captureLogs(
        LogFactory.getLog(SaslDataTransferServer.class));
    LogCapturer logs1 = GenericTestUtils.LogCapturer.captureLogs(
        LogFactory.getLog(DataTransferSaslUtil.class));
    try {
      writeTestDataToFile(fs);
    } finally {
      logs.stopCapturing();
      logs1.stopCapturing();
    }
    assertEquals(PLAIN_TEXT, DFSTestUtil.readFile(fs, TEST_PATH));
    fs.close();

    if (resolverClazz == null) {
      // Test client and server negotiate cipher option
      GenericTestUtils.assertDoesNotMatch(logs.getOutput(),
          "Server using cipher suite");
      // Check the IOStreamPair
      GenericTestUtils.assertDoesNotMatch(logs1.getOutput(),
          "Creating IOStreamPair of CryptoInputStream and CryptoOutputStream.");
    }
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
Example #6
Source File: TestSaslDataTransfer.java From big-c with Apache License 2.0
@Test
public void testServerSaslNoClientSasl() throws Exception {
  HdfsConfiguration clusterConf = createSecureConfig(
      "authentication,integrity,privacy");
  // Set short retry timeouts so this test runs faster
  clusterConf.setInt(DFSConfigKeys.DFS_CLIENT_RETRY_WINDOW_BASE, 10);
  startCluster(clusterConf);
  HdfsConfiguration clientConf = new HdfsConfiguration(clusterConf);
  clientConf.set(DFS_DATA_TRANSFER_PROTECTION_KEY, "");

  LogCapturer logs = GenericTestUtils.LogCapturer.captureLogs(
      LogFactory.getLog(DataNode.class));
  try {
    doTest(clientConf);
    Assert.fail("Should fail if SASL data transfer protection is not " +
        "configured or not supported in client");
  } catch (IOException e) {
    GenericTestUtils.assertMatches(e.getMessage(),
        "could only be replicated to 0 nodes");
  } finally {
    logs.stopCapturing();
  }

  GenericTestUtils.assertMatches(logs.getOutput(),
      "Failed to read expected SASL data transfer protection " +
      "handshake from client at");
}
Example #7
Source File: TestCheckpoint.java From hadoop with Apache License 2.0
/**
 * Test that, an attempt to lock a storage that is already locked by nodename,
 * logs error message that includes JVM name of the namenode that locked it.
 */
@Test
public void testStorageAlreadyLockedErrorMessage() throws Exception {
  Configuration conf = new HdfsConfiguration();
  MiniDFSCluster cluster = null;
  StorageDirectory savedSd = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
    NNStorage storage = cluster.getNameNode().getFSImage().getStorage();
    for (StorageDirectory sd : storage.dirIterable(null)) {
      assertLockFails(sd);
      savedSd = sd;
    }

    LogCapturer logs = GenericTestUtils.LogCapturer.captureLogs(
        LogFactory.getLog(Storage.class));
    try {
      // try to lock the storage that's already locked
      savedSd.lock();
      fail("Namenode should not be able to lock a storage" +
          " that is already locked");
    } catch (IOException ioe) {
      // cannot read lock file on Windows, so message cannot get JVM name
      String lockingJvmName = Path.WINDOWS ? "" :
          " " + ManagementFactory.getRuntimeMXBean().getName();
      String expectedLogMessage = "It appears that another node "
          + lockingJvmName + " has already locked the storage directory";
      assertTrue("Log output does not contain expected log message: "
          + expectedLogMessage,
          logs.getOutput().contains(expectedLogMessage));
    }
  } finally {
    cleanup(cluster);
    cluster = null;
  }
}
Example #8
Source File: TestBootstrapStandby.java From hadoop with Apache License 2.0
/**
 * Test for the case where the shared edits dir doesn't have
 * all of the recent edit logs.
 */
@Test
public void testSharedEditsMissingLogs() throws Exception {
  removeStandbyNameDirs();

  CheckpointSignature sig = nn0.getRpcServer().rollEditLog();
  assertEquals(3, sig.getCurSegmentTxId());

  // Should have created edits_1-2 in shared edits dir
  URI editsUri = cluster.getSharedEditsDir(0, 1);
  File editsDir = new File(editsUri);
  File editsSegment = new File(new File(editsDir, "current"),
      NNStorage.getFinalizedEditsFileName(1, 2));
  GenericTestUtils.assertExists(editsSegment);

  // Delete the segment.
  assertTrue(editsSegment.delete());

  // Trying to bootstrap standby should now fail since the edit
  // logs aren't available in the shared dir.
  LogCapturer logs = GenericTestUtils.LogCapturer.captureLogs(
      LogFactory.getLog(BootstrapStandby.class));
  try {
    int rc = BootstrapStandby.run(
        new String[]{"-force"},
        cluster.getConfiguration(1));
    assertEquals(BootstrapStandby.ERR_CODE_LOGS_UNAVAILABLE, rc);
  } finally {
    logs.stopCapturing();
  }
  GenericTestUtils.assertMatches(logs.getOutput(),
      "FATAL.*Unable to read transaction ids 1-3 from the configured shared");
}
Example #9
Source File: TestEncryptedTransfer.java From hadoop with Apache License 2.0
private void testEncryptedWrite(int numDns) throws IOException {
  MiniDFSCluster cluster = null;
  try {
    Configuration conf = new Configuration();
    setEncryptionConfigKeys(conf);

    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDns).build();

    FileSystem fs = getFileSystem(conf);

    LogCapturer logs = GenericTestUtils.LogCapturer.captureLogs(
        LogFactory.getLog(SaslDataTransferServer.class));
    LogCapturer logs1 = GenericTestUtils.LogCapturer.captureLogs(
        LogFactory.getLog(DataTransferSaslUtil.class));
    try {
      writeTestDataToFile(fs);
    } finally {
      logs.stopCapturing();
      logs1.stopCapturing();
    }
    assertEquals(PLAIN_TEXT, DFSTestUtil.readFile(fs, TEST_PATH));
    fs.close();

    if (resolverClazz == null) {
      // Test client and server negotiate cipher option
      GenericTestUtils.assertDoesNotMatch(logs.getOutput(),
          "Server using cipher suite");
      // Check the IOStreamPair
      GenericTestUtils.assertDoesNotMatch(logs1.getOutput(),
          "Creating IOStreamPair of CryptoInputStream and CryptoOutputStream.");
    }
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
Example #10
Source File: TestSaslDataTransfer.java From hadoop with Apache License 2.0
@Test
public void testServerSaslNoClientSasl() throws Exception {
  HdfsConfiguration clusterConf = createSecureConfig(
      "authentication,integrity,privacy");
  // Set short retry timeouts so this test runs faster
  clusterConf.setInt(DFSConfigKeys.DFS_CLIENT_RETRY_WINDOW_BASE, 10);
  startCluster(clusterConf);
  HdfsConfiguration clientConf = new HdfsConfiguration(clusterConf);
  clientConf.set(DFS_DATA_TRANSFER_PROTECTION_KEY, "");

  LogCapturer logs = GenericTestUtils.LogCapturer.captureLogs(
      LogFactory.getLog(DataNode.class));
  try {
    doTest(clientConf);
    Assert.fail("Should fail if SASL data transfer protection is not " +
        "configured or not supported in client");
  } catch (IOException e) {
    GenericTestUtils.assertMatches(e.getMessage(),
        "could only be replicated to 0 nodes");
  } finally {
    logs.stopCapturing();
  }

  GenericTestUtils.assertMatches(logs.getOutput(),
      "Failed to read expected SASL data transfer protection " +
      "handshake from client at");
}
Example #11
Source File: TestBlockDeletion.java From hadoop-ozone with Apache License 2.0
/**
 * Waits for datanode command to be retried when datanode is dead.
 */
private void waitForDatanodeCommandRetry()
    throws TimeoutException, InterruptedException {
  cluster.shutdownHddsDatanode(0);
  LogCapturer logCapturer =
      LogCapturer.captureLogs(RetriableDatanodeEventWatcher.LOG);
  logCapturer.clearOutput();
  GenericTestUtils.waitFor(() -> logCapturer.getOutput()
      .contains("RetriableDatanodeCommand type=deleteBlocksCommand"),
      500, 5000);
  cluster.restartHddsDatanode(0, true);
}
Example #12
Source File: TestBlockDeletion.java From hadoop-ozone with Apache License 2.0
private void waitForDatanodeBlockDeletionStart()
    throws TimeoutException, InterruptedException {
  LogCapturer logCapturer =
      LogCapturer.captureLogs(DeleteBlocksCommandHandler.LOG);
  logCapturer.clearOutput();
  GenericTestUtils.waitFor(() -> logCapturer.getOutput()
      .contains("Start to delete container block"),
      500, 10000);
  Thread.sleep(1000);
}
Example #13
Source File: TestSecureOzoneCluster.java From hadoop-ozone with Apache License 2.0
/**
 * Test functionality to get SCM signed certificate for OM.
 */
@Test
public void testSecureOmInitSuccess() throws Exception {
  LogCapturer omLogs = LogCapturer.captureLogs(OzoneManager.getLogger());
  omLogs.clearOutput();
  initSCM();
  try {
    scm = HddsTestUtils.getScm(conf);
    scm.start();

    OMStorage omStore = new OMStorage(conf);
    initializeOmStorage(omStore);
    OzoneManager.setTestSecureOmFlag(true);
    om = OzoneManager.createOm(conf);

    assertNotNull(om.getCertificateClient());
    assertNotNull(om.getCertificateClient().getPublicKey());
    assertNotNull(om.getCertificateClient().getPrivateKey());
    assertNotNull(om.getCertificateClient().getCertificate());
    assertTrue(omLogs.getOutput().contains("Init response: GETCERT"));
    assertTrue(omLogs.getOutput().contains("Successfully stored " +
        "SCM signed certificate"));
    X509Certificate certificate = om.getCertificateClient().getCertificate();
    validateCertificate(certificate);
    String pemEncodedCACert =
        scm.getSecurityProtocolServer().getCACertificate();
    X509Certificate caCert = CertificateCodec.getX509Cert(pemEncodedCACert);
    X509Certificate caCertStored = om.getCertificateClient()
        .getCertificate(caCert.getSerialNumber().toString());
    assertEquals(caCert, caCertStored);
  } finally {
    if (scm != null) {
      scm.stop();
    }
    IOUtils.closeQuietly(om);
  }
}
Example #14
Source File: TestEncryptedTransfer.java From hadoop with Apache License 2.0
@Test
public void testEncryptedRead() throws IOException {
  MiniDFSCluster cluster = null;
  try {
    Configuration conf = new Configuration();
    cluster = new MiniDFSCluster.Builder(conf).build();

    FileSystem fs = getFileSystem(conf);
    writeTestDataToFile(fs);
    assertEquals(PLAIN_TEXT, DFSTestUtil.readFile(fs, TEST_PATH));
    FileChecksum checksum = fs.getFileChecksum(TEST_PATH);
    fs.close();
    cluster.shutdown();

    setEncryptionConfigKeys(conf);

    cluster = new MiniDFSCluster.Builder(conf)
        .manageDataDfsDirs(false)
        .manageNameDfsDirs(false)
        .format(false)
        .startupOption(StartupOption.REGULAR)
        .build();

    fs = getFileSystem(conf);

    LogCapturer logs = GenericTestUtils.LogCapturer.captureLogs(
        LogFactory.getLog(SaslDataTransferServer.class));
    LogCapturer logs1 = GenericTestUtils.LogCapturer.captureLogs(
        LogFactory.getLog(DataTransferSaslUtil.class));
    try {
      assertEquals(PLAIN_TEXT, DFSTestUtil.readFile(fs, TEST_PATH));
      assertEquals(checksum, fs.getFileChecksum(TEST_PATH));
    } finally {
      logs.stopCapturing();
      logs1.stopCapturing();
    }

    fs.close();

    if (resolverClazz == null) {
      // Test client and server negotiate cipher option
      GenericTestUtils.assertDoesNotMatch(logs.getOutput(),
          "Server using cipher suite");
      // Check the IOStreamPair
      GenericTestUtils.assertDoesNotMatch(logs1.getOutput(),
          "Creating IOStreamPair of CryptoInputStream and CryptoOutputStream.");
    }
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
Example #15
Source File: TestEncryptedTransfer.java From hadoop with Apache License 2.0
@Test
public void testEncryptedReadWithRC4() throws IOException {
  MiniDFSCluster cluster = null;
  try {
    Configuration conf = new Configuration();
    cluster = new MiniDFSCluster.Builder(conf).build();

    FileSystem fs = getFileSystem(conf);
    writeTestDataToFile(fs);
    assertEquals(PLAIN_TEXT, DFSTestUtil.readFile(fs, TEST_PATH));
    FileChecksum checksum = fs.getFileChecksum(TEST_PATH);
    fs.close();
    cluster.shutdown();

    setEncryptionConfigKeys(conf);
    // It'll use 3DES by default, but we set it to rc4 here.
    conf.set(DFSConfigKeys.DFS_DATA_ENCRYPTION_ALGORITHM_KEY, "rc4");

    cluster = new MiniDFSCluster.Builder(conf)
        .manageDataDfsDirs(false)
        .manageNameDfsDirs(false)
        .format(false)
        .startupOption(StartupOption.REGULAR)
        .build();

    fs = getFileSystem(conf);

    LogCapturer logs = GenericTestUtils.LogCapturer.captureLogs(
        LogFactory.getLog(SaslDataTransferServer.class));
    LogCapturer logs1 = GenericTestUtils.LogCapturer.captureLogs(
        LogFactory.getLog(DataTransferSaslUtil.class));
    try {
      assertEquals(PLAIN_TEXT, DFSTestUtil.readFile(fs, TEST_PATH));
      assertEquals(checksum, fs.getFileChecksum(TEST_PATH));
    } finally {
      logs.stopCapturing();
      logs1.stopCapturing();
    }

    fs.close();

    if (resolverClazz == null) {
      // Test client and server negotiate cipher option
      GenericTestUtils.assertDoesNotMatch(logs.getOutput(),
          "Server using cipher suite");
      // Check the IOStreamPair
      GenericTestUtils.assertDoesNotMatch(logs1.getOutput(),
          "Creating IOStreamPair of CryptoInputStream and CryptoOutputStream.");
    }
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
Example #16
Source File: TestEncryptedTransfer.java From hadoop with Apache License 2.0
@Test
public void testEncryptedReadWithAES() throws IOException {
  MiniDFSCluster cluster = null;
  try {
    Configuration conf = new Configuration();
    conf.set(DFSConfigKeys.DFS_ENCRYPT_DATA_TRANSFER_CIPHER_SUITES_KEY,
        "AES/CTR/NoPadding");
    cluster = new MiniDFSCluster.Builder(conf).build();

    FileSystem fs = getFileSystem(conf);
    writeTestDataToFile(fs);
    assertEquals(PLAIN_TEXT, DFSTestUtil.readFile(fs, TEST_PATH));
    FileChecksum checksum = fs.getFileChecksum(TEST_PATH);
    fs.close();
    cluster.shutdown();

    setEncryptionConfigKeys(conf);

    cluster = new MiniDFSCluster.Builder(conf)
        .manageDataDfsDirs(false)
        .manageNameDfsDirs(false)
        .format(false)
        .startupOption(StartupOption.REGULAR)
        .build();

    fs = getFileSystem(conf);

    LogCapturer logs = GenericTestUtils.LogCapturer.captureLogs(
        LogFactory.getLog(SaslDataTransferServer.class));
    LogCapturer logs1 = GenericTestUtils.LogCapturer.captureLogs(
        LogFactory.getLog(DataTransferSaslUtil.class));
    try {
      assertEquals(PLAIN_TEXT, DFSTestUtil.readFile(fs, TEST_PATH));
      assertEquals(checksum, fs.getFileChecksum(TEST_PATH));
    } finally {
      logs.stopCapturing();
      logs1.stopCapturing();
    }

    fs.close();

    if (resolverClazz == null) {
      // Test client and server negotiate cipher option
      GenericTestUtils.assertMatches(logs.getOutput(),
          "Server using cipher suite");
      // Check the IOStreamPair
      GenericTestUtils.assertMatches(logs1.getOutput(),
          "Creating IOStreamPair of CryptoInputStream and CryptoOutputStream.");
    }
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
Example #17
Source File: TestEncryptedTransfer.java From hadoop with Apache License 2.0
@Test
public void testClientThatDoesNotSupportEncryption() throws IOException {
  MiniDFSCluster cluster = null;
  try {
    Configuration conf = new Configuration();
    // Set short retry timeouts so this test runs faster
    conf.setInt(DFSConfigKeys.DFS_CLIENT_RETRY_WINDOW_BASE, 10);
    cluster = new MiniDFSCluster.Builder(conf).build();

    FileSystem fs = getFileSystem(conf);
    writeTestDataToFile(fs);
    assertEquals(PLAIN_TEXT, DFSTestUtil.readFile(fs, TEST_PATH));
    fs.close();
    cluster.shutdown();

    setEncryptionConfigKeys(conf);

    cluster = new MiniDFSCluster.Builder(conf)
        .manageDataDfsDirs(false)
        .manageNameDfsDirs(false)
        .format(false)
        .startupOption(StartupOption.REGULAR)
        .build();

    fs = getFileSystem(conf);

    DFSClient client =
        DFSClientAdapter.getDFSClient((DistributedFileSystem) fs);
    DFSClient spyClient = Mockito.spy(client);
    Mockito.doReturn(false).when(spyClient).shouldEncryptData();
    DFSClientAdapter.setDFSClient((DistributedFileSystem) fs, spyClient);

    LogCapturer logs = GenericTestUtils.LogCapturer.captureLogs(
        LogFactory.getLog(DataNode.class));
    try {
      assertEquals(PLAIN_TEXT, DFSTestUtil.readFile(fs, TEST_PATH));
      if (resolverClazz != null &&
          !resolverClazz.endsWith("TestTrustedChannelResolver")) {
        fail("Should not have been able to read without encryption enabled.");
      }
    } catch (IOException ioe) {
      GenericTestUtils.assertExceptionContains("Could not obtain block:",
          ioe);
    } finally {
      logs.stopCapturing();
    }
    fs.close();

    if (resolverClazz == null) {
      GenericTestUtils.assertMatches(logs.getOutput(),
          "Failed to read expected encryption handshake from client at");
    }
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
Example #18
Source File: TestSecureOzoneCluster.java From hadoop-ozone with Apache License 2.0
/**
 * Tests functionality to init secure OM when it is already initialized.
 */
@Test
public void testSecureOmReInit() throws Exception {
  LogCapturer omLogs = LogCapturer.captureLogs(OzoneManager.getLogger());
  omLogs.clearOutput();

  /*
   * As all these processes run inside the same JVM, there are issues around
   * the Hadoop UGI if different processes run with different principals.
   * In this test, the OM has to contact the SCM to download certs. SCM runs
   * as scm/host@REALM, but the OM logs in as om/host@REALM, and then the test
   * fails, and the OM is unable to contact the SCM due to kerberos login
   * issues. To work around that, have the OM run as the same principal as the
   * SCM, and then the test passes.
   *
   * TODO: Need to look into this further to see if there is a better way to
   * address this problem.
   */
  String realm = miniKdc.getRealm();
  conf.set(OZONE_OM_KERBEROS_PRINCIPAL_KEY,
      "scm/" + host + "@" + realm);
  omKeyTab = new File(workDir, "scm.keytab");
  conf.set(OZONE_OM_KERBEROS_KEYTAB_FILE_KEY,
      omKeyTab.getAbsolutePath());

  initSCM();
  try {
    scm = HddsTestUtils.getScm(conf);
    scm.start();
    conf.setBoolean(OZONE_SECURITY_ENABLED_KEY, false);
    OMStorage omStore = new OMStorage(conf);
    initializeOmStorage(omStore);
    OzoneManager.setTestSecureOmFlag(true);
    om = OzoneManager.createOm(conf);

    assertNull(om.getCertificateClient());
    assertFalse(omLogs.getOutput().contains("Init response: GETCERT"));
    assertFalse(omLogs.getOutput().contains("Successfully stored " +
        "SCM signed certificate"));

    conf.setBoolean(OZONE_SECURITY_ENABLED_KEY, true);
    OzoneManager.omInit(conf);
    om.stop();
    om = OzoneManager.createOm(conf);

    assertNotNull(om.getCertificateClient());
    assertNotNull(om.getCertificateClient().getPublicKey());
    assertNotNull(om.getCertificateClient().getPrivateKey());
    assertNotNull(om.getCertificateClient().getCertificate());
    assertTrue(omLogs.getOutput().contains("Init response: GETCERT"));
    assertTrue(omLogs.getOutput().contains("Successfully stored " +
        "SCM signed certificate"));
    X509Certificate certificate = om.getCertificateClient().getCertificate();
    validateCertificate(certificate);
  } finally {
    if (scm != null) {
      scm.stop();
    }
  }
}
Example #19
Source File: TestEncryptedTransfer.java From big-c with Apache License 2.0
@Test
public void testEncryptedRead() throws IOException {
  MiniDFSCluster cluster = null;
  try {
    Configuration conf = new Configuration();
    cluster = new MiniDFSCluster.Builder(conf).build();

    FileSystem fs = getFileSystem(conf);
    writeTestDataToFile(fs);
    assertEquals(PLAIN_TEXT, DFSTestUtil.readFile(fs, TEST_PATH));
    FileChecksum checksum = fs.getFileChecksum(TEST_PATH);
    fs.close();
    cluster.shutdown();

    setEncryptionConfigKeys(conf);

    cluster = new MiniDFSCluster.Builder(conf)
        .manageDataDfsDirs(false)
        .manageNameDfsDirs(false)
        .format(false)
        .startupOption(StartupOption.REGULAR)
        .build();

    fs = getFileSystem(conf);

    LogCapturer logs = GenericTestUtils.LogCapturer.captureLogs(
        LogFactory.getLog(SaslDataTransferServer.class));
    LogCapturer logs1 = GenericTestUtils.LogCapturer.captureLogs(
        LogFactory.getLog(DataTransferSaslUtil.class));
    try {
      assertEquals(PLAIN_TEXT, DFSTestUtil.readFile(fs, TEST_PATH));
      assertEquals(checksum, fs.getFileChecksum(TEST_PATH));
    } finally {
      logs.stopCapturing();
      logs1.stopCapturing();
    }

    fs.close();

    if (resolverClazz == null) {
      // Test client and server negotiate cipher option
      GenericTestUtils.assertDoesNotMatch(logs.getOutput(),
          "Server using cipher suite");
      // Check the IOStreamPair
      GenericTestUtils.assertDoesNotMatch(logs1.getOutput(),
          "Creating IOStreamPair of CryptoInputStream and CryptoOutputStream.");
    }
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
Example #20
Source File: TestEncryptedTransfer.java From big-c with Apache License 2.0
@Test
public void testEncryptedReadWithRC4() throws IOException {
  MiniDFSCluster cluster = null;
  try {
    Configuration conf = new Configuration();
    cluster = new MiniDFSCluster.Builder(conf).build();

    FileSystem fs = getFileSystem(conf);
    writeTestDataToFile(fs);
    assertEquals(PLAIN_TEXT, DFSTestUtil.readFile(fs, TEST_PATH));
    FileChecksum checksum = fs.getFileChecksum(TEST_PATH);
    fs.close();
    cluster.shutdown();

    setEncryptionConfigKeys(conf);
    // It'll use 3DES by default, but we set it to rc4 here.
    conf.set(DFSConfigKeys.DFS_DATA_ENCRYPTION_ALGORITHM_KEY, "rc4");

    cluster = new MiniDFSCluster.Builder(conf)
        .manageDataDfsDirs(false)
        .manageNameDfsDirs(false)
        .format(false)
        .startupOption(StartupOption.REGULAR)
        .build();

    fs = getFileSystem(conf);

    LogCapturer logs = GenericTestUtils.LogCapturer.captureLogs(
        LogFactory.getLog(SaslDataTransferServer.class));
    LogCapturer logs1 = GenericTestUtils.LogCapturer.captureLogs(
        LogFactory.getLog(DataTransferSaslUtil.class));
    try {
      assertEquals(PLAIN_TEXT, DFSTestUtil.readFile(fs, TEST_PATH));
      assertEquals(checksum, fs.getFileChecksum(TEST_PATH));
    } finally {
      logs.stopCapturing();
      logs1.stopCapturing();
    }

    fs.close();

    if (resolverClazz == null) {
      // Test client and server negotiate cipher option
      GenericTestUtils.assertDoesNotMatch(logs.getOutput(),
          "Server using cipher suite");
      // Check the IOStreamPair
      GenericTestUtils.assertDoesNotMatch(logs1.getOutput(),
          "Creating IOStreamPair of CryptoInputStream and CryptoOutputStream.");
    }
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
Example #21
Source File: TestEncryptedTransfer.java From big-c with Apache License 2.0
@Test
public void testEncryptedReadWithAES() throws IOException {
  MiniDFSCluster cluster = null;
  try {
    Configuration conf = new Configuration();
    conf.set(DFSConfigKeys.DFS_ENCRYPT_DATA_TRANSFER_CIPHER_SUITES_KEY,
        "AES/CTR/NoPadding");
    cluster = new MiniDFSCluster.Builder(conf).build();

    FileSystem fs = getFileSystem(conf);
    writeTestDataToFile(fs);
    assertEquals(PLAIN_TEXT, DFSTestUtil.readFile(fs, TEST_PATH));
    FileChecksum checksum = fs.getFileChecksum(TEST_PATH);
    fs.close();
    cluster.shutdown();

    setEncryptionConfigKeys(conf);

    cluster = new MiniDFSCluster.Builder(conf)
        .manageDataDfsDirs(false)
        .manageNameDfsDirs(false)
        .format(false)
        .startupOption(StartupOption.REGULAR)
        .build();

    fs = getFileSystem(conf);

    LogCapturer logs = GenericTestUtils.LogCapturer.captureLogs(
        LogFactory.getLog(SaslDataTransferServer.class));
    LogCapturer logs1 = GenericTestUtils.LogCapturer.captureLogs(
        LogFactory.getLog(DataTransferSaslUtil.class));
    try {
      assertEquals(PLAIN_TEXT, DFSTestUtil.readFile(fs, TEST_PATH));
      assertEquals(checksum, fs.getFileChecksum(TEST_PATH));
    } finally {
      logs.stopCapturing();
      logs1.stopCapturing();
    }

    fs.close();

    if (resolverClazz == null) {
      // Test client and server negotiate cipher option
      GenericTestUtils.assertMatches(logs.getOutput(),
          "Server using cipher suite");
      // Check the IOStreamPair
      GenericTestUtils.assertMatches(logs1.getOutput(),
          "Creating IOStreamPair of CryptoInputStream and CryptoOutputStream.");
    }
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
Example #22
Source File: TestEncryptedTransfer.java From big-c with Apache License 2.0
@Test
public void testClientThatDoesNotSupportEncryption() throws IOException {
  MiniDFSCluster cluster = null;
  try {
    Configuration conf = new Configuration();
    // Set short retry timeouts so this test runs faster
    conf.setInt(DFSConfigKeys.DFS_CLIENT_RETRY_WINDOW_BASE, 10);
    cluster = new MiniDFSCluster.Builder(conf).build();

    FileSystem fs = getFileSystem(conf);
    writeTestDataToFile(fs);
    assertEquals(PLAIN_TEXT, DFSTestUtil.readFile(fs, TEST_PATH));
    fs.close();
    cluster.shutdown();

    setEncryptionConfigKeys(conf);

    cluster = new MiniDFSCluster.Builder(conf)
        .manageDataDfsDirs(false)
        .manageNameDfsDirs(false)
        .format(false)
        .startupOption(StartupOption.REGULAR)
        .build();

    fs = getFileSystem(conf);

    DFSClient client =
        DFSClientAdapter.getDFSClient((DistributedFileSystem) fs);
    DFSClient spyClient = Mockito.spy(client);
    Mockito.doReturn(false).when(spyClient).shouldEncryptData();
    DFSClientAdapter.setDFSClient((DistributedFileSystem) fs, spyClient);

    LogCapturer logs = GenericTestUtils.LogCapturer.captureLogs(
        LogFactory.getLog(DataNode.class));
    try {
      assertEquals(PLAIN_TEXT, DFSTestUtil.readFile(fs, TEST_PATH));
      if (resolverClazz != null &&
          !resolverClazz.endsWith("TestTrustedChannelResolver")) {
        fail("Should not have been able to read without encryption enabled.");
      }
    } catch (IOException ioe) {
      GenericTestUtils.assertExceptionContains("Could not obtain block:",
          ioe);
    } finally {
      logs.stopCapturing();
    }
    fs.close();

    if (resolverClazz == null) {
      GenericTestUtils.assertMatches(logs.getOutput(),
          "Failed to read expected encryption handshake from client at");
    }
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
Example #23
Source File: TestSecureOzoneCluster.java From hadoop-ozone with Apache License 2.0
/**
 * Tests delegation token renewal.
 */
@Test
public void testDelegationTokenRenewal() throws Exception {
  GenericTestUtils
      .setLogLevel(LoggerFactory.getLogger(Server.class.getName()), INFO);
  LogCapturer omLogs = LogCapturer.captureLogs(OzoneManager.getLogger());

  // Setup secure OM for start.
  OzoneConfiguration newConf = new OzoneConfiguration(conf);
  int tokenMaxLifetime = 1000;
  newConf.setLong(DELEGATION_TOKEN_MAX_LIFETIME_KEY, tokenMaxLifetime);
  setupOm(newConf);
  long omVersion = RPC.getProtocolVersion(OzoneManagerProtocolPB.class);
  OzoneManager.setTestSecureOmFlag(true);

  // Start OM
  try {
    om.setCertClient(new CertificateClientTestImpl(conf));
    om.start();

    UserGroupInformation ugi = UserGroupInformation.getCurrentUser();

    // Get first OM client which will authenticate via Kerberos
    omClient = new OzoneManagerProtocolClientSideTranslatorPB(
        OmTransportFactory.create(conf, ugi, null),
        RandomStringUtils.randomAscii(5));

    // Since client is already connected get a delegation token
    Token<OzoneTokenIdentifier> token = omClient.getDelegationToken(
        new Text("om"));

    // Check if token is of right kind and renewer is running om instance
    assertNotNull(token);
    assertEquals("OzoneToken", token.getKind().toString());
    assertEquals(OmUtils.getOmRpcAddress(conf),
        token.getService().toString());

    // Renew delegation token
    long expiryTime = omClient.renewDelegationToken(token);
    assertTrue(expiryTime > 0);
    omLogs.clearOutput();

    // Test failure of delegation renewal
    // 1. When token maxExpiryTime exceeds
    Thread.sleep(tokenMaxLifetime);
    OMException ex = LambdaTestUtils.intercept(OMException.class,
        "TOKEN_EXPIRED",
        () -> omClient.renewDelegationToken(token));
    assertEquals(TOKEN_EXPIRED, ex.getResult());
    omLogs.clearOutput();

    // 2. When renewer doesn't match (implicitly covers when renewer is
    // null or empty )
    Token<OzoneTokenIdentifier> token2 = omClient.getDelegationToken(
        new Text("randomService"));
    assertNotNull(token2);
    LambdaTestUtils.intercept(OMException.class,
        "Delegation token renewal failed",
        () -> omClient.renewDelegationToken(token2));
    assertTrue(omLogs.getOutput().contains(" with non-matching " +
        "renewer randomService"));
    omLogs.clearOutput();

    // 3. Test tampered token
    OzoneTokenIdentifier tokenId = OzoneTokenIdentifier.readProtoBuf(
        token.getIdentifier());
    tokenId.setRenewer(new Text("om"));
    tokenId.setMaxDate(System.currentTimeMillis() * 2);
    Token<OzoneTokenIdentifier> tamperedToken = new Token<>(
        tokenId.getBytes(), token2.getPassword(), token2.getKind(),
        token2.getService());
    LambdaTestUtils.intercept(OMException.class,
        "Delegation token renewal failed",
        () -> omClient.renewDelegationToken(tamperedToken));
    assertTrue(omLogs.getOutput().contains("can't be found in " +
        "cache"));
    omLogs.clearOutput();
  } finally {
    om.stop();
    om.join();
  }
}
Example #24
Source File: TestSpaceUsageFactory.java From hadoop-ozone with Apache License 2.0
@Before
public void setUp() {
  capturer = LogCapturer.captureLogs(
      LoggerFactory.getLogger(SpaceUsageCheckFactory.class));
}
Example #25
Source File: TestBlockDeletingService.java From hadoop-ozone with Apache License 2.0
@Test
public void testBlockDeletionTimeout() throws Exception {
  OzoneConfiguration conf = new OzoneConfiguration();
  conf.setInt(OZONE_BLOCK_DELETING_CONTAINER_LIMIT_PER_INTERVAL, 10);
  conf.setInt(OZONE_BLOCK_DELETING_LIMIT_PER_CONTAINER, 2);
  ContainerSet containerSet = new ContainerSet();
  createToDeleteBlocks(containerSet, conf, 1, 3, 1);

  // set timeout value as 1ns to trigger timeout behavior
  long timeout = 1;
  OzoneContainer ozoneContainer = mockDependencies(containerSet);
  BlockDeletingService svc = new BlockDeletingService(ozoneContainer,
      TimeUnit.MILLISECONDS.toNanos(1000), timeout, TimeUnit.NANOSECONDS,
      conf);
  svc.start();

  LogCapturer log = LogCapturer.captureLogs(BackgroundService.LOG);
  GenericTestUtils.waitFor(() -> {
    if (log.getOutput().contains(
        "Background task executes timed out, retrying in next interval")) {
      log.stopCapturing();
      return true;
    }
    return false;
  }, 100, 1000);

  log.stopCapturing();
  svc.shutdown();

  // test for normal case that doesn't have timeout limitation
  timeout = 0;
  createToDeleteBlocks(containerSet, conf, 1, 3, 1);
  svc = new BlockDeletingService(ozoneContainer,
      TimeUnit.MILLISECONDS.toNanos(1000), timeout, TimeUnit.MILLISECONDS,
      conf);
  svc.start();

  // get container meta data
  KeyValueContainer container =
      (KeyValueContainer) containerSet.getContainerIterator().next();
  KeyValueContainerData data = container.getContainerData();
  try (ReferenceCountedDB meta = BlockUtils.getDB(data, conf)) {
    LogCapturer newLog = LogCapturer.captureLogs(BackgroundService.LOG);
    GenericTestUtils.waitFor(() -> {
      try {
        return getUnderDeletionBlocksCount(meta) == 0;
      } catch (IOException ignored) {
      }
      return false;
    }, 100, 1000);
    newLog.stopCapturing();

    // The block deleting successfully and shouldn't catch timed
    // out warning log.
    Assert.assertFalse(newLog.getOutput().contains(
        "Background task executes timed out, retrying in next interval"));
  }
  svc.shutdown();
}