org.apache.hadoop.hdfs.client.HdfsAdmin Java Examples
The following examples show how to use
org.apache.hadoop.hdfs.client.HdfsAdmin.
Each example notes its source file, originating project, and license.
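Before the project examples, a minimal sketch of constructing an HdfsAdmin is useful orientation. The namenode host and port below are placeholders, not values taken from any example on this page:

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.client.HdfsAdmin;

public class HdfsAdminQuickStart {
  public static void main(String[] args) throws Exception {
    // Reads core-site.xml/hdfs-site.xml from the classpath
    Configuration conf = new Configuration();
    // The URI must use the hdfs:// scheme; HdfsAdmin rejects anything else
    // with an IllegalArgumentException (see testHdfsAdminWithBadUri below).
    // Host and port here are hypothetical.
    HdfsAdmin admin = new HdfsAdmin(new URI("hdfs://namenode.example.com:8020"), conf);
    System.out.println("HdfsAdmin ready: " + admin);
  }
}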
Example #1
Source File: TestEncryptionZones.java From hadoop with Apache License 2.0 (an identical copy appears in big-c)
@Before
public void setup() throws Exception {
  conf = new HdfsConfiguration();
  fsHelper = new FileSystemTestHelper();
  // Set up java key store
  String testRoot = fsHelper.getTestRootDir();
  testRootDir = new File(testRoot).getAbsoluteFile();
  conf.set(DFSConfigKeys.DFS_ENCRYPTION_KEY_PROVIDER_URI, getKeyProviderURI());
  conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY, true);
  // Lower the batch size for testing
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_LIST_ENCRYPTION_ZONES_NUM_RESPONSES, 2);
  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
  Logger.getLogger(EncryptionZoneManager.class).setLevel(Level.TRACE);
  fs = cluster.getFileSystem();
  fsWrapper = new FileSystemTestWrapper(fs);
  fcWrapper = new FileContextTestWrapper(
      FileContext.getFileContext(cluster.getURI(), conf));
  dfsAdmin = new HdfsAdmin(cluster.getURI(), conf);
  setProvider();
  // Create a test key
  DFSTestUtil.createKey(TEST_KEY, cluster, conf);
}
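The setup above backs the key provider with a Java keystore so that encryption-zone tests can run against a MiniDFSCluster. Outside a test harness, the equivalent HdfsAdmin calls look roughly like the following sketch; the path and key name are hypothetical, and the key must already exist in the cluster's key provider (EncryptionZone is org.apache.hadoop.hdfs.protocol.EncryptionZone):

Configuration conf = new Configuration();
FileSystem fs = FileSystem.get(conf);
HdfsAdmin admin = new HdfsAdmin(FileSystem.getDefaultUri(conf), conf);

Path zone = new Path("/secure/finance");        // hypothetical directory
fs.mkdirs(zone);                                // the zone root must exist and be empty
admin.createEncryptionZone(zone, "financeKey"); // "financeKey" is a hypothetical key name

// Confirm the zone was created
EncryptionZone ez = admin.getEncryptionZoneForPath(zone);
System.out.println(ez.getPath() + " uses key " + ez.getKeyName());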
Example #2
Source File: TestReservedRawPaths.java From hadoop with Apache License 2.0 (an identical copy appears in big-c)
@Before
public void setup() throws Exception {
  conf = new HdfsConfiguration();
  fsHelper = new FileSystemTestHelper();
  // Set up java key store
  String testRoot = fsHelper.getTestRootDir();
  File testRootDir = new File(testRoot).getAbsoluteFile();
  final Path jksPath = new Path(testRootDir.toString(), "test.jks");
  conf.set(DFSConfigKeys.DFS_ENCRYPTION_KEY_PROVIDER_URI,
      JavaKeyStoreProvider.SCHEME_NAME + "://file" + jksPath.toUri());
  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
  Logger.getLogger(EncryptionZoneManager.class).setLevel(Level.TRACE);
  fs = cluster.getFileSystem();
  fsWrapper = new FileSystemTestWrapper(cluster.getFileSystem());
  fcWrapper = new FileContextTestWrapper(
      FileContext.getFileContext(cluster.getURI(), conf));
  dfsAdmin = new HdfsAdmin(cluster.getURI(), conf);
  // Need to set the client's KeyProvider to the NN's for JKS,
  // else the updates do not get flushed properly
  fs.getClient().setKeyProvider(cluster.getNameNode().getNamesystem()
      .getProvider());
  DFSTestUtil.createKey(TEST_KEY, cluster, conf);
}
Example #3
Source File: TestEncryptionZones.java From hadoop with Apache License 2.0 (an identical copy appears in big-c)
@Test(timeout = 120000)
public void testReadWrite() throws Exception {
  final HdfsAdmin dfsAdmin =
      new HdfsAdmin(FileSystem.getDefaultUri(conf), conf);
  // Create a base file for comparison
  final Path baseFile = new Path("/base");
  final int len = 8192;
  DFSTestUtil.createFile(fs, baseFile, len, (short) 1, 0xFEED);
  // Create the first enc file
  final Path zone = new Path("/zone");
  fs.mkdirs(zone);
  dfsAdmin.createEncryptionZone(zone, TEST_KEY);
  final Path encFile1 = new Path(zone, "myfile");
  DFSTestUtil.createFile(fs, encFile1, len, (short) 1, 0xFEED);
  // Read them back in and compare byte-by-byte
  verifyFilesEqual(fs, baseFile, encFile1, len);
  // Roll the key of the encryption zone
  assertNumZones(1);
  String keyName = dfsAdmin.listEncryptionZones().next().getKeyName();
  cluster.getNamesystem().getProvider().rollNewVersion(keyName);
  // Read them back in and compare byte-by-byte
  verifyFilesEqual(fs, baseFile, encFile1, len);
  // Write a new enc file and validate
  final Path encFile2 = new Path(zone, "myfile2");
  DFSTestUtil.createFile(fs, encFile2, len, (short) 1, 0xFEED);
  // FEInfos should be different
  FileEncryptionInfo feInfo1 = getFileEncryptionInfo(encFile1);
  FileEncryptionInfo feInfo2 = getFileEncryptionInfo(encFile2);
  assertFalse("EDEKs should be different", Arrays.equals(
      feInfo1.getEncryptedDataEncryptionKey(),
      feInfo2.getEncryptedDataEncryptionKey()));
  assertNotEquals("Key was rolled, versions should be different",
      feInfo1.getEzKeyVersionName(), feInfo2.getEzKeyVersionName());
  // Contents still equal
  verifyFilesEqual(fs, encFile1, encFile2, len);
}
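The key roll above relies on listEncryptionZones(), which returns a RemoteIterator rather than a collection. A small sketch of walking every zone; like the test, this requires superuser privileges, and hasNext()/next() can throw IOException, so the enclosing method must declare it:

RemoteIterator<EncryptionZone> it = dfsAdmin.listEncryptionZones();
while (it.hasNext()) {
  EncryptionZone zone = it.next();
  System.out.println(zone.getId() + "\t" + zone.getPath() + "\t" + zone.getKeyName());
}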
Example #4
Source File: TestEncryptionZones.java From hadoop with Apache License 2.0 (an identical copy appears in big-c)
/**
 * Test listing encryption zones as a non super user.
 */
@Test(timeout = 60000)
public void testListEncryptionZonesAsNonSuperUser() throws Exception {
  final UserGroupInformation user = UserGroupInformation
      .createUserForTesting("user", new String[] { "mygroup" });
  final Path testRoot = new Path("/tmp/TestEncryptionZones");
  final Path superPath = new Path(testRoot, "superuseronly");
  final Path allPath = new Path(testRoot, "accessall");
  fsWrapper.mkdir(superPath, new FsPermission((short) 0700), true);
  dfsAdmin.createEncryptionZone(superPath, TEST_KEY);
  fsWrapper.mkdir(allPath, new FsPermission((short) 0707), true);
  dfsAdmin.createEncryptionZone(allPath, TEST_KEY);
  user.doAs(new PrivilegedExceptionAction<Object>() {
    @Override
    public Object run() throws Exception {
      final HdfsAdmin userAdmin =
          new HdfsAdmin(FileSystem.getDefaultUri(conf), conf);
      try {
        userAdmin.listEncryptionZones();
      } catch (AccessControlException e) {
        assertExceptionContains("Superuser privilege is required", e);
      }
      return null;
    }
  });
}
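Note what the doAs wrapper is for: listEncryptionZones() is reserved for the HDFS superuser, so when the call runs under the unprivileged test user it is expected to throw an AccessControlException, and assertExceptionContains checks for the "Superuser privilege is required" message.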
Example #5
Source File: TestEncryptionZonesWithHA.java From hadoop with Apache License 2.0 (an identical copy appears in big-c)
@Before
public void setupCluster() throws Exception {
  conf = new Configuration();
  conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1);
  HAUtil.setAllowStandbyReads(conf, true);
  fsHelper = new FileSystemTestHelper();
  String testRoot = fsHelper.getTestRootDir();
  testRootDir = new File(testRoot).getAbsoluteFile();
  conf.set(DFSConfigKeys.DFS_ENCRYPTION_KEY_PROVIDER_URI,
      JavaKeyStoreProvider.SCHEME_NAME + "://file"
          + new Path(testRootDir.toString(), "test.jks").toUri());
  cluster = new MiniDFSCluster.Builder(conf)
      .nnTopology(MiniDFSNNTopology.simpleHATopology())
      .numDataNodes(1)
      .build();
  cluster.waitActive();
  cluster.transitionToActive(0);
  fs = (DistributedFileSystem) HATestUtil.configureFailoverFs(cluster, conf);
  DFSTestUtil.createKey(TEST_KEY, cluster, 0, conf);
  DFSTestUtil.createKey(TEST_KEY, cluster, 1, conf);
  nn0 = cluster.getNameNode(0);
  nn1 = cluster.getNameNode(1);
  dfsAdmin0 = new HdfsAdmin(cluster.getURI(0), conf);
  dfsAdmin1 = new HdfsAdmin(cluster.getURI(1), conf);
  KeyProviderCryptoExtension nn0Provider =
      cluster.getNameNode(0).getNamesystem().getProvider();
  fs.getClient().setKeyProvider(nn0Provider);
}
Example #6
Source File: TestHdfsAdmin.java From hadoop with Apache License 2.0 (an identical copy appears in big-c)
/**
 * Test that we can set and clear quotas via {@link HdfsAdmin}.
 */
@Test
public void testHdfsAdminSetQuota() throws Exception {
  HdfsAdmin dfsAdmin = new HdfsAdmin(FileSystem.getDefaultUri(conf), conf);
  FileSystem fs = null;
  try {
    fs = FileSystem.get(conf);
    assertTrue(fs.mkdirs(TEST_PATH));
    assertEquals(-1, fs.getContentSummary(TEST_PATH).getQuota());
    assertEquals(-1, fs.getContentSummary(TEST_PATH).getSpaceQuota());

    dfsAdmin.setSpaceQuota(TEST_PATH, 10);
    assertEquals(-1, fs.getContentSummary(TEST_PATH).getQuota());
    assertEquals(10, fs.getContentSummary(TEST_PATH).getSpaceQuota());

    dfsAdmin.setQuota(TEST_PATH, 10);
    assertEquals(10, fs.getContentSummary(TEST_PATH).getQuota());
    assertEquals(10, fs.getContentSummary(TEST_PATH).getSpaceQuota());

    dfsAdmin.clearSpaceQuota(TEST_PATH);
    assertEquals(10, fs.getContentSummary(TEST_PATH).getQuota());
    assertEquals(-1, fs.getContentSummary(TEST_PATH).getSpaceQuota());

    dfsAdmin.clearQuota(TEST_PATH);
    assertEquals(-1, fs.getContentSummary(TEST_PATH).getQuota());
    assertEquals(-1, fs.getContentSummary(TEST_PATH).getSpaceQuota());
  } finally {
    if (fs != null) {
      fs.close();
    }
  }
}
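The semantics the assertions exercise carry over directly to production code: the name quota caps the number of files and directories, the space quota caps raw (post-replication) bytes, and -1 from ContentSummary means a quota is unset. A minimal standalone sketch, with a hypothetical directory and limits:

Configuration conf = new Configuration();
HdfsAdmin admin = new HdfsAdmin(FileSystem.getDefaultUri(conf), conf);
Path dir = new Path("/projects/reports");            // hypothetical directory

admin.setQuota(dir, 10_000);                         // at most 10,000 files and directories
admin.setSpaceQuota(dir, 10L * 1024 * 1024 * 1024);  // at most 10 GiB of raw bytes

// ContentSummary reports -1 for whichever quota is not set
ContentSummary summary = FileSystem.get(conf).getContentSummary(dir);
System.out.println("name quota: " + summary.getQuota()
    + ", space quota: " + summary.getSpaceQuota());

admin.clearQuota(dir);                               // back to unlimited names
admin.clearSpaceQuota(dir);                          // back to unlimited space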
Example #7
Source File: GetHDFSEvents.java From nifi with Apache License 2.0 (an identical copy appears in localization_nifi)
protected HdfsAdmin getHdfsAdmin() {
  try {
    // Currently HdfsAdmin is the only public API that allows access to the
    // inotify API. Because of this we need to have super user rights in HDFS.
    return new HdfsAdmin(getFileSystem().getUri(), getFileSystem().getConf());
  } catch (IOException e) {
    getLogger().error("Unable to get an instance of HDFS admin. "
        + "You must be an HDFS super user to view HDFS events.");
    throw new ProcessException(e);
  }
}
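Once the processor holds an HdfsAdmin, the event stream itself comes from getInotifyEventStream(). A sketch of a polling loop, assuming a Hadoop release (2.7 and later) in which poll() returns an EventBatch; poll() also throws IOException, InterruptedException and MissingEventsException, which the enclosing method must handle:

DFSInotifyEventInputStream stream = admin.getInotifyEventStream();
while (!Thread.currentThread().isInterrupted()) {
  EventBatch batch = stream.poll(1, TimeUnit.SECONDS); // null if nothing arrived in time
  if (batch == null) {
    continue;
  }
  for (Event event : batch.getEvents()) {
    switch (event.getEventType()) {
      case CREATE:
        System.out.println("created: " + ((Event.CreateEvent) event).getPath());
        break;
      case UNLINK:
        System.out.println("deleted: " + ((Event.UnlinkEvent) event).getPath());
        break;
      default:
        break;
    }
  }
}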
Example #8
Source File: TestGetHDFSEvents.java From nifi with Apache License 2.0 (an identical copy appears in localization_nifi)
@Before
public void setup() {
  mockNiFiProperties = mock(NiFiProperties.class);
  when(mockNiFiProperties.getKerberosConfigurationFile()).thenReturn(null);
  kerberosProperties = new KerberosProperties(null);
  inotifyEventInputStream = mock(DFSInotifyEventInputStream.class);
  hdfsAdmin = mock(HdfsAdmin.class);
}
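With hdfsAdmin and inotifyEventInputStream mocked, individual tests can wire the two together before exercising the processor. The stubbing below is a plausible sketch of that wiring, an assumption about how the mocks are used rather than a quote from the test file:

// Hypothetical stubbing: hand the processor an empty event batch
EventBatch emptyBatch = mock(EventBatch.class);
when(emptyBatch.getEvents()).thenReturn(new Event[0]);
when(inotifyEventInputStream.poll(anyLong(), any(TimeUnit.class))).thenReturn(emptyBatch);
when(hdfsAdmin.getInotifyEventStream()).thenReturn(inotifyEventInputStream);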
Example #9
Source File: TestGetHDFSEvents.java From nifi with Apache License 2.0 (an identical copy appears in localization_nifi)
@Override
protected HdfsAdmin getHdfsAdmin() {
  // Return the mocked HdfsAdmin instead of connecting to a real cluster
  return hdfsAdmin;
}
Example #10
Source File: TestHdfsAdmin.java From hadoop with Apache License 2.0 (an identical copy appears in big-c)
/**
 * Make sure that a non-HDFS URI throws a helpful error.
 */
@Test(expected = IllegalArgumentException.class)
public void testHdfsAdminWithBadUri() throws IOException, URISyntaxException {
  new HdfsAdmin(new URI("file:///bad-scheme"), conf);
}
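The constructor guard under test only accepts URIs whose scheme is hdfs. For contrast, a correct construction with a hypothetical authority:

// Accepted: the scheme is hdfs (host and port are hypothetical)
HdfsAdmin ok = new HdfsAdmin(new URI("hdfs://namenode.example.com:8020"), conf);

// Rejected with IllegalArgumentException, as the test above verifies:
// new HdfsAdmin(new URI("file:///bad-scheme"), conf);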
Example #11
Source File: TestEncryptionZones.java From hadoop with Apache License 2.0 (an identical copy appears in big-c)
@Test(timeout = 120000)
public void testReadWriteUsingWebHdfs() throws Exception {
  final HdfsAdmin dfsAdmin =
      new HdfsAdmin(FileSystem.getDefaultUri(conf), conf);
  final FileSystem webHdfsFs = WebHdfsTestUtil.getWebHdfsFileSystem(conf,
      WebHdfsFileSystem.SCHEME);

  final Path zone = new Path("/zone");
  fs.mkdirs(zone);
  dfsAdmin.createEncryptionZone(zone, TEST_KEY);

  /* Create an unencrypted file for comparison purposes. */
  final Path unencFile = new Path("/unenc");
  final int len = 8192;
  DFSTestUtil.createFile(webHdfsFs, unencFile, len, (short) 1, 0xFEED);

  /*
   * Create the same file via webhdfs, but this time encrypted. Compare it
   * using both webhdfs and DFS.
   */
  final Path encFile1 = new Path(zone, "myfile");
  DFSTestUtil.createFile(webHdfsFs, encFile1, len, (short) 1, 0xFEED);
  verifyFilesEqual(webHdfsFs, unencFile, encFile1, len);
  verifyFilesEqual(fs, unencFile, encFile1, len);

  /*
   * Same thing except this time create the encrypted file using DFS.
   */
  final Path encFile2 = new Path(zone, "myfile2");
  DFSTestUtil.createFile(fs, encFile2, len, (short) 1, 0xFEED);
  verifyFilesEqual(webHdfsFs, unencFile, encFile2, len);
  verifyFilesEqual(fs, unencFile, encFile2, len);

  /* Verify appending to files works correctly. */
  appendOneByte(fs, unencFile);
  appendOneByte(webHdfsFs, encFile1);
  appendOneByte(fs, encFile2);
  verifyFilesEqual(webHdfsFs, unencFile, encFile1, len);
  verifyFilesEqual(fs, unencFile, encFile1, len);
  verifyFilesEqual(webHdfsFs, unencFile, encFile2, len);
  verifyFilesEqual(fs, unencFile, encFile2, len);
}
Example #12
Source File: TestGetHDFSEvents.java From nifi with Apache License 2.0 (an identical copy appears in localization_nifi)
TestableGetHDFSEvents(KerberosProperties testKerberosProperties, HdfsAdmin hdfsAdmin) {
  this.testKerberosProperties = testKerberosProperties;
  this.hdfsAdmin = hdfsAdmin;
}
Example #13
Source File: TestRpcProgramNfs3.java From hadoop with Apache License 2.0 (an identical copy appears in big-c)
@BeforeClass
public static void setup() throws Exception {
  String currentUser = System.getProperty("user.name");

  config.set("fs.permissions.umask-mode", "u=rwx,g=,o=");
  config.set(DefaultImpersonationProvider.getTestProvider()
      .getProxySuperuserGroupConfKey(currentUser), "*");
  config.set(DefaultImpersonationProvider.getTestProvider()
      .getProxySuperuserIpConfKey(currentUser), "*");
  fsHelper = new FileSystemTestHelper();
  // Set up java key store
  String testRoot = fsHelper.getTestRootDir();
  testRootDir = new File(testRoot).getAbsoluteFile();
  final Path jksPath = new Path(testRootDir.toString(), "test.jks");
  config.set(DFSConfigKeys.DFS_ENCRYPTION_KEY_PROVIDER_URI,
      JavaKeyStoreProvider.SCHEME_NAME + "://file" + jksPath.toUri());
  ProxyUsers.refreshSuperUserGroupsConfiguration(config);

  cluster = new MiniDFSCluster.Builder(config).numDataNodes(1).build();
  cluster.waitActive();
  hdfs = cluster.getFileSystem();
  nn = cluster.getNameNode();
  dfsAdmin = new HdfsAdmin(cluster.getURI(), config);

  // Use ephemeral ports in case tests are running in parallel
  config.setInt("nfs3.mountd.port", 0);
  config.setInt("nfs3.server.port", 0);

  // Start NFS with allowed.hosts set to "* rw"
  config.set("dfs.nfs.exports.allowed.hosts", "* rw");
  nfs = new Nfs3(config);
  nfs.startServiceInternal(false);
  nfsd = (RpcProgramNfs3) nfs.getRpcProgram();

  hdfs.getClient().setKeyProvider(nn.getNamesystem().getProvider());
  DFSTestUtil.createKey(TEST_KEY, cluster, config);

  // Mock SecurityHandler which returns system user.name
  securityHandler = Mockito.mock(SecurityHandler.class);
  Mockito.when(securityHandler.getUser()).thenReturn(currentUser);

  // Mock SecurityHandler which returns a dummy username "harry"
  securityHandlerUnpriviledged = Mockito.mock(SecurityHandler.class);
  Mockito.when(securityHandlerUnpriviledged.getUser()).thenReturn("harry");
}
Example #14
Source File: HdfsFileWatcherPolicy.java From kafka-connect-fs with Apache License 2.0
EventStreamThread(FileSystem fs, HdfsAdmin admin, long retrySleepMs) {
  this.fs = fs;
  this.admin = admin;
  this.retrySleepMs = retrySleepMs;
  this.time = new SystemTime();
}
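A watcher thread like this one typically needs to survive restarts without missing events. HdfsAdmin offers a getInotifyEventStream(long) overload that resumes from a saved transaction ID; in the sketch below, the checkpoint helpers are hypothetical application code:

long lastReadTxid = loadCheckpointedTxid();        // hypothetical persistence helper
DFSInotifyEventInputStream stream =
    admin.getInotifyEventStream(lastReadTxid);     // replays events after that transaction
EventBatch batch = stream.take();                  // blocks until at least one event arrives
process(batch.getEvents());                        // hypothetical handler
saveCheckpointedTxid(batch.getTxid());             // hypothetical persistence helper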