Java Code Examples for org.apache.hadoop.fs.FileSystemTestHelper#getTestRootDir()
The following examples show how to use org.apache.hadoop.fs.FileSystemTestHelper#getTestRootDir().
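Most of these setup methods follow the same pattern: create a FileSystemTestHelper, call getTestRootDir() to obtain the per-test scratch directory, resolve it to an absolute File, and (in the encryption-zone tests) point the HDFS key provider at a Java keystore placed inside that directory. Below is a minimal, self-contained sketch of that pattern, distilled from the examples on this page; the class name TestRootDirSketch and the standalone main method are illustrative assumptions and do not appear in any of the original test classes.

// A minimal sketch of the common pattern, assuming the standard Hadoop test
// and HDFS classes used by the examples below.
import java.io.File;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.crypto.key.JavaKeyStoreProvider;
import org.apache.hadoop.fs.FileSystemTestHelper;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.HdfsConfiguration;

public class TestRootDirSketch {
  public static void main(String[] args) {
    FileSystemTestHelper fsHelper = new FileSystemTestHelper();
    // getTestRootDir() returns the per-test scratch directory as a String path
    String testRoot = fsHelper.getTestRootDir();
    File testRootDir = new File(testRoot).getAbsoluteFile();

    // Typical use in these tests: place a Java keystore under the test root
    // and register it as the HDFS encryption key provider.
    Path jksPath = new Path(testRootDir.toString(), "test.jks");
    Configuration conf = new HdfsConfiguration();
    conf.set(DFSConfigKeys.DFS_ENCRYPTION_KEY_PROVIDER_URI,
        JavaKeyStoreProvider.SCHEME_NAME + "://file" + jksPath.toUri());
  }
}

In the actual tests below, the same calls appear inside JUnit @Before or @BeforeClass methods that then start a MiniDFSCluster against the resulting configuration.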
Example 1
Source File: TestReservedRawPaths.java From hadoop with Apache License 2.0 | 6 votes |
@Before
public void setup() throws Exception {
  conf = new HdfsConfiguration();
  fsHelper = new FileSystemTestHelper();
  // Set up java key store
  String testRoot = fsHelper.getTestRootDir();
  File testRootDir = new File(testRoot).getAbsoluteFile();
  final Path jksPath = new Path(testRootDir.toString(), "test.jks");
  conf.set(DFSConfigKeys.DFS_ENCRYPTION_KEY_PROVIDER_URI,
      JavaKeyStoreProvider.SCHEME_NAME + "://file" + jksPath.toUri());
  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
  Logger.getLogger(EncryptionZoneManager.class).setLevel(Level.TRACE);
  fs = cluster.getFileSystem();
  fsWrapper = new FileSystemTestWrapper(cluster.getFileSystem());
  fcWrapper = new FileContextTestWrapper(
      FileContext.getFileContext(cluster.getURI(), conf));
  dfsAdmin = new HdfsAdmin(cluster.getURI(), conf);
  // Need to set the client's KeyProvider to the NN's for JKS,
  // else the updates do not get flushed properly
  fs.getClient().setKeyProvider(
      cluster.getNameNode().getNamesystem().getProvider());
  DFSTestUtil.createKey(TEST_KEY, cluster, conf);
}
Example 2
Source File: TestEncryptionZones.java From hadoop with Apache License 2.0 | 6 votes |
@Before
public void setup() throws Exception {
  conf = new HdfsConfiguration();
  fsHelper = new FileSystemTestHelper();
  // Set up java key store
  String testRoot = fsHelper.getTestRootDir();
  testRootDir = new File(testRoot).getAbsoluteFile();
  conf.set(DFSConfigKeys.DFS_ENCRYPTION_KEY_PROVIDER_URI, getKeyProviderURI());
  conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY,
      true);
  // Lower the batch size for testing
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_LIST_ENCRYPTION_ZONES_NUM_RESPONSES,
      2);
  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
  Logger.getLogger(EncryptionZoneManager.class).setLevel(Level.TRACE);
  fs = cluster.getFileSystem();
  fsWrapper = new FileSystemTestWrapper(fs);
  fcWrapper = new FileContextTestWrapper(
      FileContext.getFileContext(cluster.getURI(), conf));
  dfsAdmin = new HdfsAdmin(cluster.getURI(), conf);
  setProvider();
  // Create a test key
  DFSTestUtil.createKey(TEST_KEY, cluster, conf);
}
Example 3
Source File: TestReservedRawPaths.java From big-c with Apache License 2.0 | 6 votes |
@Before
public void setup() throws Exception {
  conf = new HdfsConfiguration();
  fsHelper = new FileSystemTestHelper();
  // Set up java key store
  String testRoot = fsHelper.getTestRootDir();
  File testRootDir = new File(testRoot).getAbsoluteFile();
  final Path jksPath = new Path(testRootDir.toString(), "test.jks");
  conf.set(DFSConfigKeys.DFS_ENCRYPTION_KEY_PROVIDER_URI,
      JavaKeyStoreProvider.SCHEME_NAME + "://file" + jksPath.toUri());
  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
  Logger.getLogger(EncryptionZoneManager.class).setLevel(Level.TRACE);
  fs = cluster.getFileSystem();
  fsWrapper = new FileSystemTestWrapper(cluster.getFileSystem());
  fcWrapper = new FileContextTestWrapper(
      FileContext.getFileContext(cluster.getURI(), conf));
  dfsAdmin = new HdfsAdmin(cluster.getURI(), conf);
  // Need to set the client's KeyProvider to the NN's for JKS,
  // else the updates do not get flushed properly
  fs.getClient().setKeyProvider(
      cluster.getNameNode().getNamesystem().getProvider());
  DFSTestUtil.createKey(TEST_KEY, cluster, conf);
}
Example 4
Source File: TestEncryptionZones.java From big-c with Apache License 2.0 | 6 votes |
@Before
public void setup() throws Exception {
  conf = new HdfsConfiguration();
  fsHelper = new FileSystemTestHelper();
  // Set up java key store
  String testRoot = fsHelper.getTestRootDir();
  testRootDir = new File(testRoot).getAbsoluteFile();
  conf.set(DFSConfigKeys.DFS_ENCRYPTION_KEY_PROVIDER_URI, getKeyProviderURI());
  conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY,
      true);
  // Lower the batch size for testing
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_LIST_ENCRYPTION_ZONES_NUM_RESPONSES,
      2);
  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
  Logger.getLogger(EncryptionZoneManager.class).setLevel(Level.TRACE);
  fs = cluster.getFileSystem();
  fsWrapper = new FileSystemTestWrapper(fs);
  fcWrapper = new FileContextTestWrapper(
      FileContext.getFileContext(cluster.getURI(), conf));
  dfsAdmin = new HdfsAdmin(cluster.getURI(), conf);
  setProvider();
  // Create a test key
  DFSTestUtil.createKey(TEST_KEY, cluster, conf);
}
Example 5
Source File: TestEncryptionZonesWithHA.java From hadoop with Apache License 2.0 | 5 votes |
@Before
public void setupCluster() throws Exception {
  conf = new Configuration();
  conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1);
  HAUtil.setAllowStandbyReads(conf, true);
  fsHelper = new FileSystemTestHelper();
  String testRoot = fsHelper.getTestRootDir();
  testRootDir = new File(testRoot).getAbsoluteFile();
  conf.set(DFSConfigKeys.DFS_ENCRYPTION_KEY_PROVIDER_URI,
      JavaKeyStoreProvider.SCHEME_NAME + "://file" +
      new Path(testRootDir.toString(), "test.jks").toUri());
  cluster = new MiniDFSCluster.Builder(conf)
      .nnTopology(MiniDFSNNTopology.simpleHATopology())
      .numDataNodes(1)
      .build();
  cluster.waitActive();
  cluster.transitionToActive(0);
  fs = (DistributedFileSystem) HATestUtil.configureFailoverFs(cluster, conf);
  DFSTestUtil.createKey(TEST_KEY, cluster, 0, conf);
  DFSTestUtil.createKey(TEST_KEY, cluster, 1, conf);
  nn0 = cluster.getNameNode(0);
  nn1 = cluster.getNameNode(1);
  dfsAdmin0 = new HdfsAdmin(cluster.getURI(0), conf);
  dfsAdmin1 = new HdfsAdmin(cluster.getURI(1), conf);
  KeyProviderCryptoExtension nn0Provider =
      cluster.getNameNode(0).getNamesystem().getProvider();
  fs.getClient().setKeyProvider(nn0Provider);
}
Example 6
Source File: TestEncryptionZonesWithHA.java From big-c with Apache License 2.0 | 5 votes |
@Before
public void setupCluster() throws Exception {
  conf = new Configuration();
  conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1);
  HAUtil.setAllowStandbyReads(conf, true);
  fsHelper = new FileSystemTestHelper();
  String testRoot = fsHelper.getTestRootDir();
  testRootDir = new File(testRoot).getAbsoluteFile();
  conf.set(DFSConfigKeys.DFS_ENCRYPTION_KEY_PROVIDER_URI,
      JavaKeyStoreProvider.SCHEME_NAME + "://file" +
      new Path(testRootDir.toString(), "test.jks").toUri());
  cluster = new MiniDFSCluster.Builder(conf)
      .nnTopology(MiniDFSNNTopology.simpleHATopology())
      .numDataNodes(1)
      .build();
  cluster.waitActive();
  cluster.transitionToActive(0);
  fs = (DistributedFileSystem) HATestUtil.configureFailoverFs(cluster, conf);
  DFSTestUtil.createKey(TEST_KEY, cluster, 0, conf);
  DFSTestUtil.createKey(TEST_KEY, cluster, 1, conf);
  nn0 = cluster.getNameNode(0);
  nn1 = cluster.getNameNode(1);
  dfsAdmin0 = new HdfsAdmin(cluster.getURI(0), conf);
  dfsAdmin1 = new HdfsAdmin(cluster.getURI(1), conf);
  KeyProviderCryptoExtension nn0Provider =
      cluster.getNameNode(0).getNamesystem().getProvider();
  fs.getClient().setKeyProvider(nn0Provider);
}
Example 7
Source File: TestRpcProgramNfs3.java From hadoop with Apache License 2.0 | 4 votes |
@BeforeClass
public static void setup() throws Exception {
  String currentUser = System.getProperty("user.name");
  config.set("fs.permissions.umask-mode", "u=rwx,g=,o=");
  config.set(DefaultImpersonationProvider.getTestProvider()
      .getProxySuperuserGroupConfKey(currentUser), "*");
  config.set(DefaultImpersonationProvider.getTestProvider()
      .getProxySuperuserIpConfKey(currentUser), "*");
  fsHelper = new FileSystemTestHelper();
  // Set up java key store
  String testRoot = fsHelper.getTestRootDir();
  testRootDir = new File(testRoot).getAbsoluteFile();
  final Path jksPath = new Path(testRootDir.toString(), "test.jks");
  config.set(DFSConfigKeys.DFS_ENCRYPTION_KEY_PROVIDER_URI,
      JavaKeyStoreProvider.SCHEME_NAME + "://file" + jksPath.toUri());
  ProxyUsers.refreshSuperUserGroupsConfiguration(config);
  cluster = new MiniDFSCluster.Builder(config).numDataNodes(1).build();
  cluster.waitActive();
  hdfs = cluster.getFileSystem();
  nn = cluster.getNameNode();
  dfsAdmin = new HdfsAdmin(cluster.getURI(), config);
  // Use ephemeral ports in case tests are running in parallel
  config.setInt("nfs3.mountd.port", 0);
  config.setInt("nfs3.server.port", 0);
  // Start NFS with allowed.hosts set to "* rw"
  config.set("dfs.nfs.exports.allowed.hosts", "* rw");
  nfs = new Nfs3(config);
  nfs.startServiceInternal(false);
  nfsd = (RpcProgramNfs3) nfs.getRpcProgram();
  hdfs.getClient().setKeyProvider(nn.getNamesystem().getProvider());
  DFSTestUtil.createKey(TEST_KEY, cluster, config);
  // Mock SecurityHandler which returns system user.name
  securityHandler = Mockito.mock(SecurityHandler.class);
  Mockito.when(securityHandler.getUser()).thenReturn(currentUser);
  // Mock SecurityHandler which returns a dummy username "harry"
  securityHandlerUnpriviledged = Mockito.mock(SecurityHandler.class);
  Mockito.when(securityHandlerUnpriviledged.getUser()).thenReturn("harry");
}
Example 8
Source File: TestKeyProviderFactory.java From hadoop with Apache License 2.0 | 4 votes |
@Before
public void setup() {
  fsHelper = new FileSystemTestHelper();
  String testRoot = fsHelper.getTestRootDir();
  testRootDir = new File(testRoot).getAbsoluteFile();
}
Example 9
Source File: TestRpcProgramNfs3.java From big-c with Apache License 2.0 | 4 votes |
@BeforeClass
public static void setup() throws Exception {
  String currentUser = System.getProperty("user.name");
  config.set("fs.permissions.umask-mode", "u=rwx,g=,o=");
  config.set(DefaultImpersonationProvider.getTestProvider()
      .getProxySuperuserGroupConfKey(currentUser), "*");
  config.set(DefaultImpersonationProvider.getTestProvider()
      .getProxySuperuserIpConfKey(currentUser), "*");
  fsHelper = new FileSystemTestHelper();
  // Set up java key store
  String testRoot = fsHelper.getTestRootDir();
  testRootDir = new File(testRoot).getAbsoluteFile();
  final Path jksPath = new Path(testRootDir.toString(), "test.jks");
  config.set(DFSConfigKeys.DFS_ENCRYPTION_KEY_PROVIDER_URI,
      JavaKeyStoreProvider.SCHEME_NAME + "://file" + jksPath.toUri());
  ProxyUsers.refreshSuperUserGroupsConfiguration(config);
  cluster = new MiniDFSCluster.Builder(config).numDataNodes(1).build();
  cluster.waitActive();
  hdfs = cluster.getFileSystem();
  nn = cluster.getNameNode();
  dfsAdmin = new HdfsAdmin(cluster.getURI(), config);
  // Use ephemeral ports in case tests are running in parallel
  config.setInt("nfs3.mountd.port", 0);
  config.setInt("nfs3.server.port", 0);
  // Start NFS with allowed.hosts set to "* rw"
  config.set("dfs.nfs.exports.allowed.hosts", "* rw");
  nfs = new Nfs3(config);
  nfs.startServiceInternal(false);
  nfsd = (RpcProgramNfs3) nfs.getRpcProgram();
  hdfs.getClient().setKeyProvider(nn.getNamesystem().getProvider());
  DFSTestUtil.createKey(TEST_KEY, cluster, config);
  // Mock SecurityHandler which returns system user.name
  securityHandler = Mockito.mock(SecurityHandler.class);
  Mockito.when(securityHandler.getUser()).thenReturn(currentUser);
  // Mock SecurityHandler which returns a dummy username "harry"
  securityHandlerUnpriviledged = Mockito.mock(SecurityHandler.class);
  Mockito.when(securityHandlerUnpriviledged.getUser()).thenReturn("harry");
}
Example 10
Source File: TestKeyProviderFactory.java From big-c with Apache License 2.0 | 4 votes |
@Before
public void setup() {
  fsHelper = new FileSystemTestHelper();
  String testRoot = fsHelper.getTestRootDir();
  testRootDir = new File(testRoot).getAbsoluteFile();
}