org.apache.hadoop.hdfs.web.WebHdfsFileSystem Java Examples
The following examples show how to use
org.apache.hadoop.hdfs.web.WebHdfsFileSystem.
Each example is taken from a real open-source project; the source file and license are noted above each snippet.
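Before the examples, here is a minimal, self-contained sketch of the typical entry point: resolving a webhdfs:// URI through FileSystem.get(...) and reading a file back over the WebHDFS REST gateway. The host, port, and path below are placeholders for illustration only and are not taken from any of the examples.

import java.io.InputStream;
import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;

public class WebHdfsQuickStart {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // "webhdfs://<namenode-http-host>:<http-port>" resolves to a WebHdfsFileSystem;
    // localhost:50070 is a placeholder for the NameNode HTTP address.
    URI uri = URI.create("webhdfs://localhost:50070");
    WebHdfsFileSystem webhdfs = (WebHdfsFileSystem) FileSystem.get(uri, conf);
    try (InputStream in = webhdfs.open(new Path("/tmp/example.txt"))) {
      // read a single byte to confirm the file is reachable over WebHDFS
      System.out.println("first byte: " + in.read());
    } finally {
      webhdfs.close();
    }
  }
}

The test utilities used in the examples below (WebHdfsTestUtil, MiniDFSCluster, and so on) follow the same pattern, but construct the filesystem against an in-process test cluster.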
Example #1
Source File: TestAuditLogs.java From hadoop with Apache License 2.0
/** test that denied access via webhdfs puts proper entry in audit log */
@Test
public void testAuditWebHdfsDenied() throws Exception {
  final Path file = new Path(fnames[0]);

  fs.setPermission(file, new FsPermission((short)0600));
  fs.setOwner(file, "root", null);

  setupAuditLogs();
  try {
    WebHdfsFileSystem webfs = WebHdfsTestUtil.getWebHdfsFileSystemAs(
        userGroupInfo, conf, WebHdfsFileSystem.SCHEME);
    InputStream istream = webfs.open(file);
    int val = istream.read();
    fail("open+read must not succeed, got " + val);
  } catch(AccessControlException E) {
    System.out.println("got access denied, as expected.");
  }
  verifyAuditLogsRepeat(false, 2);
}
Example #2
Source File: NameNodeHttpServer.java From big-c with Apache License 2.0
private void initWebHdfs(Configuration conf) throws IOException {
  if (WebHdfsFileSystem.isEnabled(conf, HttpServer2.LOG)) {
    // set user pattern based on configuration file
    UserParam.setUserPattern(conf.get(
        DFSConfigKeys.DFS_WEBHDFS_USER_PATTERN_KEY,
        DFSConfigKeys.DFS_WEBHDFS_USER_PATTERN_DEFAULT));

    // add authentication filter for webhdfs
    final String className = conf.get(
        DFSConfigKeys.DFS_WEBHDFS_AUTHENTICATION_FILTER_KEY,
        DFSConfigKeys.DFS_WEBHDFS_AUTHENTICATION_FILTER_DEFAULT);
    final String name = className;

    final String pathSpec = WebHdfsFileSystem.PATH_PREFIX + "/*";
    Map<String, String> params = getAuthFilterParams(conf);
    HttpServer2.defineFilter(httpServer.getWebAppContext(), name, className,
        params, new String[] { pathSpec });
    HttpServer2.LOG.info("Added filter '" + name + "' (class=" + className + ")");

    // add webhdfs packages
    httpServer.addJerseyResourcePackage(NamenodeWebHdfsMethods.class
        .getPackage().getName() + ";" + Param.class.getPackage().getName(),
        pathSpec);
  }
}
Example #3
Source File: TestAuditLogs.java From hadoop with Apache License 2.0
/** test that stat via webhdfs puts proper entry in audit log */
@Test
public void testAuditWebHdfsStat() throws Exception {
  final Path file = new Path(fnames[0]);

  fs.setPermission(file, new FsPermission((short)0644));
  fs.setOwner(file, "root", null);

  setupAuditLogs();

  WebHdfsFileSystem webfs = WebHdfsTestUtil.getWebHdfsFileSystemAs(
      userGroupInfo, conf, WebHdfsFileSystem.SCHEME);
  FileStatus st = webfs.getFileStatus(file);

  verifyAuditLogs(true);
  assertTrue("failed to stat file", st != null && st.isFile());
}
Example #4
Source File: TestAuditLogs.java From hadoop with Apache License 2.0
/** test that access via webhdfs puts proper entry in audit log */
@Test
public void testAuditWebHdfs() throws Exception {
  final Path file = new Path(fnames[0]);

  fs.setPermission(file, new FsPermission((short)0644));
  fs.setOwner(file, "root", null);

  setupAuditLogs();

  WebHdfsFileSystem webfs = WebHdfsTestUtil.getWebHdfsFileSystemAs(
      userGroupInfo, conf, WebHdfsFileSystem.SCHEME);
  InputStream istream = webfs.open(file);
  int val = istream.read();
  istream.close();

  verifyAuditLogsRepeat(true, 3);
  assertTrue("failed to read from file", val >= 0);
}
Example #5
Source File: NameNodeHttpServer.java From hadoop with Apache License 2.0
private void initWebHdfs(Configuration conf) throws IOException {
  if (WebHdfsFileSystem.isEnabled(conf, HttpServer2.LOG)) {
    // set user pattern based on configuration file
    UserParam.setUserPattern(conf.get(
        DFSConfigKeys.DFS_WEBHDFS_USER_PATTERN_KEY,
        DFSConfigKeys.DFS_WEBHDFS_USER_PATTERN_DEFAULT));

    // add authentication filter for webhdfs
    final String className = conf.get(
        DFSConfigKeys.DFS_WEBHDFS_AUTHENTICATION_FILTER_KEY,
        DFSConfigKeys.DFS_WEBHDFS_AUTHENTICATION_FILTER_DEFAULT);
    final String name = className;

    final String pathSpec = WebHdfsFileSystem.PATH_PREFIX + "/*";
    Map<String, String> params = getAuthFilterParams(conf);
    HttpServer2.defineFilter(httpServer.getWebAppContext(), name, className,
        params, new String[] { pathSpec });
    HttpServer2.LOG.info("Added filter '" + name + "' (class=" + className + ")");

    // add webhdfs packages
    httpServer.addJerseyResourcePackage(NamenodeWebHdfsMethods.class
        .getPackage().getName() + ";" + Param.class.getPackage().getName(),
        pathSpec);
  }
}
Example #6
Source File: TestAuditLogs.java From big-c with Apache License 2.0
/** test that access via webhdfs puts proper entry in audit log */
@Test
public void testAuditWebHdfs() throws Exception {
  final Path file = new Path(fnames[0]);

  fs.setPermission(file, new FsPermission((short)0644));
  fs.setOwner(file, "root", null);

  setupAuditLogs();

  WebHdfsFileSystem webfs = WebHdfsTestUtil.getWebHdfsFileSystemAs(
      userGroupInfo, conf, WebHdfsFileSystem.SCHEME);
  InputStream istream = webfs.open(file);
  int val = istream.read();
  istream.close();

  verifyAuditLogsRepeat(true, 3);
  assertTrue("failed to read from file", val >= 0);
}
Example #7
Source File: TestAuditLogs.java From big-c with Apache License 2.0
/** test that denied access via webhdfs puts proper entry in audit log */
@Test
public void testAuditWebHdfsDenied() throws Exception {
  final Path file = new Path(fnames[0]);

  fs.setPermission(file, new FsPermission((short)0600));
  fs.setOwner(file, "root", null);

  setupAuditLogs();
  try {
    WebHdfsFileSystem webfs = WebHdfsTestUtil.getWebHdfsFileSystemAs(
        userGroupInfo, conf, WebHdfsFileSystem.SCHEME);
    InputStream istream = webfs.open(file);
    int val = istream.read();
    fail("open+read must not succeed, got " + val);
  } catch(AccessControlException E) {
    System.out.println("got access denied, as expected.");
  }
  verifyAuditLogsRepeat(false, 2);
}
Example #8
Source File: TestAuditLogs.java From big-c with Apache License 2.0
/** test that stat via webhdfs puts proper entry in audit log */
@Test
public void testAuditWebHdfsStat() throws Exception {
  final Path file = new Path(fnames[0]);

  fs.setPermission(file, new FsPermission((short)0644));
  fs.setOwner(file, "root", null);

  setupAuditLogs();

  WebHdfsFileSystem webfs = WebHdfsTestUtil.getWebHdfsFileSystemAs(
      userGroupInfo, conf, WebHdfsFileSystem.SCHEME);
  FileStatus st = webfs.getFileStatus(file);

  verifyAuditLogs(true);
  assertTrue("failed to stat file", st != null && st.isFile());
}
Example #9
Source File: DFSUtil.java From big-c with Apache License 2.0
/**
 * Returns list of InetSocketAddress corresponding to HA NN HTTP addresses from
 * the configuration.
 *
 * @return list of InetSocketAddresses
 */
public static Map<String, Map<String, InetSocketAddress>> getHaNnWebHdfsAddresses(
    Configuration conf, String scheme) {
  if (WebHdfsFileSystem.SCHEME.equals(scheme)) {
    return getAddresses(conf, null,
        DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY);
  } else if (SWebHdfsFileSystem.SCHEME.equals(scheme)) {
    return getAddresses(conf, null,
        DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_KEY);
  } else {
    throw new IllegalArgumentException("Unsupported scheme: " + scheme);
  }
}
Example #10
Source File: TestAuditLogs.java From big-c with Apache License 2.0
/** test that open via webhdfs puts proper entry in audit log */
@Test
public void testAuditWebHdfsOpen() throws Exception {
  final Path file = new Path(fnames[0]);

  fs.setPermission(file, new FsPermission((short)0644));
  fs.setOwner(file, "root", null);

  setupAuditLogs();

  WebHdfsFileSystem webfs = WebHdfsTestUtil.getWebHdfsFileSystemAs(
      userGroupInfo, conf, WebHdfsFileSystem.SCHEME);
  webfs.open(file);

  verifyAuditLogsCheckPattern(true, 3, webOpenPattern);
}
Example #11
Source File: TestDFSClientRetries.java From hadoop with Apache License 2.0
private static FileSystem createFsWithDifferentUsername(
    final Configuration conf, final boolean isWebHDFS
    ) throws IOException, InterruptedException {
  final String username = UserGroupInformation.getCurrentUser()
      .getShortUserName() + "_XXX";
  final UserGroupInformation ugi = UserGroupInformation.createUserForTesting(
      username, new String[]{"supergroup"});

  return isWebHDFS
      ? WebHdfsTestUtil.getWebHdfsFileSystemAs(ugi, conf, WebHdfsFileSystem.SCHEME)
      : DFSTestUtil.getFileSystemAs(ugi, conf);
}
Example #12
Source File: TestHttpFSWithKerberos.java From big-c with Apache License 2.0
@Test
@TestDir
@TestJetty
@TestHdfs
public void testDelegationTokenWithWebhdfsFileSystem() throws Exception {
  testDelegationTokenWithinDoAs(WebHdfsFileSystem.class, false);
}
Example #13
Source File: TestHttpFSPorts.java From big-c with Apache License 2.0
@Test
public void testWebHdfsCustomUriPortWithCustomDefaultPorts() throws IOException {
  URI uri = URI.create("webhdfs://localhost:789");
  WebHdfsFileSystem fs = (WebHdfsFileSystem) FileSystem.get(uri, conf);

  assertEquals(123, fs.getDefaultPort());
  assertEquals(uri, fs.getUri());
  assertEquals("127.0.0.1:789", fs.getCanonicalServiceName());
}
Example #14
Source File: NamenodeWebHdfsMethods.java From big-c with Apache License 2.0
private Token<? extends TokenIdentifier> generateDelegationToken(
    final NameNode namenode, final UserGroupInformation ugi,
    final String renewer) throws IOException {
  final Credentials c = DelegationTokenSecretManager.createCredentials(
      namenode, ugi, renewer != null? renewer: ugi.getShortUserName());
  if (c == null) {
    return null;
  }
  final Token<? extends TokenIdentifier> t = c.getAllTokens().iterator().next();
  Text kind = request.getScheme().equals("http")
      ? WebHdfsFileSystem.TOKEN_KIND
      : SWebHdfsFileSystem.TOKEN_KIND;
  t.setKind(kind);
  return t;
}
Example #15
Source File: TestDelegationToken.java From big-c with Apache License 2.0
@SuppressWarnings("deprecation")
@Test
public void testDelegationTokenWebHdfsApi() throws Exception {
  ((Log4JLogger)NamenodeWebHdfsMethods.LOG).getLogger().setLevel(Level.ALL);
  final String uri = WebHdfsFileSystem.SCHEME + "://"
      + config.get(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY);
  //get file system as JobTracker
  final UserGroupInformation ugi = UserGroupInformation.createUserForTesting(
      "JobTracker", new String[]{"user"});
  final WebHdfsFileSystem webhdfs = ugi.doAs(
      new PrivilegedExceptionAction<WebHdfsFileSystem>() {
    @Override
    public WebHdfsFileSystem run() throws Exception {
      return (WebHdfsFileSystem)FileSystem.get(new URI(uri), config);
    }
  });

  { //test addDelegationTokens(..)
    Credentials creds = new Credentials();
    final Token<?> tokens[] = webhdfs.addDelegationTokens("JobTracker", creds);
    Assert.assertEquals(1, tokens.length);
    Assert.assertEquals(1, creds.numberOfTokens());
    Assert.assertSame(tokens[0], creds.getAllTokens().iterator().next());
    checkTokenIdentifier(ugi, tokens[0]);

    final Token<?> tokens2[] = webhdfs.addDelegationTokens("JobTracker", creds);
    Assert.assertEquals(0, tokens2.length);
  }
}
Example #16
Source File: TestSymlinkHdfs.java From big-c with Apache License 2.0
@BeforeClass
public static void beforeClassSetup() throws Exception {
  Configuration conf = new HdfsConfiguration();
  conf.setBoolean(DFSConfigKeys.DFS_WEBHDFS_ENABLED_KEY, true);
  conf.set(FsPermission.UMASK_LABEL, "000");
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_MAX_COMPONENT_LENGTH_KEY, 0);
  cluster = new MiniDFSCluster.Builder(conf).build();
  webhdfs = WebHdfsTestUtil.getWebHdfsFileSystem(conf, WebHdfsFileSystem.SCHEME);
  dfs = cluster.getFileSystem();
}
Example #17
Source File: TestHttpFSPorts.java From big-c with Apache License 2.0
@Test
public void testWebHdfsCustomDefaultPorts() throws IOException {
  URI uri = URI.create("webhdfs://localhost");
  WebHdfsFileSystem fs = (WebHdfsFileSystem) FileSystem.get(uri, conf);

  assertEquals(123, fs.getDefaultPort());
  assertEquals(uri, fs.getUri());
  assertEquals("127.0.0.1:123", fs.getCanonicalServiceName());
}
Example #18
Source File: TestAuditLogs.java From hadoop with Apache License 2.0
/** test that open via webhdfs puts proper entry in audit log */
@Test
public void testAuditWebHdfsOpen() throws Exception {
  final Path file = new Path(fnames[0]);

  fs.setPermission(file, new FsPermission((short)0644));
  fs.setOwner(file, "root", null);

  setupAuditLogs();

  WebHdfsFileSystem webfs = WebHdfsTestUtil.getWebHdfsFileSystemAs(
      userGroupInfo, conf, WebHdfsFileSystem.SCHEME);
  webfs.open(file);

  verifyAuditLogsCheckPattern(true, 3, webOpenPattern);
}
Example #19
Source File: TestDelegationToken.java From hadoop with Apache License 2.0
@SuppressWarnings("deprecation")
@Test
public void testDelegationTokenWebHdfsApi() throws Exception {
  ((Log4JLogger)NamenodeWebHdfsMethods.LOG).getLogger().setLevel(Level.ALL);
  final String uri = WebHdfsFileSystem.SCHEME + "://"
      + config.get(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY);
  //get file system as JobTracker
  final UserGroupInformation ugi = UserGroupInformation.createUserForTesting(
      "JobTracker", new String[]{"user"});
  final WebHdfsFileSystem webhdfs = ugi.doAs(
      new PrivilegedExceptionAction<WebHdfsFileSystem>() {
    @Override
    public WebHdfsFileSystem run() throws Exception {
      return (WebHdfsFileSystem)FileSystem.get(new URI(uri), config);
    }
  });

  { //test addDelegationTokens(..)
    Credentials creds = new Credentials();
    final Token<?> tokens[] = webhdfs.addDelegationTokens("JobTracker", creds);
    Assert.assertEquals(1, tokens.length);
    Assert.assertEquals(1, creds.numberOfTokens());
    Assert.assertSame(tokens[0], creds.getAllTokens().iterator().next());
    checkTokenIdentifier(ugi, tokens[0]);

    final Token<?> tokens2[] = webhdfs.addDelegationTokens("JobTracker", creds);
    Assert.assertEquals(0, tokens2.length);
  }
}
Example #20
Source File: TestDFSClientRetries.java From big-c with Apache License 2.0
private static FileSystem createFsWithDifferentUsername(
    final Configuration conf, final boolean isWebHDFS
    ) throws IOException, InterruptedException {
  final String username = UserGroupInformation.getCurrentUser()
      .getShortUserName() + "_XXX";
  final UserGroupInformation ugi = UserGroupInformation.createUserForTesting(
      username, new String[]{"supergroup"});

  return isWebHDFS
      ? WebHdfsTestUtil.getWebHdfsFileSystemAs(ugi, conf, WebHdfsFileSystem.SCHEME)
      : DFSTestUtil.getFileSystemAs(ugi, conf);
}
Example #21
Source File: TestHttpFSPorts.java From hadoop with Apache License 2.0
@Test
public void testWebHdfsCustomUriPortWithCustomDefaultPorts() throws IOException {
  URI uri = URI.create("webhdfs://localhost:789");
  WebHdfsFileSystem fs = (WebHdfsFileSystem) FileSystem.get(uri, conf);

  assertEquals(123, fs.getDefaultPort());
  assertEquals(uri, fs.getUri());
  assertEquals("127.0.0.1:789", fs.getCanonicalServiceName());
}
Example #22
Source File: TestHttpFSPorts.java From hadoop with Apache License 2.0
@Test
public void testWebHdfsCustomDefaultPorts() throws IOException {
  URI uri = URI.create("webhdfs://localhost");
  WebHdfsFileSystem fs = (WebHdfsFileSystem) FileSystem.get(uri, conf);

  assertEquals(123, fs.getDefaultPort());
  assertEquals(uri, fs.getUri());
  assertEquals("127.0.0.1:123", fs.getCanonicalServiceName());
}
Example #23
Source File: TestSymlinkHdfs.java From hadoop with Apache License 2.0
@BeforeClass
public static void beforeClassSetup() throws Exception {
  Configuration conf = new HdfsConfiguration();
  conf.setBoolean(DFSConfigKeys.DFS_WEBHDFS_ENABLED_KEY, true);
  conf.set(FsPermission.UMASK_LABEL, "000");
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_MAX_COMPONENT_LENGTH_KEY, 0);
  cluster = new MiniDFSCluster.Builder(conf).build();
  webhdfs = WebHdfsTestUtil.getWebHdfsFileSystem(conf, WebHdfsFileSystem.SCHEME);
  dfs = cluster.getFileSystem();
}
Example #24
Source File: NamenodeWebHdfsMethods.java From hadoop with Apache License 2.0
private Token<? extends TokenIdentifier> generateDelegationToken(
    final NameNode namenode, final UserGroupInformation ugi,
    final String renewer) throws IOException {
  final Credentials c = DelegationTokenSecretManager.createCredentials(
      namenode, ugi, renewer != null? renewer: ugi.getShortUserName());
  if (c == null) {
    return null;
  }
  final Token<? extends TokenIdentifier> t = c.getAllTokens().iterator().next();
  Text kind = request.getScheme().equals("http")
      ? WebHdfsFileSystem.TOKEN_KIND
      : SWebHdfsFileSystem.TOKEN_KIND;
  t.setKind(kind);
  return t;
}
Example #25
Source File: DFSUtil.java From hadoop with Apache License 2.0
/**
 * Returns list of InetSocketAddress corresponding to HA NN HTTP addresses from
 * the configuration.
 *
 * @return list of InetSocketAddresses
 */
public static Map<String, Map<String, InetSocketAddress>> getHaNnWebHdfsAddresses(
    Configuration conf, String scheme) {
  if (WebHdfsFileSystem.SCHEME.equals(scheme)) {
    return getAddresses(conf, null,
        DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY);
  } else if (SWebHdfsFileSystem.SCHEME.equals(scheme)) {
    return getAddresses(conf, null,
        DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_KEY);
  } else {
    throw new IllegalArgumentException("Unsupported scheme: " + scheme);
  }
}
Example #26
Source File: TestHttpFSWithKerberos.java From hadoop with Apache License 2.0
@Test
@TestDir
@TestJetty
@TestHdfs
public void testDelegationTokenWithWebhdfsFileSystem() throws Exception {
  testDelegationTokenWithinDoAs(WebHdfsFileSystem.class, false);
}
Example #27
Source File: TestQuota.java From big-c with Apache License 2.0
/**
 * Violate a space quota using files of size < 1 block. Test that block
 * allocation conservatively assumes that for quota checking the entire
 * space of the block is used.
 */
@Test
public void testBlockAllocationAdjustsUsageConservatively() throws Exception {
  Configuration conf = new HdfsConfiguration();
  final int BLOCK_SIZE = 6 * 1024;
  conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
  conf.setBoolean(DFSConfigKeys.DFS_WEBHDFS_ENABLED_KEY, true);
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
  cluster.waitActive();
  FileSystem fs = cluster.getFileSystem();
  DFSAdmin admin = new DFSAdmin(conf);

  final String nnAddr = conf.get(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY);
  final String webhdfsuri = WebHdfsFileSystem.SCHEME + "://" + nnAddr;
  System.out.println("webhdfsuri=" + webhdfsuri);
  final FileSystem webhdfs = new Path(webhdfsuri).getFileSystem(conf);

  try {
    Path dir = new Path("/test");
    Path file1 = new Path("/test/test1");
    Path file2 = new Path("/test/test2");
    boolean exceededQuota = false;
    final int QUOTA_SIZE = 3 * BLOCK_SIZE; // total space usage including repl.
    final int FILE_SIZE = BLOCK_SIZE / 2;
    ContentSummary c;

    // Create the directory and set the quota
    assertTrue(fs.mkdirs(dir));
    runCommand(admin, false, "-setSpaceQuota", Integer.toString(QUOTA_SIZE),
        dir.toString());

    // Creating a file should use half the quota
    DFSTestUtil.createFile(fs, file1, FILE_SIZE, (short) 3, 1L);
    DFSTestUtil.waitReplication(fs, file1, (short) 3);
    c = fs.getContentSummary(dir);
    checkContentSummary(c, webhdfs.getContentSummary(dir));
    assertEquals("Quota is half consumed", QUOTA_SIZE / 2,
        c.getSpaceConsumed());

    // We can not create the 2nd file because even though the total spaced
    // used by two files (2 * 3 * 512/2) would fit within the quota (3 * 512)
    // when a block for a file is created the space used is adjusted
    // conservatively (3 * block size, ie assumes a full block is written)
    // which will violate the quota (3 * block size) since we've already
    // used half the quota for the first file.
    try {
      DFSTestUtil.createFile(fs, file2, FILE_SIZE, (short) 3, 1L);
    } catch (QuotaExceededException e) {
      exceededQuota = true;
    }
    assertTrue("Quota not exceeded", exceededQuota);
  } finally {
    cluster.shutdown();
  }
}
Example #28
Source File: TestOfflineImageViewerForAcl.java From big-c with Apache License 2.0
@Test
public void testWebImageViewerForAcl() throws Exception {
  WebImageViewer viewer = new WebImageViewer(
      NetUtils.createSocketAddr("localhost:0"));
  try {
    viewer.initServer(originalFsimage.getAbsolutePath());
    int port = viewer.getPort();

    // create a WebHdfsFileSystem instance
    URI uri = new URI("webhdfs://localhost:" + String.valueOf(port));
    Configuration conf = new Configuration();
    WebHdfsFileSystem webhdfs = (WebHdfsFileSystem)FileSystem.get(uri, conf);

    // GETACLSTATUS operation to a directory without ACL
    AclStatus acl = webhdfs.getAclStatus(new Path("/dirWithNoAcl"));
    assertEquals(writtenAcls.get("/dirWithNoAcl"), acl);

    // GETACLSTATUS operation to a directory with a default ACL
    acl = webhdfs.getAclStatus(new Path("/dirWithDefaultAcl"));
    assertEquals(writtenAcls.get("/dirWithDefaultAcl"), acl);

    // GETACLSTATUS operation to a file without ACL
    acl = webhdfs.getAclStatus(new Path("/noAcl"));
    assertEquals(writtenAcls.get("/noAcl"), acl);

    // GETACLSTATUS operation to a file with a ACL
    acl = webhdfs.getAclStatus(new Path("/withAcl"));
    assertEquals(writtenAcls.get("/withAcl"), acl);

    // GETACLSTATUS operation to a file with several ACL entries
    acl = webhdfs.getAclStatus(new Path("/withSeveralAcls"));
    assertEquals(writtenAcls.get("/withSeveralAcls"), acl);

    // GETACLSTATUS operation to a invalid path
    URL url = new URL("http://localhost:" + port
        + "/webhdfs/v1/invalid/?op=GETACLSTATUS");
    HttpURLConnection connection = (HttpURLConnection) url.openConnection();
    connection.setRequestMethod("GET");
    connection.connect();
    assertEquals(HttpURLConnection.HTTP_NOT_FOUND,
        connection.getResponseCode());
  } finally {
    // shutdown the viewer
    viewer.close();
  }
}
Example #29
Source File: TestEncryptionZones.java From big-c with Apache License 2.0
@Test(timeout = 120000)
public void testReadWriteUsingWebHdfs() throws Exception {
  final HdfsAdmin dfsAdmin = new HdfsAdmin(FileSystem.getDefaultUri(conf), conf);
  final FileSystem webHdfsFs = WebHdfsTestUtil.getWebHdfsFileSystem(conf,
      WebHdfsFileSystem.SCHEME);

  final Path zone = new Path("/zone");
  fs.mkdirs(zone);
  dfsAdmin.createEncryptionZone(zone, TEST_KEY);

  /* Create an unencrypted file for comparison purposes. */
  final Path unencFile = new Path("/unenc");
  final int len = 8192;
  DFSTestUtil.createFile(webHdfsFs, unencFile, len, (short) 1, 0xFEED);

  /*
   * Create the same file via webhdfs, but this time encrypted. Compare it
   * using both webhdfs and DFS.
   */
  final Path encFile1 = new Path(zone, "myfile");
  DFSTestUtil.createFile(webHdfsFs, encFile1, len, (short) 1, 0xFEED);
  verifyFilesEqual(webHdfsFs, unencFile, encFile1, len);
  verifyFilesEqual(fs, unencFile, encFile1, len);

  /*
   * Same thing except this time create the encrypted file using DFS.
   */
  final Path encFile2 = new Path(zone, "myfile2");
  DFSTestUtil.createFile(fs, encFile2, len, (short) 1, 0xFEED);
  verifyFilesEqual(webHdfsFs, unencFile, encFile2, len);
  verifyFilesEqual(fs, unencFile, encFile2, len);

  /* Verify appending to files works correctly. */
  appendOneByte(fs, unencFile);
  appendOneByte(webHdfsFs, encFile1);
  appendOneByte(fs, encFile2);
  verifyFilesEqual(webHdfsFs, unencFile, encFile1, len);
  verifyFilesEqual(fs, unencFile, encFile1, len);
  verifyFilesEqual(webHdfsFs, unencFile, encFile2, len);
  verifyFilesEqual(fs, unencFile, encFile2, len);
}
Example #30
Source File: TestHttpFSFWithWebhdfsFileSystem.java From hadoop with Apache License 2.0
@Override
protected Class getFileSystemClass() {
  return WebHdfsFileSystem.class;
}