Java Code Examples for org.apache.hadoop.hdfs.HdfsConfiguration#setBoolean()
The following examples show how to use org.apache.hadoop.hdfs.HdfsConfiguration#setBoolean(). Each snippet is taken from an open-source project; the source file, originating project, and license are noted above it.
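For orientation, here is a minimal sketch of the common pattern the examples below follow: create an HdfsConfiguration, set boolean flags with setBoolean(key, value), and pass the configuration to whatever consumes it (in most of these tests, a MiniDFSCluster builder). The DFSConfigKeys constants are the same ones used in the examples; the class name and the read-back check are illustrative assumptions, not taken from any of the projects.

import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.HdfsConfiguration;

public class HdfsSetBooleanSketch {
  public static void main(String[] args) {
    // HdfsConfiguration layers hdfs-default.xml and hdfs-site.xml on top of
    // the core Hadoop defaults.
    HdfsConfiguration conf = new HdfsConfiguration();

    // setBoolean(key, value) stores a boolean property under the given key.
    conf.setBoolean(DFSConfigKeys.DFS_WEBHDFS_ENABLED_KEY, true);
    conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);

    // getBoolean(key, defaultValue) reads the flag back, returning the
    // default when the key was never set (illustrative check only).
    boolean webhdfsEnabled =
        conf.getBoolean(DFSConfigKeys.DFS_WEBHDFS_ENABLED_KEY, false);
    System.out.println("WebHDFS enabled: " + webhdfsEnabled);
  }
}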
Example 1
Source File: TestDelegationTokenForProxyUser.java From big-c with Apache License 2.0
@BeforeClass
public static void setUp() throws Exception {
  config = new HdfsConfiguration();
  config.setBoolean(DFSConfigKeys.DFS_WEBHDFS_ENABLED_KEY, true);
  config.setLong(
      DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_MAX_LIFETIME_KEY, 10000);
  config.setLong(
      DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_RENEW_INTERVAL_KEY, 5000);
  config.setStrings(DefaultImpersonationProvider.getTestProvider()
      .getProxySuperuserGroupConfKey(REAL_USER), "group1");
  config.setBoolean(
      DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY, true);
  configureSuperUserIPAddresses(config, REAL_USER);
  FileSystem.setDefaultUri(config, "hdfs://localhost:" + "0");
  cluster = new MiniDFSCluster.Builder(config).build();
  cluster.waitActive();
  ProxyUsers.refreshSuperUserGroupsConfiguration(config);
  ugi = UserGroupInformation.createRemoteUser(REAL_USER);
  proxyUgi = UserGroupInformation.createProxyUserForTesting(PROXY_USER, ugi,
      GROUP_NAMES);
}
Example 2
Source File: TestWithSecureMiniDFSCluster.java From streamx with Apache License 2.0
private Configuration createSecureConfig(String dataTransferProtection)
    throws Exception {
  HdfsConfiguration conf = new HdfsConfiguration();
  SecurityUtil.setAuthenticationMethod(
      UserGroupInformation.AuthenticationMethod.KERBEROS, conf);
  conf.set(DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY, hdfsPrincipal);
  conf.set(DFS_NAMENODE_KEYTAB_FILE_KEY, keytab);
  conf.set(DFS_DATANODE_KERBEROS_PRINCIPAL_KEY, hdfsPrincipal);
  conf.set(DFS_DATANODE_KEYTAB_FILE_KEY, keytab);
  conf.set(DFS_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY, spnegoPrincipal);
  conf.setBoolean(DFS_BLOCK_ACCESS_TOKEN_ENABLE_KEY, true);
  conf.set(DFS_DATA_TRANSFER_PROTECTION_KEY, dataTransferProtection);
  conf.set(DFS_HTTP_POLICY_KEY, HttpConfig.Policy.HTTPS_ONLY.name());
  conf.set(DFS_NAMENODE_HTTPS_ADDRESS_KEY, "localhost:0");
  conf.set(DFS_DATANODE_HTTPS_ADDRESS_KEY, "localhost:0");
  conf.setInt(IPC_CLIENT_CONNECT_MAX_RETRIES_ON_SASL_KEY, 10);
  conf.set(DFS_ENCRYPT_DATA_TRANSFER_KEY, "true"); //https://issues.apache.org/jira/browse/HDFS-7431
  String keystoresDir = baseDir.getAbsolutePath();
  String sslConfDir = KeyStoreTestUtil.getClasspathDir(this.getClass());
  KeyStoreTestUtil.setupSSLConfig(keystoresDir, sslConfDir, conf, false);
  return conf;
}
Example 3
Source File: SaslDataTransferTestCase.java From hadoop with Apache License 2.0
/**
 * Creates configuration for starting a secure cluster.
 *
 * @param dataTransferProtection supported QOPs
 * @return configuration for starting a secure cluster
 * @throws Exception if there is any failure
 */
protected HdfsConfiguration createSecureConfig(
    String dataTransferProtection) throws Exception {
  HdfsConfiguration conf = new HdfsConfiguration();
  SecurityUtil.setAuthenticationMethod(AuthenticationMethod.KERBEROS, conf);
  conf.set(DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY, hdfsPrincipal);
  conf.set(DFS_NAMENODE_KEYTAB_FILE_KEY, keytab);
  conf.set(DFS_DATANODE_KERBEROS_PRINCIPAL_KEY, hdfsPrincipal);
  conf.set(DFS_DATANODE_KEYTAB_FILE_KEY, keytab);
  conf.set(DFS_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY, spnegoPrincipal);
  conf.setBoolean(DFS_BLOCK_ACCESS_TOKEN_ENABLE_KEY, true);
  conf.set(DFS_DATA_TRANSFER_PROTECTION_KEY, dataTransferProtection);
  conf.set(DFS_HTTP_POLICY_KEY, HttpConfig.Policy.HTTPS_ONLY.name());
  conf.set(DFS_NAMENODE_HTTPS_ADDRESS_KEY, "localhost:0");
  conf.set(DFS_DATANODE_HTTPS_ADDRESS_KEY, "localhost:0");
  conf.setInt(IPC_CLIENT_CONNECT_MAX_RETRIES_ON_SASL_KEY, 10);
  String keystoresDir = baseDir.getAbsolutePath();
  String sslConfDir = KeyStoreTestUtil.getClasspathDir(this.getClass());
  KeyStoreTestUtil.setupSSLConfig(keystoresDir, sslConfDir, conf, false);
  return conf;
}
Example 4
Source File: TestNameNodeRetryCacheMetrics.java From hadoop with Apache License 2.0
/** Start a cluster */
@Before
public void setup() throws Exception {
  conf = new HdfsConfiguration();
  conf.setBoolean(DFS_NAMENODE_ENABLE_RETRY_CACHE_KEY, true);
  conf.setInt(DFSConfigKeys.DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_KEY, 2);
  cluster = new MiniDFSCluster.Builder(conf)
      .nnTopology(MiniDFSNNTopology.simpleHATopology()).numDataNodes(3)
      .build();
  cluster.waitActive();
  cluster.transitionToActive(namenodeId);
  HATestUtil.setFailoverConfigurations(cluster, conf);
  filesystem = (DistributedFileSystem) HATestUtil.configureFailoverFs(cluster, conf);
  namesystem = cluster.getNamesystem(namenodeId);
  metrics = namesystem.getRetryCache().getMetricsForTests();
}
Example 5
Source File: TestNamenodeRetryCache.java From big-c with Apache License 2.0
/** Start a cluster */
@Before
public void setup() throws Exception {
  conf = new HdfsConfiguration();
  conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BlockSize);
  conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ENABLE_RETRY_CACHE_KEY, true);
  conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
  cluster = new MiniDFSCluster.Builder(conf).build();
  cluster.waitActive();
  nnRpc = cluster.getNameNode().getRpcServer();
  filesystem = cluster.getFileSystem();
}
Example 6
Source File: TestFsDatasetCacheRevocation.java From big-c with Apache License 2.0
private static Configuration getDefaultConf() {
  HdfsConfiguration conf = new HdfsConfiguration();
  conf.setLong(
      DFSConfigKeys.DFS_NAMENODE_PATH_BASED_CACHE_REFRESH_INTERVAL_MS, 50);
  conf.setLong(DFSConfigKeys.DFS_CACHEREPORT_INTERVAL_MSEC_KEY, 250);
  conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
  conf.setLong(DFSConfigKeys.DFS_DATANODE_MAX_LOCKED_MEMORY_KEY,
      TestFsDatasetCache.CACHE_CAPACITY);
  conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
  conf.setBoolean(DFSConfigKeys.DFS_CLIENT_READ_SHORTCIRCUIT_KEY, true);
  conf.set(DFSConfigKeys.DFS_DOMAIN_SOCKET_PATH_KEY,
      new File(sockDir.getDir(), "sock").getAbsolutePath());
  return conf;
}
Example 7
Source File: TestDFSHAAdmin.java From hadoop with Apache License 2.0
@Test
public void testFailoverWithAutoHa() throws Exception {
  Mockito.doReturn(STANDBY_READY_RESULT).when(mockProtocol).getServiceStatus();
  // Turn on auto-HA in the config
  HdfsConfiguration conf = getHAConf();
  conf.setBoolean(DFSConfigKeys.DFS_HA_AUTO_FAILOVER_ENABLED_KEY, true);
  conf.set(DFSConfigKeys.DFS_HA_FENCE_METHODS_KEY, getFencerTrueCommand());
  tool.setConf(conf);

  assertEquals(0, runTool("-failover", "nn1", "nn2"));
  Mockito.verify(mockZkfcProtocol).gracefulFailover();
}
Example 8
Source File: TestSaslDataTransfer.java From big-c with Apache License 2.0
@Test
public void testNoSaslAndSecurePortsIgnored() throws Exception {
  HdfsConfiguration clusterConf = createSecureConfig("");
  clusterConf.setBoolean(IGNORE_SECURE_PORTS_FOR_TESTING_KEY, true);
  startCluster(clusterConf);
  doTest(clusterConf);
}
Example 9
Source File: TestDFSHAAdmin.java From hadoop with Apache License 2.0
/**
 * Test that, if automatic HA is enabled, none of the mutative operations
 * will succeed, unless the -forcemanual flag is specified.
 * @throws Exception
 */
@Test
public void testMutativeOperationsWithAutoHaEnabled() throws Exception {
  Mockito.doReturn(STANDBY_READY_RESULT).when(mockProtocol).getServiceStatus();

  // Turn on auto-HA in the config
  HdfsConfiguration conf = getHAConf();
  conf.setBoolean(DFSConfigKeys.DFS_HA_AUTO_FAILOVER_ENABLED_KEY, true);
  conf.set(DFSConfigKeys.DFS_HA_FENCE_METHODS_KEY, getFencerTrueCommand());
  tool.setConf(conf);

  // Should fail without the forcemanual flag
  assertEquals(-1, runTool("-transitionToActive", "nn1"));
  assertTrue(errOutput.contains("Refusing to manually manage"));
  assertEquals(-1, runTool("-transitionToStandby", "nn1"));
  assertTrue(errOutput.contains("Refusing to manually manage"));

  Mockito.verify(mockProtocol, Mockito.never())
      .transitionToActive(anyReqInfo());
  Mockito.verify(mockProtocol, Mockito.never())
      .transitionToStandby(anyReqInfo());

  // Force flag should bypass the check and change the request source
  // for the RPC
  setupConfirmationOnSystemIn();
  assertEquals(0, runTool("-transitionToActive", "-forcemanual", "nn1"));
  setupConfirmationOnSystemIn();
  assertEquals(0, runTool("-transitionToStandby", "-forcemanual", "nn1"));

  Mockito.verify(mockProtocol, Mockito.times(1)).transitionToActive(
      reqInfoCaptor.capture());
  Mockito.verify(mockProtocol, Mockito.times(1)).transitionToStandby(
      reqInfoCaptor.capture());

  // All of the RPCs should have had the "force" source
  for (StateChangeRequestInfo ri : reqInfoCaptor.getAllValues()) {
    assertEquals(RequestSource.REQUEST_BY_USER_FORCED, ri.getSource());
  }
}
Example 10
Source File: TestFsDatasetCacheRevocation.java From hadoop with Apache License 2.0
private static Configuration getDefaultConf() {
  HdfsConfiguration conf = new HdfsConfiguration();
  conf.setLong(
      DFSConfigKeys.DFS_NAMENODE_PATH_BASED_CACHE_REFRESH_INTERVAL_MS, 50);
  conf.setLong(DFSConfigKeys.DFS_CACHEREPORT_INTERVAL_MSEC_KEY, 250);
  conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
  conf.setLong(DFSConfigKeys.DFS_DATANODE_MAX_LOCKED_MEMORY_KEY,
      TestFsDatasetCache.CACHE_CAPACITY);
  conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
  conf.setBoolean(DFSConfigKeys.DFS_CLIENT_READ_SHORTCIRCUIT_KEY, true);
  conf.set(DFSConfigKeys.DFS_DOMAIN_SOCKET_PATH_KEY,
      new File(sockDir.getDir(), "sock").getAbsolutePath());
  return conf;
}
Example 11
Source File: TestDecommissioningStatus.java From hadoop with Apache License 2.0
@BeforeClass
public static void setUp() throws Exception {
  conf = new HdfsConfiguration();
  conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_REPLICATION_CONSIDERLOAD_KEY,
      false);

  // Set up the hosts/exclude files.
  localFileSys = FileSystem.getLocal(conf);
  Path workingDir = localFileSys.getWorkingDirectory();
  dir = new Path(workingDir, "build/test/data/work-dir/decommission");
  assertTrue(localFileSys.mkdirs(dir));
  excludeFile = new Path(dir, "exclude");
  conf.set(DFSConfigKeys.DFS_HOSTS_EXCLUDE, excludeFile.toUri().getPath());
  Path includeFile = new Path(dir, "include");
  conf.set(DFSConfigKeys.DFS_HOSTS, includeFile.toUri().getPath());
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 1000);
  conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY, 4);
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, 1000);
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_INTERVAL_KEY, 1);
  conf.setLong(DFSConfigKeys.DFS_DATANODE_BALANCE_BANDWIDTHPERSEC_KEY, 1);
  writeConfigFile(localFileSys, excludeFile, null);
  writeConfigFile(localFileSys, includeFile, null);

  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes).build();
  cluster.waitActive();
  fileSys = cluster.getFileSystem();
  cluster.getNamesystem().getBlockManager().getDatanodeManager()
      .setHeartbeatExpireInterval(3000);
  Logger.getLogger(DecommissionManager.class).setLevel(Level.DEBUG);
}
Example 12
Source File: TestStickyBit.java From hadoop with Apache License 2.0
@BeforeClass
public static void init() throws Exception {
  conf = new HdfsConfiguration();
  conf.setBoolean(DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY, true);
  conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
  initCluster(true);
}
Example 13
Source File: TestAuditLogs.java From hadoop with Apache License 2.0
@Before
public void setupCluster() throws Exception {
  // must configure prior to instantiating the namesystem because it
  // will reconfigure the logger if async is enabled
  configureAuditLogs();
  conf = new HdfsConfiguration();
  final long precision = 1L;
  conf.setLong(DFSConfigKeys.DFS_NAMENODE_ACCESSTIME_PRECISION_KEY, precision);
  conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 10000L);
  conf.setBoolean(DFSConfigKeys.DFS_WEBHDFS_ENABLED_KEY, true);
  conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_AUDIT_LOG_ASYNC_KEY, useAsyncLog);
  util = new DFSTestUtil.Builder().setName("TestAuditAllowed").
      setNumFiles(20).build();
  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(4).build();
  fs = cluster.getFileSystem();
  util.createFiles(fs, fileName);

  // make sure the appender is what it's supposed to be
  Logger logger = ((Log4JLogger) FSNamesystem.auditLog).getLogger();
  @SuppressWarnings("unchecked")
  List<Appender> appenders = Collections.list(logger.getAllAppenders());
  assertEquals(1, appenders.size());
  assertEquals(useAsyncLog, appenders.get(0) instanceof AsyncAppender);

  fnames = util.getFileNames(fileName);
  util.waitReplication(fs, fileName, (short)3);
  userGroupInfo = UserGroupInformation.createUserForTesting(username, groups);
}
Example 14
Source File: FSXAttrBaseTest.java From hadoop with Apache License 2.0
@BeforeClass
public static void init() throws Exception {
  conf = new HdfsConfiguration();
  conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_XATTRS_ENABLED_KEY, true);
  conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_MAX_XATTRS_PER_INODE_KEY, 3);
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_MAX_XATTR_SIZE_KEY, MAX_SIZE);
  initCluster(true);
}
Example 15
Source File: TestSaslDataTransfer.java From hadoop with Apache License 2.0
@Test
public void testNoSaslAndSecurePortsIgnored() throws Exception {
  HdfsConfiguration clusterConf = createSecureConfig("");
  clusterConf.setBoolean(IGNORE_SECURE_PORTS_FOR_TESTING_KEY, true);
  startCluster(clusterConf);
  doTest(clusterConf);
}
Example 16
Source File: TestDFSHAAdmin.java From big-c with Apache License 2.0
/**
 * Test that, if automatic HA is enabled, none of the mutative operations
 * will succeed, unless the -forcemanual flag is specified.
 * @throws Exception
 */
@Test
public void testMutativeOperationsWithAutoHaEnabled() throws Exception {
  Mockito.doReturn(STANDBY_READY_RESULT).when(mockProtocol).getServiceStatus();

  // Turn on auto-HA in the config
  HdfsConfiguration conf = getHAConf();
  conf.setBoolean(DFSConfigKeys.DFS_HA_AUTO_FAILOVER_ENABLED_KEY, true);
  conf.set(DFSConfigKeys.DFS_HA_FENCE_METHODS_KEY, getFencerTrueCommand());
  tool.setConf(conf);

  // Should fail without the forcemanual flag
  assertEquals(-1, runTool("-transitionToActive", "nn1"));
  assertTrue(errOutput.contains("Refusing to manually manage"));
  assertEquals(-1, runTool("-transitionToStandby", "nn1"));
  assertTrue(errOutput.contains("Refusing to manually manage"));

  Mockito.verify(mockProtocol, Mockito.never())
      .transitionToActive(anyReqInfo());
  Mockito.verify(mockProtocol, Mockito.never())
      .transitionToStandby(anyReqInfo());

  // Force flag should bypass the check and change the request source
  // for the RPC
  setupConfirmationOnSystemIn();
  assertEquals(0, runTool("-transitionToActive", "-forcemanual", "nn1"));
  setupConfirmationOnSystemIn();
  assertEquals(0, runTool("-transitionToStandby", "-forcemanual", "nn1"));

  Mockito.verify(mockProtocol, Mockito.times(1)).transitionToActive(
      reqInfoCaptor.capture());
  Mockito.verify(mockProtocol, Mockito.times(1)).transitionToStandby(
      reqInfoCaptor.capture());

  // All of the RPCs should have had the "force" source
  for (StateChangeRequestInfo ri : reqInfoCaptor.getAllValues()) {
    assertEquals(RequestSource.REQUEST_BY_USER_FORCED, ri.getSource());
  }
}
Example 17
Source File: TestStorageRestore.java From big-c with Apache License 2.0
@Before
public void setUpNameDirs() throws Exception {
  config = new HdfsConfiguration();
  hdfsDir = new File(MiniDFSCluster.getBaseDirectory()).getCanonicalFile();
  if (hdfsDir.exists() && !FileUtil.fullyDelete(hdfsDir)) {
    throw new IOException("Could not delete hdfs directory '" + hdfsDir + "'");
  }

  hdfsDir.mkdirs();
  path1 = new File(hdfsDir, "name1");
  path2 = new File(hdfsDir, "name2");
  path3 = new File(hdfsDir, "name3");
  path1.mkdir();
  path2.mkdir();
  path3.mkdir();
  if (!path2.exists() || !path3.exists() || !path1.exists()) {
    throw new IOException("Couldn't create dfs.name dirs in "
        + hdfsDir.getAbsolutePath());
  }

  String dfs_name_dir = new String(path1.getPath() + "," + path2.getPath());
  System.out.println("configuring hdfsdir is " + hdfsDir.getAbsolutePath()
      + "; dfs_name_dir = " + dfs_name_dir + ";dfs_name_edits_dir(only)="
      + path3.getPath());

  config.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, dfs_name_dir);
  config.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY,
      dfs_name_dir + "," + path3.getPath());
  config.set(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_DIR_KEY,
      new File(hdfsDir, "secondary").getPath());

  FileSystem.setDefaultUri(config, "hdfs://" + NAME_NODE_HOST + "0");
  config.set(DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY, "0.0.0.0:0");

  // set the restore feature on
  config.setBoolean(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_RESTORE_KEY, true);
}
Example 18
Source File: TestDecommissioningStatus.java From big-c with Apache License 2.0
@BeforeClass
public static void setUp() throws Exception {
  conf = new HdfsConfiguration();
  conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_REPLICATION_CONSIDERLOAD_KEY,
      false);

  // Set up the hosts/exclude files.
  localFileSys = FileSystem.getLocal(conf);
  Path workingDir = localFileSys.getWorkingDirectory();
  dir = new Path(workingDir, "build/test/data/work-dir/decommission");
  assertTrue(localFileSys.mkdirs(dir));
  excludeFile = new Path(dir, "exclude");
  conf.set(DFSConfigKeys.DFS_HOSTS_EXCLUDE, excludeFile.toUri().getPath());
  Path includeFile = new Path(dir, "include");
  conf.set(DFSConfigKeys.DFS_HOSTS, includeFile.toUri().getPath());
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 1000);
  conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY, 4);
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, 1000);
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_INTERVAL_KEY, 1);
  conf.setLong(DFSConfigKeys.DFS_DATANODE_BALANCE_BANDWIDTHPERSEC_KEY, 1);
  writeConfigFile(localFileSys, excludeFile, null);
  writeConfigFile(localFileSys, includeFile, null);

  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes).build();
  cluster.waitActive();
  fileSys = cluster.getFileSystem();
  cluster.getNamesystem().getBlockManager().getDatanodeManager()
      .setHeartbeatExpireInterval(3000);
  Logger.getLogger(DecommissionManager.class).setLevel(Level.DEBUG);
}
Example 19
Source File: TestEnhancedByteBufferAccess.java From big-c with Apache License 2.0
@Test
public void testClientMmapDisable() throws Exception {
  HdfsConfiguration conf = initZeroCopyTest();
  conf.setBoolean(DFS_CLIENT_MMAP_ENABLED, false);
  MiniDFSCluster cluster = null;
  final Path TEST_PATH = new Path("/a");
  final int TEST_FILE_LENGTH = 16385;
  final int RANDOM_SEED = 23453;
  final String CONTEXT = "testClientMmapDisable";
  FSDataInputStream fsIn = null;
  DistributedFileSystem fs = null;
  conf.set(DFSConfigKeys.DFS_CLIENT_CONTEXT, CONTEXT);

  try {
    // With DFS_CLIENT_MMAP_ENABLED set to false, we should not do memory
    // mapped reads.
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    cluster.waitActive();
    fs = cluster.getFileSystem();
    DFSTestUtil.createFile(fs, TEST_PATH,
        TEST_FILE_LENGTH, (short)1, RANDOM_SEED);
    DFSTestUtil.waitReplication(fs, TEST_PATH, (short)1);
    fsIn = fs.open(TEST_PATH);
    try {
      fsIn.read(null, 1, EnumSet.of(ReadOption.SKIP_CHECKSUMS));
      Assert.fail("expected zero-copy read to fail when client mmaps " +
          "were disabled.");
    } catch (UnsupportedOperationException e) {
    }
  } finally {
    if (fsIn != null) fsIn.close();
    if (fs != null) fs.close();
    if (cluster != null) cluster.shutdown();
  }

  fsIn = null;
  fs = null;
  cluster = null;
  try {
    // Now try again with DFS_CLIENT_MMAP_CACHE_SIZE == 0. It should work.
    conf.setBoolean(DFS_CLIENT_MMAP_ENABLED, true);
    conf.setInt(DFS_CLIENT_MMAP_CACHE_SIZE, 0);
    conf.set(DFSConfigKeys.DFS_CLIENT_CONTEXT, CONTEXT + ".1");
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    cluster.waitActive();
    fs = cluster.getFileSystem();
    DFSTestUtil.createFile(fs, TEST_PATH,
        TEST_FILE_LENGTH, (short)1, RANDOM_SEED);
    DFSTestUtil.waitReplication(fs, TEST_PATH, (short)1);
    fsIn = fs.open(TEST_PATH);
    ByteBuffer buf = fsIn.read(null, 1, EnumSet.of(ReadOption.SKIP_CHECKSUMS));
    fsIn.releaseBuffer(buf);
    // Test EOF behavior
    IOUtils.skipFully(fsIn, TEST_FILE_LENGTH - 1);
    buf = fsIn.read(null, 1, EnumSet.of(ReadOption.SKIP_CHECKSUMS));
    Assert.assertEquals(null, buf);
  } finally {
    if (fsIn != null) fsIn.close();
    if (fs != null) fs.close();
    if (cluster != null) cluster.shutdown();
  }
}
Example 20
Source File: TestEnhancedByteBufferAccess.java From hadoop with Apache License 2.0
@Test
public void testClientMmapDisable() throws Exception {
  HdfsConfiguration conf = initZeroCopyTest();
  conf.setBoolean(DFS_CLIENT_MMAP_ENABLED, false);
  MiniDFSCluster cluster = null;
  final Path TEST_PATH = new Path("/a");
  final int TEST_FILE_LENGTH = 16385;
  final int RANDOM_SEED = 23453;
  final String CONTEXT = "testClientMmapDisable";
  FSDataInputStream fsIn = null;
  DistributedFileSystem fs = null;
  conf.set(DFSConfigKeys.DFS_CLIENT_CONTEXT, CONTEXT);

  try {
    // With DFS_CLIENT_MMAP_ENABLED set to false, we should not do memory
    // mapped reads.
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    cluster.waitActive();
    fs = cluster.getFileSystem();
    DFSTestUtil.createFile(fs, TEST_PATH,
        TEST_FILE_LENGTH, (short)1, RANDOM_SEED);
    DFSTestUtil.waitReplication(fs, TEST_PATH, (short)1);
    fsIn = fs.open(TEST_PATH);
    try {
      fsIn.read(null, 1, EnumSet.of(ReadOption.SKIP_CHECKSUMS));
      Assert.fail("expected zero-copy read to fail when client mmaps " +
          "were disabled.");
    } catch (UnsupportedOperationException e) {
    }
  } finally {
    if (fsIn != null) fsIn.close();
    if (fs != null) fs.close();
    if (cluster != null) cluster.shutdown();
  }

  fsIn = null;
  fs = null;
  cluster = null;
  try {
    // Now try again with DFS_CLIENT_MMAP_CACHE_SIZE == 0. It should work.
    conf.setBoolean(DFS_CLIENT_MMAP_ENABLED, true);
    conf.setInt(DFS_CLIENT_MMAP_CACHE_SIZE, 0);
    conf.set(DFSConfigKeys.DFS_CLIENT_CONTEXT, CONTEXT + ".1");
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    cluster.waitActive();
    fs = cluster.getFileSystem();
    DFSTestUtil.createFile(fs, TEST_PATH,
        TEST_FILE_LENGTH, (short)1, RANDOM_SEED);
    DFSTestUtil.waitReplication(fs, TEST_PATH, (short)1);
    fsIn = fs.open(TEST_PATH);
    ByteBuffer buf = fsIn.read(null, 1, EnumSet.of(ReadOption.SKIP_CHECKSUMS));
    fsIn.releaseBuffer(buf);
    // Test EOF behavior
    IOUtils.skipFully(fsIn, TEST_FILE_LENGTH - 1);
    buf = fsIn.read(null, 1, EnumSet.of(ReadOption.SKIP_CHECKSUMS));
    Assert.assertEquals(null, buf);
  } finally {
    if (fsIn != null) fsIn.close();
    if (fs != null) fs.close();
    if (cluster != null) cluster.shutdown();
  }
}