Java Code Examples for org.apache.hadoop.hdfs.HdfsConfiguration#setLong()
The following examples show how to use org.apache.hadoop.hdfs.HdfsConfiguration#setLong().
All examples are taken from Hadoop test code; the source file and originating project are noted above each example.
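Before the examples, here is a minimal sketch of the call itself. setLong() is inherited from org.apache.hadoop.conf.Configuration and stores a long-valued property under the given key. The class and key names match the Hadoop APIs used in the examples below, but the chosen value and the surrounding main() wrapper are illustrative only, not taken from any of the test files.

import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.HdfsConfiguration;

public class SetLongSketch {
  public static void main(String[] args) {
    HdfsConfiguration conf = new HdfsConfiguration();
    // Store a long-valued property: here, an illustrative 1 MiB HDFS block size.
    conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 1024 * 1024L);
    // Read the value back; the second argument is the default if the key is unset.
    long blockSize = conf.getLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 0L);
    System.out.println("dfs.blocksize = " + blockSize);
  }
}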
Example 1
Source File: TestDataNodeVolumeFailureReporting.java From hadoop with Apache License 2.0
/**
 * Initializes the cluster.
 *
 * @param numDataNodes number of datanodes
 * @param storagesPerDatanode number of storage locations on each datanode
 * @param failedVolumesTolerated number of acceptable volume failures
 * @throws Exception if there is any failure
 */
private void initCluster(int numDataNodes, int storagesPerDatanode,
    int failedVolumesTolerated) throws Exception {
  conf = new HdfsConfiguration();
  conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 512L);
  /*
   * Lower the DN heartbeat, DF rate, and recheck interval to one second
   * so state about failures and datanode death propagates faster.
   */
  conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
  conf.setInt(DFSConfigKeys.DFS_DF_INTERVAL_KEY, 1000);
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 1000);
  conf.setInt(DFSConfigKeys.DFS_DATANODE_FAILED_VOLUMES_TOLERATED_KEY,
      failedVolumesTolerated);
  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDataNodes)
      .storagesPerDatanode(storagesPerDatanode).build();
  cluster.waitActive();
  fs = cluster.getFileSystem();
  dataDir = cluster.getDataDirectory();
  long dnCapacity = DFSTestUtil.getDatanodeCapacity(
      cluster.getNamesystem().getBlockManager().getDatanodeManager(), 0);
  volumeCapacity = dnCapacity / cluster.getStoragesPerDatanode();
}
Example 2
Source File: TestDataNodeVolumeFailureToleration.java From big-c with Apache License 2.0
@Before
public void setUp() throws Exception {
  conf = new HdfsConfiguration();
  conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 512L);
  /*
   * Lower the DN heartbeat, DF rate, and recheck interval to one second
   * so state about failures and datanode death propagates faster.
   */
  conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
  conf.setInt(DFSConfigKeys.DFS_DF_INTERVAL_KEY, 1000);
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 1000);
  // Allow a single volume failure (there are two volumes)
  conf.setInt(DFSConfigKeys.DFS_DATANODE_FAILED_VOLUMES_TOLERATED_KEY, 1);
  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
  cluster.waitActive();
  fs = cluster.getFileSystem();
  dataDir = cluster.getDataDirectory();
}
Example 3
Source File: TestDataNodeVolumeFailureToleration.java From hadoop with Apache License 2.0
@Before
public void setUp() throws Exception {
  conf = new HdfsConfiguration();
  conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 512L);
  /*
   * Lower the DN heartbeat, DF rate, and recheck interval to one second
   * so state about failures and datanode death propagates faster.
   */
  conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
  conf.setInt(DFSConfigKeys.DFS_DF_INTERVAL_KEY, 1000);
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 1000);
  // Allow a single volume failure (there are two volumes)
  conf.setInt(DFSConfigKeys.DFS_DATANODE_FAILED_VOLUMES_TOLERATED_KEY, 1);
  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
  cluster.waitActive();
  fs = cluster.getFileSystem();
  dataDir = cluster.getDataDirectory();
}
Example 4
Source File: TestFileTruncate.java From hadoop with Apache License 2.0
@BeforeClass
public static void startUp() throws IOException {
  conf = new HdfsConfiguration();
  conf.setLong(DFSConfigKeys.DFS_NAMENODE_MIN_BLOCK_SIZE_KEY, BLOCK_SIZE);
  conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, BLOCK_SIZE);
  conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, SHORT_HEARTBEAT);
  conf.setLong(
      DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY, 1);
  cluster = new MiniDFSCluster.Builder(conf)
      .format(true)
      .numDataNodes(DATANODE_NUM)
      .nameNodePort(NameNode.DEFAULT_PORT)
      .waitSafeMode(true)
      .build();
  fs = cluster.getFileSystem();
}
Example 5
Source File: TestDataNodeVolumeFailure.java From big-c with Apache License 2.0
@Before
public void setUp() throws Exception {
  // bring up a cluster of 2
  conf = new HdfsConfiguration();
  conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, block_size);
  // Allow a single volume failure (there are two volumes)
  conf.setInt(DFSConfigKeys.DFS_DATANODE_FAILED_VOLUMES_TOLERATED_KEY, 1);
  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(dn_num).build();
  cluster.waitActive();
  fs = cluster.getFileSystem();
  dataDir = new File(cluster.getDataDirectory());
}
Example 6
Source File: TestAuditLogs.java From hadoop with Apache License 2.0
@Before
public void setupCluster() throws Exception {
  // must configure prior to instantiating the namesystem because it
  // will reconfigure the logger if async is enabled
  configureAuditLogs();
  conf = new HdfsConfiguration();
  final long precision = 1L;
  conf.setLong(DFSConfigKeys.DFS_NAMENODE_ACCESSTIME_PRECISION_KEY, precision);
  conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 10000L);
  conf.setBoolean(DFSConfigKeys.DFS_WEBHDFS_ENABLED_KEY, true);
  conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_AUDIT_LOG_ASYNC_KEY, useAsyncLog);
  util = new DFSTestUtil.Builder().setName("TestAuditAllowed").
      setNumFiles(20).build();
  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(4).build();
  fs = cluster.getFileSystem();
  util.createFiles(fs, fileName);

  // make sure the appender is what it's supposed to be
  Logger logger = ((Log4JLogger) FSNamesystem.auditLog).getLogger();
  @SuppressWarnings("unchecked")
  List<Appender> appenders = Collections.list(logger.getAllAppenders());
  assertEquals(1, appenders.size());
  assertEquals(useAsyncLog, appenders.get(0) instanceof AsyncAppender);

  fnames = util.getFileNames(fileName);
  util.waitReplication(fs, fileName, (short)3);
  userGroupInfo = UserGroupInformation.createUserForTesting(username, groups);
}
Example 7
Source File: TestCacheDirectives.java From hadoop with Apache License 2.0
private static HdfsConfiguration createCachingConf() {
  HdfsConfiguration conf = new HdfsConfiguration();
  conf.setLong(DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
  conf.setLong(DFS_DATANODE_MAX_LOCKED_MEMORY_KEY, CACHE_CAPACITY);
  conf.setLong(DFS_HEARTBEAT_INTERVAL_KEY, 1);
  conf.setLong(DFS_CACHEREPORT_INTERVAL_MSEC_KEY, 1000);
  conf.setLong(DFS_NAMENODE_PATH_BASED_CACHE_REFRESH_INTERVAL_MS, 1000);
  // set low limits here for testing purposes
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_LIST_CACHE_POOLS_NUM_RESPONSES, 2);
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_LIST_CACHE_DIRECTIVES_NUM_RESPONSES, 2);
  return conf;
}
Example 8
Source File: TestNamenodeRetryCache.java From hadoop with Apache License 2.0
/** Start a cluster */
@Before
public void setup() throws Exception {
  conf = new HdfsConfiguration();
  conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BlockSize);
  conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ENABLE_RETRY_CACHE_KEY, true);
  conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
  cluster = new MiniDFSCluster.Builder(conf).build();
  cluster.waitActive();
  nnRpc = cluster.getNameNode().getRpcServer();
  filesystem = cluster.getFileSystem();
}
Example 9
Source File: TestDecommissioningStatus.java From hadoop with Apache License 2.0
@BeforeClass
public static void setUp() throws Exception {
  conf = new HdfsConfiguration();
  conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_REPLICATION_CONSIDERLOAD_KEY,
      false);

  // Set up the hosts/exclude files.
  localFileSys = FileSystem.getLocal(conf);
  Path workingDir = localFileSys.getWorkingDirectory();
  dir = new Path(workingDir, "build/test/data/work-dir/decommission");
  assertTrue(localFileSys.mkdirs(dir));
  excludeFile = new Path(dir, "exclude");
  conf.set(DFSConfigKeys.DFS_HOSTS_EXCLUDE, excludeFile.toUri().getPath());
  Path includeFile = new Path(dir, "include");
  conf.set(DFSConfigKeys.DFS_HOSTS, includeFile.toUri().getPath());
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 1000);
  conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY, 4);
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, 1000);
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_INTERVAL_KEY, 1);
  conf.setLong(DFSConfigKeys.DFS_DATANODE_BALANCE_BANDWIDTHPERSEC_KEY, 1);
  writeConfigFile(localFileSys, excludeFile, null);
  writeConfigFile(localFileSys, includeFile, null);

  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes).build();
  cluster.waitActive();
  fileSys = cluster.getFileSystem();
  cluster.getNamesystem().getBlockManager().getDatanodeManager()
      .setHeartbeatExpireInterval(3000);
  Logger.getLogger(DecommissionManager.class).setLevel(Level.DEBUG);
}
Example 10
Source File: TestDataNodeVolumeFailure.java From hadoop with Apache License 2.0
@Before
public void setUp() throws Exception {
  // bring up a cluster of 2
  conf = new HdfsConfiguration();
  conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, block_size);
  // Allow a single volume failure (there are two volumes)
  conf.setInt(DFSConfigKeys.DFS_DATANODE_FAILED_VOLUMES_TOLERATED_KEY, 1);
  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(dn_num).build();
  cluster.waitActive();
  fs = cluster.getFileSystem();
  dataDir = new File(cluster.getDataDirectory());
}
Example 11
Source File: TestDecommissioningStatus.java From big-c with Apache License 2.0
@BeforeClass
public static void setUp() throws Exception {
  conf = new HdfsConfiguration();
  conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_REPLICATION_CONSIDERLOAD_KEY,
      false);

  // Set up the hosts/exclude files.
  localFileSys = FileSystem.getLocal(conf);
  Path workingDir = localFileSys.getWorkingDirectory();
  dir = new Path(workingDir, "build/test/data/work-dir/decommission");
  assertTrue(localFileSys.mkdirs(dir));
  excludeFile = new Path(dir, "exclude");
  conf.set(DFSConfigKeys.DFS_HOSTS_EXCLUDE, excludeFile.toUri().getPath());
  Path includeFile = new Path(dir, "include");
  conf.set(DFSConfigKeys.DFS_HOSTS, includeFile.toUri().getPath());
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 1000);
  conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY, 4);
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, 1000);
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_INTERVAL_KEY, 1);
  conf.setLong(DFSConfigKeys.DFS_DATANODE_BALANCE_BANDWIDTHPERSEC_KEY, 1);
  writeConfigFile(localFileSys, excludeFile, null);
  writeConfigFile(localFileSys, includeFile, null);

  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes).build();
  cluster.waitActive();
  fileSys = cluster.getFileSystem();
  cluster.getNamesystem().getBlockManager().getDatanodeManager()
      .setHeartbeatExpireInterval(3000);
  Logger.getLogger(DecommissionManager.class).setLevel(Level.DEBUG);
}
Example 12
Source File: TestFsDatasetCache.java From hadoop with Apache License 2.0
@Before
public void setUp() throws Exception {
  conf = new HdfsConfiguration();
  conf.setLong(
      DFSConfigKeys.DFS_NAMENODE_PATH_BASED_CACHE_REFRESH_INTERVAL_MS, 100);
  conf.setLong(DFSConfigKeys.DFS_CACHEREPORT_INTERVAL_MSEC_KEY, 500);
  conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
  conf.setLong(DFSConfigKeys.DFS_DATANODE_MAX_LOCKED_MEMORY_KEY,
      CACHE_CAPACITY);
  conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);

  prevCacheManipulator = NativeIO.POSIX.getCacheManipulator();
  NativeIO.POSIX.setCacheManipulator(new NoMlockCacheManipulator());

  cluster = new MiniDFSCluster.Builder(conf)
      .numDataNodes(1).build();
  cluster.waitActive();

  fs = cluster.getFileSystem();
  nn = cluster.getNameNode();
  fsImage = nn.getFSImage();
  dn = cluster.getDataNodes().get(0);
  fsd = dn.getFSDataset();

  spyNN = DataNodeTestUtils.spyOnBposToNN(dn, nn);
}
Example 13
Source File: TestFsDatasetCache.java From big-c with Apache License 2.0
@Before
public void setUp() throws Exception {
  conf = new HdfsConfiguration();
  conf.setLong(
      DFSConfigKeys.DFS_NAMENODE_PATH_BASED_CACHE_REFRESH_INTERVAL_MS, 100);
  conf.setLong(DFSConfigKeys.DFS_CACHEREPORT_INTERVAL_MSEC_KEY, 500);
  conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
  conf.setLong(DFSConfigKeys.DFS_DATANODE_MAX_LOCKED_MEMORY_KEY,
      CACHE_CAPACITY);
  conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);

  prevCacheManipulator = NativeIO.POSIX.getCacheManipulator();
  NativeIO.POSIX.setCacheManipulator(new NoMlockCacheManipulator());

  cluster = new MiniDFSCluster.Builder(conf)
      .numDataNodes(1).build();
  cluster.waitActive();

  fs = cluster.getFileSystem();
  nn = cluster.getNameNode();
  fsImage = nn.getFSImage();
  dn = cluster.getDataNodes().get(0);
  fsd = dn.getFSDataset();

  spyNN = DataNodeTestUtils.spyOnBposToNN(dn, nn);
}
Example 14
Source File: TestAuditLogs.java From big-c with Apache License 2.0
@Before
public void setupCluster() throws Exception {
  // must configure prior to instantiating the namesystem because it
  // will reconfigure the logger if async is enabled
  configureAuditLogs();
  conf = new HdfsConfiguration();
  final long precision = 1L;
  conf.setLong(DFSConfigKeys.DFS_NAMENODE_ACCESSTIME_PRECISION_KEY, precision);
  conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 10000L);
  conf.setBoolean(DFSConfigKeys.DFS_WEBHDFS_ENABLED_KEY, true);
  conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_AUDIT_LOG_ASYNC_KEY, useAsyncLog);
  util = new DFSTestUtil.Builder().setName("TestAuditAllowed").
      setNumFiles(20).build();
  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(4).build();
  fs = cluster.getFileSystem();
  util.createFiles(fs, fileName);

  // make sure the appender is what it's supposed to be
  Logger logger = ((Log4JLogger) FSNamesystem.auditLog).getLogger();
  @SuppressWarnings("unchecked")
  List<Appender> appenders = Collections.list(logger.getAllAppenders());
  assertEquals(1, appenders.size());
  assertEquals(useAsyncLog, appenders.get(0) instanceof AsyncAppender);

  fnames = util.getFileNames(fileName);
  util.waitReplication(fs, fileName, (short)3);
  userGroupInfo = UserGroupInformation.createUserForTesting(username, groups);
}
Example 15
Source File: TestDiskError.java From hadoop with Apache License 2.0
@Before
public void setUp() throws Exception {
  conf = new HdfsConfiguration();
  conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 512L);
  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
  cluster.waitActive();
  fs = cluster.getFileSystem();
}
Example 16
Source File: TestDiskError.java From big-c with Apache License 2.0
@Before
public void setUp() throws Exception {
  conf = new HdfsConfiguration();
  conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 512L);
  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
  cluster.waitActive();
  fs = cluster.getFileSystem();
}
Example 17
Source File: TestDelegationToken.java From big-c with Apache License 2.0
@Before
public void setUp() throws Exception {
  config = new HdfsConfiguration();
  config.setBoolean(DFSConfigKeys.DFS_WEBHDFS_ENABLED_KEY, true);
  config.setLong(DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_MAX_LIFETIME_KEY,
      10000);
  config.setLong(DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_RENEW_INTERVAL_KEY,
      5000);
  config.setBoolean(DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY,
      true);
  config.set("hadoop.security.auth_to_local",
      "RULE:[2:$1@$0](JobTracker@.*FOO.COM)s/@.*//" + "DEFAULT");
  FileSystem.setDefaultUri(config, "hdfs://localhost:" + "0");
  cluster = new MiniDFSCluster.Builder(config).numDataNodes(0).build();
  cluster.waitActive();
  dtSecretManager = NameNodeAdapter.getDtSecretManager(
      cluster.getNamesystem());
}
Example 18
Source File: TestCacheDirectives.java From big-c with Apache License 2.0
private static HdfsConfiguration createCachingConf() {
  HdfsConfiguration conf = new HdfsConfiguration();
  conf.setLong(DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
  conf.setLong(DFS_DATANODE_MAX_LOCKED_MEMORY_KEY, CACHE_CAPACITY);
  conf.setLong(DFS_HEARTBEAT_INTERVAL_KEY, 1);
  conf.setLong(DFS_CACHEREPORT_INTERVAL_MSEC_KEY, 1000);
  conf.setLong(DFS_NAMENODE_PATH_BASED_CACHE_REFRESH_INTERVAL_MS, 1000);
  // set low limits here for testing purposes
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_LIST_CACHE_POOLS_NUM_RESPONSES, 2);
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_LIST_CACHE_DIRECTIVES_NUM_RESPONSES, 2);
  return conf;
}
Example 19
Source File: TestEnhancedByteBufferAccess.java From big-c with Apache License 2.0
@Test
public void test2GBMmapLimit() throws Exception {
  Assume.assumeTrue(BlockReaderTestUtil.shouldTestLargeFiles());
  HdfsConfiguration conf = initZeroCopyTest();
  final long TEST_FILE_LENGTH = 2469605888L;
  conf.set(DFSConfigKeys.DFS_CHECKSUM_TYPE_KEY, "NULL");
  conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, TEST_FILE_LENGTH);
  MiniDFSCluster cluster = null;
  final Path TEST_PATH = new Path("/a");
  final String CONTEXT = "test2GBMmapLimit";
  conf.set(DFSConfigKeys.DFS_CLIENT_CONTEXT, CONTEXT);

  FSDataInputStream fsIn = null, fsIn2 = null;
  ByteBuffer buf1 = null, buf2 = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    cluster.waitActive();
    DistributedFileSystem fs = cluster.getFileSystem();
    DFSTestUtil.createFile(fs, TEST_PATH, TEST_FILE_LENGTH, (short)1, 0xB);
    DFSTestUtil.waitReplication(fs, TEST_PATH, (short)1);

    fsIn = fs.open(TEST_PATH);
    buf1 = fsIn.read(null, 1, EnumSet.of(ReadOption.SKIP_CHECKSUMS));
    Assert.assertEquals(1, buf1.remaining());
    fsIn.releaseBuffer(buf1);
    buf1 = null;
    fsIn.seek(2147483640L);
    buf1 = fsIn.read(null, 1024, EnumSet.of(ReadOption.SKIP_CHECKSUMS));
    Assert.assertEquals(7, buf1.remaining());
    Assert.assertEquals(Integer.MAX_VALUE, buf1.limit());
    fsIn.releaseBuffer(buf1);
    buf1 = null;
    Assert.assertEquals(2147483647L, fsIn.getPos());
    try {
      buf1 = fsIn.read(null, 1024, EnumSet.of(ReadOption.SKIP_CHECKSUMS));
      Assert.fail("expected UnsupportedOperationException");
    } catch (UnsupportedOperationException e) {
      // expected; can't read past 2GB boundary.
    }
    fsIn.close();
    fsIn = null;

    // Now create another file with normal-sized blocks, and verify we
    // can read past 2GB
    final Path TEST_PATH2 = new Path("/b");
    conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 268435456L);
    DFSTestUtil.createFile(fs, TEST_PATH2, 1024 * 1024, TEST_FILE_LENGTH,
        268435456L, (short)1, 0xA);

    fsIn2 = fs.open(TEST_PATH2);
    fsIn2.seek(2147483640L);
    buf2 = fsIn2.read(null, 1024, EnumSet.of(ReadOption.SKIP_CHECKSUMS));
    Assert.assertEquals(8, buf2.remaining());
    Assert.assertEquals(2147483648L, fsIn2.getPos());
    fsIn2.releaseBuffer(buf2);
    buf2 = null;
    buf2 = fsIn2.read(null, 1024, EnumSet.of(ReadOption.SKIP_CHECKSUMS));
    Assert.assertEquals(1024, buf2.remaining());
    Assert.assertEquals(2147484672L, fsIn2.getPos());
    fsIn2.releaseBuffer(buf2);
    buf2 = null;
  } finally {
    if (buf1 != null) {
      fsIn.releaseBuffer(buf1);
    }
    if (buf2 != null) {
      fsIn2.releaseBuffer(buf2);
    }
    IOUtils.cleanup(null, fsIn, fsIn2);
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
Example 20
Source File: TestEnhancedByteBufferAccess.java From hadoop with Apache License 2.0
@Test
public void test2GBMmapLimit() throws Exception {
  Assume.assumeTrue(BlockReaderTestUtil.shouldTestLargeFiles());
  HdfsConfiguration conf = initZeroCopyTest();
  final long TEST_FILE_LENGTH = 2469605888L;
  conf.set(DFSConfigKeys.DFS_CHECKSUM_TYPE_KEY, "NULL");
  conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, TEST_FILE_LENGTH);
  MiniDFSCluster cluster = null;
  final Path TEST_PATH = new Path("/a");
  final String CONTEXT = "test2GBMmapLimit";
  conf.set(DFSConfigKeys.DFS_CLIENT_CONTEXT, CONTEXT);

  FSDataInputStream fsIn = null, fsIn2 = null;
  ByteBuffer buf1 = null, buf2 = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    cluster.waitActive();
    DistributedFileSystem fs = cluster.getFileSystem();
    DFSTestUtil.createFile(fs, TEST_PATH, TEST_FILE_LENGTH, (short)1, 0xB);
    DFSTestUtil.waitReplication(fs, TEST_PATH, (short)1);

    fsIn = fs.open(TEST_PATH);
    buf1 = fsIn.read(null, 1, EnumSet.of(ReadOption.SKIP_CHECKSUMS));
    Assert.assertEquals(1, buf1.remaining());
    fsIn.releaseBuffer(buf1);
    buf1 = null;
    fsIn.seek(2147483640L);
    buf1 = fsIn.read(null, 1024, EnumSet.of(ReadOption.SKIP_CHECKSUMS));
    Assert.assertEquals(7, buf1.remaining());
    Assert.assertEquals(Integer.MAX_VALUE, buf1.limit());
    fsIn.releaseBuffer(buf1);
    buf1 = null;
    Assert.assertEquals(2147483647L, fsIn.getPos());
    try {
      buf1 = fsIn.read(null, 1024, EnumSet.of(ReadOption.SKIP_CHECKSUMS));
      Assert.fail("expected UnsupportedOperationException");
    } catch (UnsupportedOperationException e) {
      // expected; can't read past 2GB boundary.
    }
    fsIn.close();
    fsIn = null;

    // Now create another file with normal-sized blocks, and verify we
    // can read past 2GB
    final Path TEST_PATH2 = new Path("/b");
    conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 268435456L);
    DFSTestUtil.createFile(fs, TEST_PATH2, 1024 * 1024, TEST_FILE_LENGTH,
        268435456L, (short)1, 0xA);

    fsIn2 = fs.open(TEST_PATH2);
    fsIn2.seek(2147483640L);
    buf2 = fsIn2.read(null, 1024, EnumSet.of(ReadOption.SKIP_CHECKSUMS));
    Assert.assertEquals(8, buf2.remaining());
    Assert.assertEquals(2147483648L, fsIn2.getPos());
    fsIn2.releaseBuffer(buf2);
    buf2 = null;
    buf2 = fsIn2.read(null, 1024, EnumSet.of(ReadOption.SKIP_CHECKSUMS));
    Assert.assertEquals(1024, buf2.remaining());
    Assert.assertEquals(2147484672L, fsIn2.getPos());
    fsIn2.releaseBuffer(buf2);
    buf2 = null;
  } finally {
    if (buf1 != null) {
      fsIn.releaseBuffer(buf1);
    }
    if (buf2 != null) {
      fsIn2.releaseBuffer(buf2);
    }
    IOUtils.cleanup(null, fsIn, fsIn2);
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}