Java Code Examples for org.apache.hadoop.hdfs.server.namenode.EditLogFileOutputStream#setShouldSkipFsyncForTesting()
The following examples show how to use
org.apache.hadoop.hdfs.server.namenode.EditLogFileOutputStream#setShouldSkipFsyncForTesting() .
Each example notes the project and source file it comes from.
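setShouldSkipFsyncForTesting(boolean) is a static, test-only hook on EditLogFileOutputStream that makes NameNode edit-log flushes skip the fsync() system call. Tests that start a MiniDFSCluster run noticeably faster with it enabled, at the cost of edit-log durability, which does not matter for tests. Below is a minimal sketch of the enable-and-restore pattern the examples use, assuming a JUnit 4 test class (the class name SkipFsyncExampleTest is illustrative):

import org.apache.hadoop.hdfs.server.namenode.EditLogFileOutputStream;
import org.junit.AfterClass;
import org.junit.BeforeClass;

public class SkipFsyncExampleTest {
  @BeforeClass
  public static void enableSkipFsync() {
    // Skip fsync() on every edit-log flush for the duration of this class.
    EditLogFileOutputStream.setShouldSkipFsyncForTesting(true);
  }

  @AfterClass
  public static void restoreFsync() {
    // The flag is static (JVM-wide), so restore the default to avoid
    // leaking the setting into tests that run later in the same JVM.
    EditLogFileOutputStream.setShouldSkipFsyncForTesting(false);
  }
}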
Example 1
Source File: TestCachingStrategy.java From hadoop with Apache License 2.0
@BeforeClass
public static void setupTest() {
  EditLogFileOutputStream.setShouldSkipFsyncForTesting(true);

  // Track calls to posix_fadvise.
  NativeIO.POSIX.setCacheManipulator(tracker);

  // Normally, we wait for a few megabytes of data to be read or written
  // before dropping the cache. This is to avoid an excessive number of
  // JNI calls to the posix_fadvise function. However, for the purpose
  // of this test, we want to use small files and see all fadvise calls
  // happen.
  BlockSender.CACHE_DROP_INTERVAL_BYTES = 4096;
  BlockReceiver.CACHE_DROP_LAG_BYTES = 4096;
}
Example 2
Source File: TestCachingStrategy.java From big-c with Apache License 2.0
@BeforeClass
public static void setupTest() {
  EditLogFileOutputStream.setShouldSkipFsyncForTesting(true);

  // Track calls to posix_fadvise.
  NativeIO.POSIX.setCacheManipulator(tracker);

  // Normally, we wait for a few megabytes of data to be read or written
  // before dropping the cache. This is to avoid an excessive number of
  // JNI calls to the posix_fadvise function. However, for the purpose
  // of this test, we want to use small files and see all fadvise calls
  // happen.
  BlockSender.CACHE_DROP_INTERVAL_BYTES = 4096;
  BlockReceiver.CACHE_DROP_LAG_BYTES = 4096;
}
Example 3
Source File: HBaseTestingUtility.java From hbase with Apache License 2.0
public MiniDFSCluster startMiniDFSCluster(int servers, final String racks[], String hosts[])
    throws Exception {
  createDirsAndSetProperties();
  EditLogFileOutputStream.setShouldSkipFsyncForTesting(true);

  // Error level to skip some warnings specific to the minicluster. See HBASE-4709
  Log4jUtils.setLogLevel(org.apache.hadoop.metrics2.util.MBeans.class.getName(), "ERROR");
  Log4jUtils.setLogLevel(org.apache.hadoop.metrics2.impl.MetricsSystemImpl.class.getName(),
      "ERROR");

  TraceUtil.initTracer(conf);
  this.dfsCluster =
      new MiniDFSCluster(0, this.conf, servers, true, true, true, null, racks, hosts, null);

  // Set this just-started cluster as our filesystem.
  setFs();

  // Wait for the cluster to be totally up
  this.dfsCluster.waitClusterUp();

  // reset the test directory for test file system
  dataTestDirOnTestFS = null;
  String dataTestDir = getDataTestDir().toString();
  conf.set(HConstants.HBASE_DIR, dataTestDir);
  LOG.debug("Setting {} to {}", HConstants.HBASE_DIR, dataTestDir);

  return this.dfsCluster;
}
Example 4
Source File: TestSecureShuffle.java From tez with Apache License 2.0
@BeforeClass
public static void setupDFSCluster() throws Exception {
  conf = new Configuration();
  conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_EDITS_NOEDITLOGCHANNELFLUSH, false);
  EditLogFileOutputStream.setShouldSkipFsyncForTesting(true);
  conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, TEST_ROOT_DIR);
  miniDFSCluster =
      new MiniDFSCluster.Builder(conf).numDataNodes(1).format(true).build();
  fs = miniDFSCluster.getFileSystem();
  conf.set("fs.defaultFS", fs.getUri().toString());
  conf.setBoolean(TezRuntimeConfiguration.TEZ_RUNTIME_OPTIMIZE_LOCAL_FETCH, false);
}
Example 5
Source File: TestPipelinedShuffle.java From tez with Apache License 2.0
@BeforeClass
public static void setupDFSCluster() throws Exception {
  conf = new Configuration();
  conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_EDITS_NOEDITLOGCHANNELFLUSH, false);
  EditLogFileOutputStream.setShouldSkipFsyncForTesting(true);
  conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, TEST_ROOT_DIR);
  miniDFSCluster =
      new MiniDFSCluster.Builder(conf).numDataNodes(1).format(true).build();
  fs = miniDFSCluster.getFileSystem();
  conf.set("fs.defaultFS", fs.getUri().toString());
  conf.setBoolean(TezRuntimeConfiguration.TEZ_RUNTIME_OPTIMIZE_LOCAL_FETCH, false);
}
Example 6
Source File: TestAnalyzer.java From tez with Apache License 2.0
@BeforeClass
public static void setupClass() throws Exception {
  conf = new Configuration();
  conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_EDITS_NOEDITLOGCHANNELFLUSH, false);
  EditLogFileOutputStream.setShouldSkipFsyncForTesting(true);
  conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, TEST_ROOT_DIR);
  dfsCluster =
      new MiniDFSCluster.Builder(conf).numDataNodes(1).format(true).build();
  fs = dfsCluster.getFileSystem();
  conf.set("fs.defaultFS", fs.getUri().toString());
  setupTezCluster();
}
Example 7
Source File: TestHistoryParser.java From tez with Apache License 2.0
@BeforeClass
public static void setupCluster() throws Exception {
  conf = new Configuration();
  conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_EDITS_NOEDITLOGCHANNELFLUSH, false);
  EditLogFileOutputStream.setShouldSkipFsyncForTesting(true);
  conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, TEST_ROOT_DIR);
  miniDFSCluster =
      new MiniDFSCluster.Builder(conf).numDataNodes(1).format(true).build();
  fs = miniDFSCluster.getFileSystem();
  conf.set("fs.defaultFS", fs.getUri().toString());
  setupTezCluster();
}
Example 8
Source File: MiniDFSCluster.java From hadoop with Apache License 2.0
private void initMiniDFSCluster(
    Configuration conf,
    int numDataNodes, StorageType[][] storageTypes, boolean format,
    boolean manageNameDfsDirs, boolean manageNameDfsSharedDirs,
    boolean enableManagedDfsDirsRedundancy, boolean manageDataDfsDirs,
    StartupOption startOpt, StartupOption dnStartOpt, String[] racks,
    String[] hosts,
    long[][] storageCapacities, long[] simulatedCapacities, String clusterId,
    boolean waitSafeMode, boolean setupHostsFile,
    MiniDFSNNTopology nnTopology, boolean checkExitOnShutdown,
    boolean checkDataNodeAddrConfig, boolean checkDataNodeHostConfig,
    Configuration[] dnConfOverlays,
    boolean skipFsyncForTesting) throws IOException {
  boolean success = false;
  try {
    ExitUtil.disableSystemExit();

    // Re-enable symlinks for tests, see HADOOP-10020 and HADOOP-10052
    FileSystem.enableSymlinks();

    synchronized (MiniDFSCluster.class) {
      instanceId = instanceCount++;
    }

    this.conf = conf;
    base_dir = new File(determineDfsBaseDir());
    data_dir = new File(base_dir, "data");
    this.waitSafeMode = waitSafeMode;
    this.checkExitOnShutdown = checkExitOnShutdown;

    int replication = conf.getInt(DFS_REPLICATION_KEY, 3);
    conf.setInt(DFS_REPLICATION_KEY, Math.min(replication, numDataNodes));
    int safemodeExtension = conf.getInt(
        DFS_NAMENODE_SAFEMODE_EXTENSION_TESTING_KEY, 0);
    conf.setInt(DFS_NAMENODE_SAFEMODE_EXTENSION_KEY, safemodeExtension);
    conf.setInt(DFS_NAMENODE_DECOMMISSION_INTERVAL_KEY, 3); // 3 second
    conf.setClass(NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY,
        StaticMapping.class, DNSToSwitchMapping.class);

    // In an HA cluster, in order for the StandbyNode to perform checkpoints,
    // it needs to know the HTTP port of the Active. So, if ephemeral ports
    // are chosen, disable checkpoints for the test.
    if (!nnTopology.allHttpPortsSpecified() && nnTopology.isHA()) {
      LOG.info("MiniDFSCluster disabling checkpointing in the Standby node " +
          "since no HTTP ports have been specified.");
      conf.setBoolean(DFS_HA_STANDBY_CHECKPOINTS_KEY, false);
    }
    if (!nnTopology.allIpcPortsSpecified() && nnTopology.isHA()) {
      LOG.info("MiniDFSCluster disabling log-roll triggering in the " +
          "Standby node since no IPC ports have been specified.");
      conf.setInt(DFS_HA_LOGROLL_PERIOD_KEY, -1);
    }

    EditLogFileOutputStream.setShouldSkipFsyncForTesting(skipFsyncForTesting);

    federation = nnTopology.isFederated();
    try {
      createNameNodesAndSetConf(
          nnTopology, manageNameDfsDirs, manageNameDfsSharedDirs,
          enableManagedDfsDirsRedundancy,
          format, startOpt, clusterId, conf);
    } catch (IOException ioe) {
      LOG.error("IOE creating namenodes. Permissions dump:\n" +
          createPermissionsDiagnosisString(data_dir), ioe);
      throw ioe;
    }
    if (format) {
      if (data_dir.exists() && !FileUtil.fullyDelete(data_dir)) {
        throw new IOException("Cannot remove data directory: " + data_dir +
            createPermissionsDiagnosisString(data_dir));
      }
    }

    if (startOpt == StartupOption.RECOVER) {
      return;
    }

    // Start the DataNodes
    startDataNodes(conf, numDataNodes, storageTypes, manageDataDfsDirs,
        dnStartOpt != null ? dnStartOpt : startOpt,
        racks, hosts, storageCapacities, simulatedCapacities, setupHostsFile,
        checkDataNodeAddrConfig, checkDataNodeHostConfig, dnConfOverlays);
    waitClusterUp();
    // make sure ProxyUsers uses the latest conf
    ProxyUsers.refreshSuperUserGroupsConfiguration(conf);
    success = true;
  } finally {
    if (!success) {
      shutdown();
    }
  }
}
Example 9
Source File: MiniDFSCluster.java From big-c with Apache License 2.0
private void initMiniDFSCluster(
    Configuration conf,
    int numDataNodes, StorageType[][] storageTypes, boolean format,
    boolean manageNameDfsDirs, boolean manageNameDfsSharedDirs,
    boolean enableManagedDfsDirsRedundancy, boolean manageDataDfsDirs,
    StartupOption startOpt, StartupOption dnStartOpt, String[] racks,
    String[] hosts,
    long[][] storageCapacities, long[] simulatedCapacities, String clusterId,
    boolean waitSafeMode, boolean setupHostsFile,
    MiniDFSNNTopology nnTopology, boolean checkExitOnShutdown,
    boolean checkDataNodeAddrConfig, boolean checkDataNodeHostConfig,
    Configuration[] dnConfOverlays,
    boolean skipFsyncForTesting) throws IOException {
  boolean success = false;
  try {
    ExitUtil.disableSystemExit();

    // Re-enable symlinks for tests, see HADOOP-10020 and HADOOP-10052
    FileSystem.enableSymlinks();

    synchronized (MiniDFSCluster.class) {
      instanceId = instanceCount++;
    }

    this.conf = conf;
    base_dir = new File(determineDfsBaseDir());
    data_dir = new File(base_dir, "data");
    this.waitSafeMode = waitSafeMode;
    this.checkExitOnShutdown = checkExitOnShutdown;

    int replication = conf.getInt(DFS_REPLICATION_KEY, 3);
    conf.setInt(DFS_REPLICATION_KEY, Math.min(replication, numDataNodes));
    int safemodeExtension = conf.getInt(
        DFS_NAMENODE_SAFEMODE_EXTENSION_TESTING_KEY, 0);
    conf.setInt(DFS_NAMENODE_SAFEMODE_EXTENSION_KEY, safemodeExtension);
    conf.setInt(DFS_NAMENODE_DECOMMISSION_INTERVAL_KEY, 3); // 3 second
    conf.setClass(NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY,
        StaticMapping.class, DNSToSwitchMapping.class);

    // In an HA cluster, in order for the StandbyNode to perform checkpoints,
    // it needs to know the HTTP port of the Active. So, if ephemeral ports
    // are chosen, disable checkpoints for the test.
    if (!nnTopology.allHttpPortsSpecified() && nnTopology.isHA()) {
      LOG.info("MiniDFSCluster disabling checkpointing in the Standby node " +
          "since no HTTP ports have been specified.");
      conf.setBoolean(DFS_HA_STANDBY_CHECKPOINTS_KEY, false);
    }
    if (!nnTopology.allIpcPortsSpecified() && nnTopology.isHA()) {
      LOG.info("MiniDFSCluster disabling log-roll triggering in the " +
          "Standby node since no IPC ports have been specified.");
      conf.setInt(DFS_HA_LOGROLL_PERIOD_KEY, -1);
    }

    EditLogFileOutputStream.setShouldSkipFsyncForTesting(skipFsyncForTesting);

    federation = nnTopology.isFederated();
    try {
      createNameNodesAndSetConf(
          nnTopology, manageNameDfsDirs, manageNameDfsSharedDirs,
          enableManagedDfsDirsRedundancy,
          format, startOpt, clusterId, conf);
    } catch (IOException ioe) {
      LOG.error("IOE creating namenodes. Permissions dump:\n" +
          createPermissionsDiagnosisString(data_dir), ioe);
      throw ioe;
    }
    if (format) {
      if (data_dir.exists() && !FileUtil.fullyDelete(data_dir)) {
        throw new IOException("Cannot remove data directory: " + data_dir +
            createPermissionsDiagnosisString(data_dir));
      }
    }

    if (startOpt == StartupOption.RECOVER) {
      return;
    }

    // Start the DataNodes
    startDataNodes(conf, numDataNodes, storageTypes, manageDataDfsDirs,
        dnStartOpt != null ? dnStartOpt : startOpt,
        racks, hosts, storageCapacities, simulatedCapacities, setupHostsFile,
        checkDataNodeAddrConfig, checkDataNodeHostConfig, dnConfOverlays);
    waitClusterUp();
    // make sure ProxyUsers uses the latest conf
    ProxyUsers.refreshSuperUserGroupsConfiguration(conf);
    success = true;
  } finally {
    if (!success) {
      shutdown();
    }
  }
}
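Examples 8 and 9 show the other side of the API: MiniDFSCluster applies the flag itself, forwarding its skipFsyncForTesting parameter to the static setter before the NameNodes are created. Test code that builds the cluster through MiniDFSCluster.Builder can therefore leave the static call to the cluster. A sketch, assuming the Builder exposes a skipFsyncForTesting(boolean) setter matching the parameter above; if your Hadoop version lacks it, call the static method directly as in Examples 4 through 7:

// Hypothetical usage; skipFsyncForTesting(boolean) on the Builder is an
// assumption inferred from the initMiniDFSCluster parameter shown above.
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
    .numDataNodes(1)
    .skipFsyncForTesting(true)
    .build();
cluster.waitClusterUp();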