org.apache.hadoop.hdfs.MiniDFSNNTopology Java Examples
The following examples show how to use
org.apache.hadoop.hdfs.MiniDFSNNTopology.
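Every example below follows the same pattern: describe the NameNode layout with a MiniDFSNNTopology (via a factory method such as simpleHATopology() or simpleFederatedTopology(int), or by chaining NSConf/NNConf builders), pass it to MiniDFSCluster.Builder.nnTopology(...), and wait for the cluster to come up. The minimal sketch below distills that pattern; the class name is ours, and it assumes the hadoop-hdfs test artifact (which ships MiniDFSCluster and MiniDFSNNTopology) is on the classpath.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.MiniDFSNNTopology;

public class MiniDFSNNTopologySketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();

    // Two NameNodes ("nn1"/"nn2") in one nameservice; free ports are
    // picked automatically unless fixed via NNConf.setIpcPort()/setHttpPort().
    MiniDFSNNTopology topology = MiniDFSNNTopology.simpleHATopology();

    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
        .nnTopology(topology)
        .numDataNodes(1)
        .build();
    try {
      cluster.waitActive();
      // In an HA topology both NameNodes boot in standby state, so a test
      // must promote one explicitly (Examples #1 and #4 below do the same).
      cluster.transitionToActive(0);
      System.out.println("nn0 standby? "
          + cluster.getNameNode(0).isStandbyState());
    } finally {
      cluster.shutdown();
    }
  }
}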
Example #1
Source File: TestQuotasWithHA.java From hadoop with Apache License 2.0
@Before
public void setupCluster() throws Exception {
  Configuration conf = new Configuration();
  conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
  conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1);
  conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
  HAUtil.setAllowStandbyReads(conf, true);

  cluster = new MiniDFSCluster.Builder(conf)
      .nnTopology(MiniDFSNNTopology.simpleHATopology())
      .numDataNodes(1)
      .waitSafeMode(false)
      .build();
  cluster.waitActive();

  nn0 = cluster.getNameNode(0);
  nn1 = cluster.getNameNode(1);
  fs = HATestUtil.configureFailoverFs(cluster, conf);

  cluster.transitionToActive(0);
}
Example #2
Source File: TestWebHdfsWithMultipleNameNodes.java From big-c with Apache License 2.0
private static void setupCluster(final int nNameNodes, final int nDataNodes)
    throws Exception {
  LOG.info("nNameNodes=" + nNameNodes + ", nDataNodes=" + nDataNodes);

  conf.setBoolean(DFSConfigKeys.DFS_WEBHDFS_ENABLED_KEY, true);

  cluster = new MiniDFSCluster.Builder(conf)
      .nnTopology(MiniDFSNNTopology.simpleFederatedTopology(nNameNodes))
      .numDataNodes(nDataNodes)
      .build();
  cluster.waitActive();

  webhdfs = new WebHdfsFileSystem[nNameNodes];
  for(int i = 0; i < webhdfs.length; i++) {
    final InetSocketAddress addr = cluster.getNameNode(i).getHttpAddress();
    final String uri = WebHdfsFileSystem.SCHEME + "://"
        + addr.getHostName() + ":" + addr.getPort() + "/";
    webhdfs[i] = (WebHdfsFileSystem)FileSystem.get(new URI(uri), conf);
  }
}
Example #3
Source File: TestMover.java From big-c with Apache License 2.0
@Test
public void testMoverCliWithHAConf() throws Exception {
  final Configuration conf = new HdfsConfiguration();
  final MiniDFSCluster cluster = new MiniDFSCluster.Builder(new HdfsConfiguration())
      .nnTopology(MiniDFSNNTopology.simpleHATopology())
      .numDataNodes(0).build();
  HATestUtil.setFailoverConfigurations(cluster, conf, "MyCluster");
  try {
    Map<URI, List<Path>> movePaths = Mover.Cli.getNameNodePathsToMove(conf,
        "-p", "/foo", "/bar");
    Collection<URI> namenodes = DFSUtil.getNsServiceRpcUris(conf);
    Assert.assertEquals(1, namenodes.size());
    Assert.assertEquals(1, movePaths.size());
    URI nn = namenodes.iterator().next();
    Assert.assertEquals(new URI("hdfs://MyCluster"), nn);
    Assert.assertTrue(movePaths.containsKey(nn));
    checkMovePaths(movePaths.get(nn), new Path("/foo"), new Path("/bar"));
  } finally {
    cluster.shutdown();
  }
}
Example #4
Source File: HdfsSortedOplogOrganizerJUnitTest.java From gemfirexd-oss with Apache License 2.0
private MiniDFSCluster initMiniHACluster(int nn1port, int nn2port)
    throws IOException {
  Configuration confForMiniDFS = new Configuration();

  Builder builder = new MiniDFSCluster.Builder(confForMiniDFS)
      .nnTopology(new MiniDFSNNTopology()
          .addNameservice(new MiniDFSNNTopology.NSConf("ns1")
              .addNN(new MiniDFSNNTopology.NNConf("nn1").setIpcPort(nn1port))
              .addNN(new MiniDFSNNTopology.NNConf("nn2").setIpcPort(nn2port))))
      .numDataNodes(1);
  MiniDFSCluster cluster = builder.build();
  cluster.waitActive();

  NameNode nnode1 = cluster.getNameNode(0);
  assertTrue(nnode1.isStandbyState());
  NameNode nnode2 = cluster.getNameNode(1);
  assertTrue(nnode2.isStandbyState());

  cluster.transitionToActive(0);
  assertFalse(nnode1.isStandbyState());
  return cluster;
}
Example #5
Source File: TestBootstrapStandbyWithBKJM.java From hadoop with Apache License 2.0
@Before
public void setUp() throws Exception {
  Configuration conf = new Configuration();
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_CHECK_PERIOD_KEY, 1);
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_TXNS_KEY, 5);
  conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1);
  conf.set(DFSConfigKeys.DFS_NAMENODE_SHARED_EDITS_DIR_KEY,
      BKJMUtil.createJournalURI("/bootstrapStandby").toString());
  BKJMUtil.addJournalManagerDefinition(conf);
  conf.setBoolean(DFSConfigKeys.DFS_IMAGE_COMPRESS_KEY, true);
  conf.set(DFSConfigKeys.DFS_IMAGE_COMPRESSION_CODEC_KEY,
      SlowCodec.class.getCanonicalName());
  CompressionCodecFactory.setCodecClasses(conf,
      ImmutableList.<Class> of(SlowCodec.class));

  MiniDFSNNTopology topology = new MiniDFSNNTopology()
      .addNameservice(new MiniDFSNNTopology.NSConf("ns1")
          .addNN(new MiniDFSNNTopology.NNConf("nn1").setHttpPort(10001))
          .addNN(new MiniDFSNNTopology.NNConf("nn2").setHttpPort(10002)));

  cluster = new MiniDFSCluster.Builder(conf).nnTopology(topology)
      .numDataNodes(1).manageNameDfsSharedDirs(false).build();
  cluster.waitActive();
}
Example #6
Source File: TestEditLogTailer.java From big-c with Apache License 2.0
private static void testStandbyTriggersLogRolls(int activeIndex)
    throws Exception {
  Configuration conf = new Configuration();
  // Roll every 1s
  conf.setInt(DFSConfigKeys.DFS_HA_LOGROLL_PERIOD_KEY, 1);
  conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1);

  // Have to specify IPC ports so the NNs can talk to each other.
  MiniDFSNNTopology topology = new MiniDFSNNTopology()
      .addNameservice(new MiniDFSNNTopology.NSConf("ns1")
          .addNN(new MiniDFSNNTopology.NNConf("nn1").setIpcPort(10031))
          .addNN(new MiniDFSNNTopology.NNConf("nn2").setIpcPort(10032)));

  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
      .nnTopology(topology)
      .numDataNodes(0)
      .build();
  try {
    cluster.transitionToActive(activeIndex);
    waitForLogRollInSharedDir(cluster, 3);
  } finally {
    cluster.shutdown();
  }
}
Example #7
Source File: TestRetryCacheWithHA.java From big-c with Apache License 2.0
@Before
public void setup() throws Exception {
  conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BlockSize);
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_LIST_CACHE_DIRECTIVES_NUM_RESPONSES,
      ResponseSize);
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_LIST_CACHE_POOLS_NUM_RESPONSES,
      ResponseSize);
  conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
  conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_XATTRS_ENABLED_KEY, true);
  cluster = new MiniDFSCluster.Builder(conf)
      .nnTopology(MiniDFSNNTopology.simpleHATopology())
      .numDataNodes(DataNodes).build();
  cluster.waitActive();
  cluster.transitionToActive(0);
  // setup the configuration
  HATestUtil.setFailoverConfigurations(cluster, conf);
  dfs = (DistributedFileSystem) HATestUtil.configureFailoverFs(cluster, conf);
}
Example #8
Source File: TestInitializeSharedEdits.java From big-c with Apache License 2.0
@Before
public void setupCluster() throws IOException {
  conf = new Configuration();
  conf.setInt(DFSConfigKeys.DFS_HA_LOGROLL_PERIOD_KEY, 1);
  conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1);
  HAUtil.setAllowStandbyReads(conf, true);

  MiniDFSNNTopology topology = MiniDFSNNTopology.simpleHATopology();
  cluster = new MiniDFSCluster.Builder(conf)
      .nnTopology(topology)
      .numDataNodes(0)
      .build();
  cluster.waitActive();

  shutdownClusterAndRemoveSharedEditsDir();
}
Example #9
Source File: TestHarFileSystemWithHA.java From hadoop with Apache License 2.0
/**
 * Test that the HarFileSystem works with underlying HDFS URIs that have no
 * port specified, as is often the case with an HA setup.
 */
@Test
public void testHarUriWithHaUriWithNoPort() throws Exception {
  Configuration conf = new HdfsConfiguration();
  MiniDFSCluster cluster = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf)
        .numDataNodes(1)
        .nnTopology(MiniDFSNNTopology.simpleHATopology())
        .build();
    cluster.transitionToActive(0);
    HATestUtil.setFailoverConfigurations(cluster, conf);
    createEmptyHarArchive(HATestUtil.configureFailoverFs(cluster, conf),
        TEST_HAR_PATH);
    URI failoverUri = FileSystem.getDefaultUri(conf);
    Path p = new Path("har://hdfs-" + failoverUri.getAuthority() + TEST_HAR_PATH);
    p.getFileSystem(conf);
  } finally {
    // build() can fail before 'cluster' is assigned, so guard the shutdown
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
Example #10
Source File: TestHASafeMode.java From hadoop with Apache License 2.0
@Before
public void setupCluster() throws Exception {
  Configuration conf = new Configuration();
  conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
  conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
  conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1);

  cluster = new MiniDFSCluster.Builder(conf)
      .nnTopology(MiniDFSNNTopology.simpleHATopology())
      .numDataNodes(3)
      .waitSafeMode(false)
      .build();
  cluster.waitActive();

  nn0 = cluster.getNameNode(0);
  nn1 = cluster.getNameNode(1);
  fs = HATestUtil.configureFailoverFs(cluster, conf);

  cluster.transitionToActive(0);
}
Example #11
Source File: TestBootstrapStandby.java From big-c with Apache License 2.0
@Before
public void setupCluster() throws IOException {
  Configuration conf = new Configuration();

  MiniDFSNNTopology topology = new MiniDFSNNTopology()
      .addNameservice(new MiniDFSNNTopology.NSConf("ns1")
          .addNN(new MiniDFSNNTopology.NNConf("nn1").setHttpPort(20001))
          .addNN(new MiniDFSNNTopology.NNConf("nn2").setHttpPort(20002)));

  cluster = new MiniDFSCluster.Builder(conf)
      .nnTopology(topology)
      .numDataNodes(0)
      .build();
  cluster.waitActive();

  nn0 = cluster.getNameNode(0);
  cluster.transitionToActive(0);
  cluster.shutdownNameNode(1);
}
Example #12
Source File: TestNameNodeRetryCacheMetrics.java From hadoop with Apache License 2.0
/** Start a cluster */
@Before
public void setup() throws Exception {
  conf = new HdfsConfiguration();
  conf.setBoolean(DFS_NAMENODE_ENABLE_RETRY_CACHE_KEY, true);
  conf.setInt(DFSConfigKeys.DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_KEY, 2);
  cluster = new MiniDFSCluster.Builder(conf)
      .nnTopology(MiniDFSNNTopology.simpleHATopology()).numDataNodes(3)
      .build();
  cluster.waitActive();
  cluster.transitionToActive(namenodeId);
  HATestUtil.setFailoverConfigurations(cluster, conf);
  filesystem = (DistributedFileSystem) HATestUtil.configureFailoverFs(cluster, conf);
  namesystem = cluster.getNamesystem(namenodeId);
  metrics = namesystem.getRetryCache().getMetricsForTests();
}
Example #13
Source File: MiniQJMHACluster.java From hadoop with Apache License 2.0
private MiniQJMHACluster(Builder builder) throws IOException {
  this.conf = builder.conf;
  int retryCount = 0;
  while (true) {
    try {
      basePort = 10000 + RANDOM.nextInt(1000) * 4;
      // start 3 journal nodes
      journalCluster = new MiniJournalCluster.Builder(conf).format(true)
          .build();
      URI journalURI = journalCluster.getQuorumJournalURI(NAMESERVICE);

      // start cluster with 2 NameNodes
      MiniDFSNNTopology topology = createDefaultTopology(basePort);

      initHAConf(journalURI, builder.conf);

      // First start up the NNs just to format the namespace. The MiniDFSCluster
      // has no way to just format the NameNodes without also starting them.
      cluster = builder.dfsBuilder.nnTopology(topology)
          .manageNameDfsSharedDirs(false).build();
      cluster.waitActive();
      cluster.shutdownNameNodes();

      // initialize the journal nodes
      Configuration confNN0 = cluster.getConfiguration(0);
      NameNode.initializeSharedEdits(confNN0, true);

      cluster.getNameNodeInfos()[0].setStartOpt(builder.startOpt);
      cluster.getNameNodeInfos()[1].setStartOpt(builder.startOpt);

      // restart the cluster
      cluster.restartNameNodes();
      ++retryCount;
      break;
    } catch (BindException e) {
      LOG.info("MiniQJMHACluster port conflicts, retried " + retryCount
          + " times");
    }
  }
}
Example #14
Source File: TestViewFsWithAcls.java From big-c with Apache License 2.0
@BeforeClass
public static void clusterSetupAtBeginning() throws IOException {
  clusterConf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
  cluster = new MiniDFSCluster.Builder(clusterConf)
      .nnTopology(MiniDFSNNTopology.simpleFederatedTopology(2))
      .numDataNodes(2)
      .build();
  cluster.waitClusterUp();

  fc = FileContext.getFileContext(cluster.getURI(0), clusterConf);
  fc2 = FileContext.getFileContext(cluster.getURI(1), clusterConf);
}
Example #15
Source File: TestBookKeeperAsHASharedDir.java From hadoop with Apache License 2.0
/**
 * Test simple HA failover usecase with BK
 */
@Test
public void testFailoverWithBK() throws Exception {
  MiniDFSCluster cluster = null;
  try {
    Configuration conf = new Configuration();
    conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1);
    conf.set(DFSConfigKeys.DFS_NAMENODE_SHARED_EDITS_DIR_KEY,
        BKJMUtil.createJournalURI("/hotfailover").toString());
    BKJMUtil.addJournalManagerDefinition(conf);

    cluster = new MiniDFSCluster.Builder(conf)
        .nnTopology(MiniDFSNNTopology.simpleHATopology())
        .numDataNodes(0)
        .manageNameDfsSharedDirs(false)
        .build();
    NameNode nn1 = cluster.getNameNode(0);
    NameNode nn2 = cluster.getNameNode(1);
    cluster.waitActive();
    cluster.transitionToActive(0);

    Path p = new Path("/testBKJMfailover");

    FileSystem fs = HATestUtil.configureFailoverFs(cluster, conf);

    fs.mkdirs(p);
    cluster.shutdownNameNode(0);

    cluster.transitionToActive(1);

    assertTrue(fs.exists(p));
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
Example #16
Source File: TestDataNodeExit.java From hadoop with Apache License 2.0
@Before
public void setUp() throws IOException {
  conf = new HdfsConfiguration();
  conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 100);
  conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, 100);
  cluster = new MiniDFSCluster.Builder(conf)
      .nnTopology(MiniDFSNNTopology.simpleFederatedTopology(3))
      .build();
  for (int i = 0; i < 3; i++) {
    cluster.waitActive(i);
  }
}
Example #17
Source File: TestDFSHAAdminMiniCluster.java From hadoop with Apache License 2.0
@Before
public void setup() throws IOException {
  conf = new Configuration();
  cluster = new MiniDFSCluster.Builder(conf)
      .nnTopology(MiniDFSNNTopology.simpleHATopology()).numDataNodes(0)
      .build();
  tool = new DFSHAAdmin();
  tool.setConf(conf);
  tool.setErrOut(new PrintStream(errOutBytes));
  cluster.waitActive();

  nn1Port = cluster.getNameNodePort(0);
}
Example #18
Source File: MiniQJMHACluster.java From big-c with Apache License 2.0
public static MiniDFSNNTopology createDefaultTopology(int basePort) {
  return new MiniDFSNNTopology()
      .addNameservice(new MiniDFSNNTopology.NSConf(NAMESERVICE)
          .addNN(new MiniDFSNNTopology.NNConf("nn1")
              .setIpcPort(basePort).setHttpPort(basePort + 1))
          .addNN(new MiniDFSNNTopology.NNConf("nn2")
              .setIpcPort(basePort + 2).setHttpPort(basePort + 3)));
}
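The constructor shown in Example #13 consumes this topology when it bootstraps the QJM HA cluster. For orientation, a hypothetical direct call (the base port value is illustrative) lays out two NameNodes on four consecutive ports:

MiniDFSNNTopology topology = MiniQJMHACluster.createDefaultTopology(10000);
// nn1: IPC port 10000, HTTP port 10001
// nn2: IPC port 10002, HTTP port 10003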
Example #19
Source File: TestViewFileSystemWithAcls.java From big-c with Apache License 2.0
@BeforeClass
public static void clusterSetupAtBeginning() throws IOException {
  clusterConf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
  cluster = new MiniDFSCluster.Builder(clusterConf)
      .nnTopology(MiniDFSNNTopology.simpleFederatedTopology(2))
      .numDataNodes(2)
      .build();
  cluster.waitClusterUp();

  fHdfs = cluster.getFileSystem(0);
  fHdfs2 = cluster.getFileSystem(1);
}
Example #20
Source File: TestEditLogAutoroll.java From hadoop with Apache License 2.0
@Before
public void setUp() throws Exception {
  conf = new Configuration();
  // Stall the standby checkpointer in two ways
  conf.setLong(DFS_NAMENODE_CHECKPOINT_PERIOD_KEY, Long.MAX_VALUE);
  conf.setLong(DFS_NAMENODE_CHECKPOINT_TXNS_KEY, 20);
  // Make it autoroll after 10 edits
  conf.setFloat(DFS_NAMENODE_EDIT_LOG_AUTOROLL_MULTIPLIER_THRESHOLD, 0.5f);
  conf.setInt(DFS_NAMENODE_EDIT_LOG_AUTOROLL_CHECK_INTERVAL_MS, 100);

  int retryCount = 0;
  while (true) {
    try {
      int basePort = 10060 + random.nextInt(100) * 2;
      MiniDFSNNTopology topology = new MiniDFSNNTopology()
          .addNameservice(new MiniDFSNNTopology.NSConf("ns1")
              .addNN(new MiniDFSNNTopology.NNConf("nn1").setHttpPort(basePort))
              .addNN(new MiniDFSNNTopology.NNConf("nn2").setHttpPort(basePort + 1)));

      cluster = new MiniDFSCluster.Builder(conf)
          .nnTopology(topology)
          .numDataNodes(0)
          .build();
      cluster.waitActive();

      nn0 = cluster.getNameNode(0);
      fs = HATestUtil.configureFailoverFs(cluster, conf);
      cluster.transitionToActive(0);
      fs = cluster.getFileSystem(0);
      editLog = nn0.getNamesystem().getEditLog();
      ++retryCount;
      break;
    } catch (BindException e) {
      LOG.info("Set up MiniDFSCluster failed due to port conflicts, retry "
          + retryCount + " times");
    }
  }
}
Example #21
Source File: TestViewFsWithXAttrs.java From hadoop with Apache License 2.0
@BeforeClass
public static void clusterSetupAtBeginning() throws IOException {
  cluster = new MiniDFSCluster.Builder(clusterConf)
      .nnTopology(MiniDFSNNTopology.simpleFederatedTopology(2))
      .numDataNodes(2)
      .build();
  cluster.waitClusterUp();

  fc = FileContext.getFileContext(cluster.getURI(0), clusterConf);
  fc2 = FileContext.getFileContext(cluster.getURI(1), clusterConf);
}
Example #22
Source File: HAStressTestHarness.java From hadoop with Apache License 2.0
/**
 * Start and return the MiniDFSCluster.
 */
public MiniDFSCluster startCluster() throws IOException {
  cluster = new MiniDFSCluster.Builder(conf)
      .nnTopology(MiniDFSNNTopology.simpleHATopology())
      .numDataNodes(3)
      .build();
  return cluster;
}
Example #23
Source File: TestGetGroupsWithHA.java From hadoop with Apache License 2.0
@Before
public void setUpNameNode() throws IOException {
  conf = new HdfsConfiguration();
  cluster = new MiniDFSCluster.Builder(conf)
      .nnTopology(MiniDFSNNTopology.simpleHATopology())
      .numDataNodes(0).build();
  HATestUtil.setFailoverConfigurations(cluster, conf);
}
Example #24
Source File: TestStandbyCheckpoints.java From hadoop with Apache License 2.0
@SuppressWarnings("rawtypes") @Before public void setupCluster() throws Exception { Configuration conf = setupCommonConfig(); // Dial down the retention of extra edits and checkpoints. This is to // help catch regressions of HDFS-4238 (SBN should not purge shared edits) conf.setInt(DFSConfigKeys.DFS_NAMENODE_NUM_CHECKPOINTS_RETAINED_KEY, 1); conf.setInt(DFSConfigKeys.DFS_NAMENODE_NUM_EXTRA_EDITS_RETAINED_KEY, 0); int retryCount = 0; while (true) { try { int basePort = 10060 + random.nextInt(100) * 2; MiniDFSNNTopology topology = new MiniDFSNNTopology() .addNameservice(new MiniDFSNNTopology.NSConf("ns1") .addNN(new MiniDFSNNTopology.NNConf("nn1").setHttpPort(basePort)) .addNN(new MiniDFSNNTopology.NNConf("nn2").setHttpPort(basePort + 1))); cluster = new MiniDFSCluster.Builder(conf) .nnTopology(topology) .numDataNodes(1) .build(); cluster.waitActive(); nn0 = cluster.getNameNode(0); nn1 = cluster.getNameNode(1); fs = HATestUtil.configureFailoverFs(cluster, conf); cluster.transitionToActive(0); ++retryCount; break; } catch (BindException e) { LOG.info("Set up MiniDFSCluster failed due to port conflicts, retry " + retryCount + " times"); } } }
Example #25
Source File: TestBookKeeperAsHASharedDir.java From big-c with Apache License 2.0
/**
 * NameNode should load the edits correctly if the applicable edits are
 * present in the BKJM.
 */
@Test
public void testNameNodeMultipleSwitchesUsingBKJM() throws Exception {
  MiniDFSCluster cluster = null;
  try {
    Configuration conf = new Configuration();
    conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1);
    conf.set(DFSConfigKeys.DFS_NAMENODE_SHARED_EDITS_DIR_KEY,
        BKJMUtil.createJournalURI("/correctEditLogSelection").toString());
    BKJMUtil.addJournalManagerDefinition(conf);

    cluster = new MiniDFSCluster.Builder(conf)
        .nnTopology(MiniDFSNNTopology.simpleHATopology()).numDataNodes(0)
        .manageNameDfsSharedDirs(false).build();
    NameNode nn1 = cluster.getNameNode(0);
    NameNode nn2 = cluster.getNameNode(1);
    cluster.waitActive();
    cluster.transitionToActive(0);
    nn1.getRpcServer().rollEditLog(); // Roll Edits from current Active.
    // Transition to standby current active gracefully.
    cluster.transitionToStandby(0);
    // Make the other Active and Roll edits multiple times
    cluster.transitionToActive(1);
    nn2.getRpcServer().rollEditLog();
    nn2.getRpcServer().rollEditLog();
    // Now One more failover. So NN1 should be able to failover successfully.
    cluster.transitionToStandby(1);
    cluster.transitionToActive(0);
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
Example #26
Source File: TestViewFileSystemHdfs.java From big-c with Apache License 2.0
@BeforeClass
public static void clusterSetupAtBegining() throws IOException,
    LoginException, URISyntaxException {
  SupportsBlocks = true;
  CONF.setBoolean(
      DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY, true);

  cluster = new MiniDFSCluster.Builder(CONF)
      .nnTopology(MiniDFSNNTopology.simpleFederatedTopology(2))
      .numDataNodes(2)
      .build();
  cluster.waitClusterUp();

  fHdfs = cluster.getFileSystem(0);
  fHdfs2 = cluster.getFileSystem(1);

  fHdfs.getConf().set(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY,
      FsConstants.VIEWFS_URI.toString());
  fHdfs2.getConf().set(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY,
      FsConstants.VIEWFS_URI.toString());

  defaultWorkingDirectory = fHdfs.makeQualified(new Path("/user/"
      + UserGroupInformation.getCurrentUser().getShortUserName()));
  defaultWorkingDirectory2 = fHdfs2.makeQualified(new Path("/user/"
      + UserGroupInformation.getCurrentUser().getShortUserName()));

  fHdfs.mkdirs(defaultWorkingDirectory);
  fHdfs2.mkdirs(defaultWorkingDirectory2);
}
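The factory methods used throughout these examples cover the two common layouts: simpleHATopology() builds one nameservice with two NameNodes, and simpleFederatedTopology(int) builds several nameservices with one NameNode each. The same builder API can also describe federation and HA together. The sketch below is ours, not taken from any of the projects above; the nameservice IDs and ports are illustrative, and recent Hadoop versions may additionally offer a simpleHAFederatedTopology(int) convenience method (verify against your version before relying on it).

// Two federated nameservices, each containing an HA pair of NameNodes.
MiniDFSNNTopology topology = new MiniDFSNNTopology()
    .addNameservice(new MiniDFSNNTopology.NSConf("ns1")
        .addNN(new MiniDFSNNTopology.NNConf("nn1").setIpcPort(20010).setHttpPort(20011))
        .addNN(new MiniDFSNNTopology.NNConf("nn2").setIpcPort(20012).setHttpPort(20013)))
    .addNameservice(new MiniDFSNNTopology.NSConf("ns2")
        .addNN(new MiniDFSNNTopology.NNConf("nn3").setIpcPort(20020).setHttpPort(20021))
        .addNN(new MiniDFSNNTopology.NNConf("nn4").setIpcPort(20022).setHttpPort(20023)));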