org.apache.hadoop.hdfs.HAUtil Java Examples
The following examples show how to use org.apache.hadoop.hdfs.HAUtil. Each example is listed with the project it comes from, its source file, and its license.
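Most of the examples below share one pattern: resolve the nameservice ID from the configuration, then ask HAUtil whether HA is enabled and which NameNode ID the local node should assume. A minimal standalone sketch of that pattern follows; the class name and the use of a default Configuration are illustrative, not taken from any of the projects below.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.HAUtil;

public class HAUtilSketch {
  public static void main(String[] args) {
    // Picks up core-site.xml/hdfs-site.xml from the classpath.
    Configuration conf = new Configuration();

    // Resolve the nameservice this node belongs to, then check HA status.
    String nsId = DFSUtil.getNamenodeNameServiceId(conf);
    if (HAUtil.isHAEnabled(conf, nsId)) {
      String nnId = HAUtil.getNameNodeId(conf, nsId);
      System.out.println("HA nameservice " + nsId + ", local NameNode ID " + nnId);
    } else {
      System.out.println("HA is not enabled for nameservice " + nsId);
    }
  }
}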
Example #1
Source File: DFSZKFailoverController.java From hadoop with Apache License 2.0
public static DFSZKFailoverController create(Configuration conf) {
  Configuration localNNConf = DFSHAAdmin.addSecurityConfiguration(conf);
  String nsId = DFSUtil.getNamenodeNameServiceId(conf);

  if (!HAUtil.isHAEnabled(localNNConf, nsId)) {
    throw new HadoopIllegalArgumentException(
        "HA is not enabled for this namenode.");
  }
  String nnId = HAUtil.getNameNodeId(localNNConf, nsId);
  if (nnId == null) {
    String msg = "Could not get the namenode ID of this node. " +
        "You may run zkfc on the node other than namenode.";
    throw new HadoopIllegalArgumentException(msg);
  }
  NameNode.initializeGenericKeys(localNNConf, nsId, nnId);
  DFSUtil.setGenericConf(localNNConf, nsId, nnId, ZKFC_CONF_KEYS);

  NNHAServiceTarget localTarget = new NNHAServiceTarget(
      localNNConf, nsId, nnId);
  return new DFSZKFailoverController(localNNConf, localTarget);
}
Example #2
Source File: NameNode.java From hadoop with Apache License 2.0
@VisibleForTesting
public static boolean doRollback(Configuration conf,
    boolean isConfirmationNeeded) throws IOException {
  String nsId = DFSUtil.getNamenodeNameServiceId(conf);
  String namenodeId = HAUtil.getNameNodeId(conf, nsId);
  initializeGenericKeys(conf, nsId, namenodeId);

  FSNamesystem nsys = new FSNamesystem(conf, new FSImage(conf));
  System.err.print(
      "\"rollBack\" will remove the current state of the file system,\n"
      + "returning you to the state prior to initiating your recent\n"
      + "upgrade. This action is permanent and cannot be undone. If you\n"
      + "are performing a rollback in an HA environment, you should be\n"
      + "certain that no NameNode process is running on any host.");
  if (isConfirmationNeeded) {
    if (!confirmPrompt("Roll back file system state?")) {
      System.err.println("Rollback aborted.");
      return true;
    }
  }
  nsys.getFSImage().doRollback(nsys);
  return false;
}
Example #3
Source File: TestInitializeSharedEdits.java From big-c with Apache License 2.0
@Before
public void setupCluster() throws IOException {
  conf = new Configuration();
  conf.setInt(DFSConfigKeys.DFS_HA_LOGROLL_PERIOD_KEY, 1);
  conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1);
  HAUtil.setAllowStandbyReads(conf, true);

  MiniDFSNNTopology topology = MiniDFSNNTopology.simpleHATopology();

  cluster = new MiniDFSCluster.Builder(conf)
      .nnTopology(topology)
      .numDataNodes(0)
      .build();
  cluster.waitActive();

  shutdownClusterAndRemoveSharedEditsDir();
}
Example #4
Source File: NameNode.java From big-c with Apache License 2.0
@VisibleForTesting
public static boolean doRollback(Configuration conf,
    boolean isConfirmationNeeded) throws IOException {
  String nsId = DFSUtil.getNamenodeNameServiceId(conf);
  String namenodeId = HAUtil.getNameNodeId(conf, nsId);
  initializeGenericKeys(conf, nsId, namenodeId);

  FSNamesystem nsys = new FSNamesystem(conf, new FSImage(conf));
  System.err.print(
      "\"rollBack\" will remove the current state of the file system,\n"
      + "returning you to the state prior to initiating your recent\n"
      + "upgrade. This action is permanent and cannot be undone. If you\n"
      + "are performing a rollback in an HA environment, you should be\n"
      + "certain that no NameNode process is running on any host.");
  if (isConfirmationNeeded) {
    if (!confirmPrompt("Roll back file system state?")) {
      System.err.println("Rollback aborted.");
      return true;
    }
  }
  nsys.getFSImage().doRollback(nsys);
  return false;
}
Example #5
Source File: FSImage.java From hadoop with Apache License 2.0
/** Rollback for rolling upgrade. */
private void rollingRollback(long discardSegmentTxId, long ckptId)
    throws IOException {
  // discard unnecessary editlog segments starting from the given id
  this.editLog.discardSegments(discardSegmentTxId);
  // rename the special checkpoint
  renameCheckpoint(ckptId, NameNodeFile.IMAGE_ROLLBACK, NameNodeFile.IMAGE,
      true);
  // purge all the checkpoints after the marker
  archivalManager.purgeCheckpoinsAfter(NameNodeFile.IMAGE, ckptId);

  String nameserviceId = DFSUtil.getNamenodeNameServiceId(conf);
  if (HAUtil.isHAEnabled(conf, nameserviceId)) {
    // close the editlog since it is currently open for write
    this.editLog.close();
    // reopen the editlog for read
    this.editLog.initSharedJournalsForRead();
  }
}
Example #6
Source File: FSImage.java From big-c with Apache License 2.0
/** Rollback for rolling upgrade. */
private void rollingRollback(long discardSegmentTxId, long ckptId)
    throws IOException {
  // discard unnecessary editlog segments starting from the given id
  this.editLog.discardSegments(discardSegmentTxId);
  // rename the special checkpoint
  renameCheckpoint(ckptId, NameNodeFile.IMAGE_ROLLBACK, NameNodeFile.IMAGE,
      true);
  // purge all the checkpoints after the marker
  archivalManager.purgeCheckpoinsAfter(NameNodeFile.IMAGE, ckptId);

  String nameserviceId = DFSUtil.getNamenodeNameServiceId(conf);
  if (HAUtil.isHAEnabled(conf, nameserviceId)) {
    // close the editlog since it is currently open for write
    this.editLog.close();
    // reopen the editlog for read
    this.editLog.initSharedJournalsForRead();
  }
}
Example #7
Source File: DFSZKFailoverController.java From big-c with Apache License 2.0
public static DFSZKFailoverController create(Configuration conf) {
  Configuration localNNConf = DFSHAAdmin.addSecurityConfiguration(conf);
  String nsId = DFSUtil.getNamenodeNameServiceId(conf);

  if (!HAUtil.isHAEnabled(localNNConf, nsId)) {
    throw new HadoopIllegalArgumentException(
        "HA is not enabled for this namenode.");
  }
  String nnId = HAUtil.getNameNodeId(localNNConf, nsId);
  if (nnId == null) {
    String msg = "Could not get the namenode ID of this node. " +
        "You may run zkfc on the node other than namenode.";
    throw new HadoopIllegalArgumentException(msg);
  }
  NameNode.initializeGenericKeys(localNNConf, nsId, nnId);
  DFSUtil.setGenericConf(localNNConf, nsId, nnId, ZKFC_CONF_KEYS);

  NNHAServiceTarget localTarget = new NNHAServiceTarget(
      localNNConf, nsId, nnId);
  return new DFSZKFailoverController(localNNConf, localTarget);
}
Example #8
Source File: TestXAttrsWithHA.java From big-c with Apache License 2.0
@Before
public void setupCluster() throws Exception {
  Configuration conf = new Configuration();
  conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1);
  HAUtil.setAllowStandbyReads(conf, true);

  cluster = new MiniDFSCluster.Builder(conf)
      .nnTopology(MiniDFSNNTopology.simpleHATopology())
      .numDataNodes(1)
      .waitSafeMode(false)
      .build();
  cluster.waitActive();

  nn0 = cluster.getNameNode(0);
  nn1 = cluster.getNameNode(1);
  fs = HATestUtil.configureFailoverFs(cluster, conf);

  cluster.transitionToActive(0);
}
Example #9
Source File: DFSAdmin.java From big-c with Apache License 2.0
/**
 * Download the most recent fsimage from the name node, and save it to a local
 * file in the given directory.
 *
 * @param argv
 *          List of command line parameters.
 * @param idx
 *          The index of the command that is being processed.
 * @return an exit code indicating success or failure.
 * @throws IOException
 */
public int fetchImage(final String[] argv, final int idx) throws IOException {
  Configuration conf = getConf();
  final URL infoServer = DFSUtil.getInfoServer(
      HAUtil.getAddressOfActive(getDFS()), conf,
      DFSUtil.getHttpClientScheme(conf)).toURL();
  SecurityUtil.doAsCurrentUser(new PrivilegedExceptionAction<Void>() {
    @Override
    public Void run() throws Exception {
      TransferFsImage.downloadMostRecentImageToDirectory(infoServer,
          new File(argv[idx]));
      return null;
    }
  });
  return 0;
}
Example #10
Source File: YarnUtils.java From twill with Apache License 2.0
/**
 * Clones the delegation token to individual hosts behind the same logical address.
 *
 * @param config the hadoop configuration
 * @throws IOException if failed to get information for the current user.
 */
public static void cloneHaNnCredentials(Configuration config) throws IOException {
  String scheme = URI.create(config.get(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY,
                                        CommonConfigurationKeysPublic.FS_DEFAULT_NAME_DEFAULT)).getScheme();

  // Loop through all name services. Each name service could have multiple name nodes associated with it.
  for (Map.Entry<String, Map<String, InetSocketAddress>> entry : getHaNnRpcAddresses(config).entrySet()) {
    String nsId = entry.getKey();
    Map<String, InetSocketAddress> addressesInNN = entry.getValue();
    if (!HAUtil.isHAEnabled(config, nsId) || addressesInNN == null || addressesInNN.isEmpty()) {
      continue;
    }

    // The client may have a delegation token set for the logical
    // URI of the cluster. Clone this token to apply to each of the
    // underlying IPC addresses so that the IPC code can find it.
    URI uri = URI.create(scheme + "://" + nsId);
    LOG.info("Cloning delegation token for uri {}", uri);
    cloneDelegationTokenForLogicalUri(UserGroupInformation.getCurrentUser(), uri, addressesInNN.values());
  }
}
Example #11
Source File: DFSAdmin.java From hadoop with Apache License 2.0
/**
 * Download the most recent fsimage from the name node, and save it to a local
 * file in the given directory.
 *
 * @param argv
 *          List of command line parameters.
 * @param idx
 *          The index of the command that is being processed.
 * @return an exit code indicating success or failure.
 * @throws IOException
 */
public int fetchImage(final String[] argv, final int idx) throws IOException {
  Configuration conf = getConf();
  final URL infoServer = DFSUtil.getInfoServer(
      HAUtil.getAddressOfActive(getDFS()), conf,
      DFSUtil.getHttpClientScheme(conf)).toURL();
  SecurityUtil.doAsCurrentUser(new PrivilegedExceptionAction<Void>() {
    @Override
    public Void run() throws Exception {
      TransferFsImage.downloadMostRecentImageToDirectory(infoServer,
          new File(argv[idx]));
      return null;
    }
  });
  return 0;
}
Example #12
Source File: DFSAdmin.java From hadoop with Apache License 2.0
/**
 * Dumps DFS data structures into specified file.
 * Usage: hdfs dfsadmin -metasave filename
 * @param argv List of command line parameters.
 * @param idx The index of the command that is being processed.
 * @exception IOException if an error occurred while accessing
 *            the file or path.
 */
public int metaSave(String[] argv, int idx) throws IOException {
  String pathname = argv[idx];
  DistributedFileSystem dfs = getDFS();
  Configuration dfsConf = dfs.getConf();
  URI dfsUri = dfs.getUri();
  boolean isHaEnabled = HAUtil.isLogicalUri(dfsConf, dfsUri);

  if (isHaEnabled) {
    String nsId = dfsUri.getHost();
    List<ProxyAndInfo<ClientProtocol>> proxies =
        HAUtil.getProxiesForAllNameNodesInNameservice(dfsConf,
            nsId, ClientProtocol.class);
    for (ProxyAndInfo<ClientProtocol> proxy : proxies) {
      proxy.getProxy().metaSave(pathname);
      System.out.println("Created metasave file " + pathname + " in the log "
          + "directory of namenode " + proxy.getAddress());
    }
  } else {
    dfs.metaSave(pathname);
    System.out.println("Created metasave file " + pathname + " in the log "
        + "directory of namenode " + dfs.getUri());
  }
  return 0;
}
Example #13
Source File: DFSAdmin.java From big-c with Apache License 2.0
/**
 * Dumps DFS data structures into specified file.
 * Usage: hdfs dfsadmin -metasave filename
 * @param argv List of command line parameters.
 * @param idx The index of the command that is being processed.
 * @exception IOException if an error occurred while accessing
 *            the file or path.
 */
public int metaSave(String[] argv, int idx) throws IOException {
  String pathname = argv[idx];
  DistributedFileSystem dfs = getDFS();
  Configuration dfsConf = dfs.getConf();
  URI dfsUri = dfs.getUri();
  boolean isHaEnabled = HAUtil.isLogicalUri(dfsConf, dfsUri);

  if (isHaEnabled) {
    String nsId = dfsUri.getHost();
    List<ProxyAndInfo<ClientProtocol>> proxies =
        HAUtil.getProxiesForAllNameNodesInNameservice(dfsConf,
            nsId, ClientProtocol.class);
    for (ProxyAndInfo<ClientProtocol> proxy : proxies) {
      proxy.getProxy().metaSave(pathname);
      System.out.println("Created metasave file " + pathname + " in the log "
          + "directory of namenode " + proxy.getAddress());
    }
  } else {
    dfs.metaSave(pathname);
    System.out.println("Created metasave file " + pathname + " in the log "
        + "directory of namenode " + dfs.getUri());
  }
  return 0;
}
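Examples #12 and #13 above, like several of the DFSAdmin examples further down, follow a second recurring pattern: when the filesystem URI is a logical HA URI, fan the operation out to every NameNode in the nameservice instead of a single address. A condensed sketch of just that pattern follows, assuming a Hadoop 2.x classpath; the helper name and the choice of saveNamespace() as the fanned-out call are illustrative.

import java.io.IOException;
import java.net.URI;
import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.HAUtil;
import org.apache.hadoop.hdfs.NameNodeProxies.ProxyAndInfo;
import org.apache.hadoop.hdfs.protocol.ClientProtocol;

public class HaAwareAdminSketch {
  // Hypothetical helper: apply an operation to all NameNodes when the URI
  // is logical (HA), or to the single NameNode otherwise.
  static void saveNamespaceOnAll(DistributedFileSystem dfs) throws IOException {
    Configuration conf = dfs.getConf();
    URI uri = dfs.getUri();
    if (HAUtil.isLogicalUri(conf, uri)) {
      String nsId = uri.getHost();
      List<ProxyAndInfo<ClientProtocol>> proxies =
          HAUtil.getProxiesForAllNameNodesInNameservice(conf, nsId,
              ClientProtocol.class);
      for (ProxyAndInfo<ClientProtocol> proxy : proxies) {
        proxy.getProxy().saveNamespace();  // issued against each NameNode
        System.out.println("saveNamespace successful for " + proxy.getAddress());
      }
    } else {
      dfs.saveNamespace();  // single, non-HA NameNode
    }
  }
}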
Example #14
Source File: TestDFSAdminWithHA.java From hadoop with Apache License 2.0
private void setUpHaCluster(boolean security) throws Exception {
  conf = new Configuration();
  conf.setBoolean(CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION,
      security);
  cluster = new MiniQJMHACluster.Builder(conf).build();
  setHAConf(conf, cluster.getDfsCluster().getNameNode(0).getHostAndPort(),
      cluster.getDfsCluster().getNameNode(1).getHostAndPort());
  admin = new DFSAdmin();
  admin.setConf(conf);
  assertTrue(HAUtil.isHAEnabled(conf, "ns1"));

  originOut = System.out;
  originErr = System.err;
  System.setOut(new PrintStream(out));
  System.setErr(new PrintStream(err));
}
Example #15
Source File: TestQuotasWithHA.java From hadoop with Apache License 2.0
@Before
public void setupCluster() throws Exception {
  Configuration conf = new Configuration();
  conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
  conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1);
  conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
  HAUtil.setAllowStandbyReads(conf, true);

  cluster = new MiniDFSCluster.Builder(conf)
      .nnTopology(MiniDFSNNTopology.simpleHATopology())
      .numDataNodes(1)
      .waitSafeMode(false)
      .build();
  cluster.waitActive();

  nn0 = cluster.getNameNode(0);
  nn1 = cluster.getNameNode(1);
  fs = HATestUtil.configureFailoverFs(cluster, conf);

  cluster.transitionToActive(0);
}
Example #16
Source File: TestDFSClientFailover.java From hadoop with Apache License 2.0
/**
 * Test to verify legacy proxy providers are correctly wrapped.
 */
@Test
public void testWrappedFailoverProxyProvider() throws Exception {
  // setup the config with the dummy provider class
  Configuration config = new HdfsConfiguration(conf);
  String logicalName = HATestUtil.getLogicalHostname(cluster);
  HATestUtil.setFailoverConfigurations(cluster, config, logicalName);
  config.set(DFS_CLIENT_FAILOVER_PROXY_PROVIDER_KEY_PREFIX + "." + logicalName,
      DummyLegacyFailoverProxyProvider.class.getName());
  Path p = new Path("hdfs://" + logicalName + "/");

  // not to use IP address for token service
  SecurityUtil.setTokenServiceUseIp(false);

  // Logical URI should be used.
  assertTrue("Legacy proxy providers should use logical URI.",
      HAUtil.useLogicalUri(config, p.toUri()));
}
Example #17
Source File: TestDFSClientFailover.java From big-c with Apache License 2.0
/**
 * Test to verify legacy proxy providers are correctly wrapped.
 */
@Test
public void testWrappedFailoverProxyProvider() throws Exception {
  // setup the config with the dummy provider class
  Configuration config = new HdfsConfiguration(conf);
  String logicalName = HATestUtil.getLogicalHostname(cluster);
  HATestUtil.setFailoverConfigurations(cluster, config, logicalName);
  config.set(DFS_CLIENT_FAILOVER_PROXY_PROVIDER_KEY_PREFIX + "." + logicalName,
      DummyLegacyFailoverProxyProvider.class.getName());
  Path p = new Path("hdfs://" + logicalName + "/");

  // not to use IP address for token service
  SecurityUtil.setTokenServiceUseIp(false);

  // Logical URI should be used.
  assertTrue("Legacy proxy providers should use logical URI.",
      HAUtil.useLogicalUri(config, p.toUri()));
}
Example #18
Source File: TestXAttrsWithHA.java From hadoop with Apache License 2.0
@Before
public void setupCluster() throws Exception {
  Configuration conf = new Configuration();
  conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1);
  HAUtil.setAllowStandbyReads(conf, true);

  cluster = new MiniDFSCluster.Builder(conf)
      .nnTopology(MiniDFSNNTopology.simpleHATopology())
      .numDataNodes(1)
      .waitSafeMode(false)
      .build();
  cluster.waitActive();

  nn0 = cluster.getNameNode(0);
  nn1 = cluster.getNameNode(1);
  fs = HATestUtil.configureFailoverFs(cluster, conf);

  cluster.transitionToActive(0);
}
Example #19
Source File: TestInitializeSharedEdits.java From hadoop with Apache License 2.0
@Before
public void setupCluster() throws IOException {
  conf = new Configuration();
  conf.setInt(DFSConfigKeys.DFS_HA_LOGROLL_PERIOD_KEY, 1);
  conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1);
  HAUtil.setAllowStandbyReads(conf, true);

  MiniDFSNNTopology topology = MiniDFSNNTopology.simpleHATopology();

  cluster = new MiniDFSCluster.Builder(conf)
      .nnTopology(topology)
      .numDataNodes(0)
      .build();
  cluster.waitActive();

  shutdownClusterAndRemoveSharedEditsDir();
}
Example #20
Source File: TestQuotasWithHA.java From big-c with Apache License 2.0
@Before
public void setupCluster() throws Exception {
  Configuration conf = new Configuration();
  conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
  conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1);
  conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
  HAUtil.setAllowStandbyReads(conf, true);

  cluster = new MiniDFSCluster.Builder(conf)
      .nnTopology(MiniDFSNNTopology.simpleHATopology())
      .numDataNodes(1)
      .waitSafeMode(false)
      .build();
  cluster.waitActive();

  nn0 = cluster.getNameNode(0);
  nn1 = cluster.getNameNode(1);
  fs = HATestUtil.configureFailoverFs(cluster, conf);

  cluster.transitionToActive(0);
}
Example #21
Source File: YarnUtils.java From twill with Apache License 2.0
/**
 * For Hadoop versions after 2.8.0, class HAUtil no longer has the method
 * cloneDelegationTokenForLogicalUri, so it is invoked reflectively from
 * HAUtilClient when that class is available.
 */
private static void cloneDelegationTokenForLogicalUri(UserGroupInformation ugi, URI haUri,
                                                      Collection<InetSocketAddress> nnAddrs) {
  if (hasHAUtilsClient) {
    invokeStaticMethodWithExceptionHandled(cloneDelegationTokenForLogicalUriMethod, ugi, haUri, nnAddrs);
  } else {
    HAUtil.cloneDelegationTokenForLogicalUri(ugi, haUri, nnAddrs);
  }
}
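The hasHAUtilsClient flag and the invokeStaticMethodWithExceptionHandled helper above are Twill internals not shown on this page. A sketch of how the reflective lookup behind them might be initialized follows; the field names are hypothetical, and it assumes the post-2.8.0 method lives on org.apache.hadoop.hdfs.HAUtilClient with the same signature.

import java.lang.reflect.Method;
import java.net.URI;
import java.util.Collection;
import org.apache.hadoop.security.UserGroupInformation;

public class HAUtilCompat {
  static final Method CLONE_TOKEN_METHOD;
  static final boolean HAS_HA_UTILS_CLIENT;

  static {
    Method method = null;
    try {
      // Probe for the hadoop-hdfs-client location introduced after 2.8.0.
      Class<?> haUtilClient = Class.forName("org.apache.hadoop.hdfs.HAUtilClient");
      method = haUtilClient.getMethod("cloneDelegationTokenForLogicalUri",
          UserGroupInformation.class, URI.class, Collection.class);
    } catch (ClassNotFoundException | NoSuchMethodException e) {
      // Older Hadoop: fall back to HAUtil.cloneDelegationTokenForLogicalUri.
    }
    CLONE_TOKEN_METHOD = method;
    HAS_HA_UTILS_CLIENT = method != null;
  }
}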
Example #22
Source File: DFSAdmin.java From big-c with Apache License 2.0
/**
 * Command to ask the namenode to reread the hosts and excluded hosts
 * file.
 * Usage: hdfs dfsadmin -refreshNodes
 * @exception IOException
 */
public int refreshNodes() throws IOException {
  int exitCode = -1;

  DistributedFileSystem dfs = getDFS();
  Configuration dfsConf = dfs.getConf();
  URI dfsUri = dfs.getUri();
  boolean isHaEnabled = HAUtil.isLogicalUri(dfsConf, dfsUri);

  if (isHaEnabled) {
    String nsId = dfsUri.getHost();
    List<ProxyAndInfo<ClientProtocol>> proxies =
        HAUtil.getProxiesForAllNameNodesInNameservice(dfsConf,
            nsId, ClientProtocol.class);
    for (ProxyAndInfo<ClientProtocol> proxy : proxies) {
      proxy.getProxy().refreshNodes();
      System.out.println("Refresh nodes successful for " +
          proxy.getAddress());
    }
  } else {
    dfs.refreshNodes();
    System.out.println("Refresh nodes successful");
  }
  exitCode = 0;

  return exitCode;
}
Example #23
Source File: ParameterParser.java From big-c with Apache License 2.0
Token<DelegationTokenIdentifier> delegationToken() throws IOException {
  String delegation = param(DelegationParam.NAME);
  final Token<DelegationTokenIdentifier> token =
      new Token<DelegationTokenIdentifier>();
  token.decodeFromUrlString(delegation);
  URI nnUri = URI.create(HDFS_URI_SCHEME + "://" + namenodeId());
  boolean isLogical = HAUtil.isLogicalUri(conf, nnUri);
  if (isLogical) {
    token.setService(HAUtil.buildTokenServiceForLogicalUri(nnUri,
        HDFS_URI_SCHEME));
  } else {
    token.setService(SecurityUtil.buildTokenService(nnUri));
  }
  return token;
}
Example #24
Source File: TokenAspect.java From big-c with Apache License 2.0
private TokenManagementDelegator getInstance(Token<?> token,
    Configuration conf) throws IOException {
  final URI uri;
  final String scheme = getSchemeByKind(token.getKind());
  if (HAUtil.isTokenForLogicalUri(token)) {
    uri = HAUtil.getServiceUriFromToken(scheme, token);
  } else {
    final InetSocketAddress address = SecurityUtil.getTokenServiceAddr(token);
    uri = URI.create(scheme + "://" + NetUtils.getHostPortString(address));
  }
  return (TokenManagementDelegator) FileSystem.get(uri, conf);
}
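Examples #23 and #24 are two sides of the same convention: a delegation token for an HA cluster carries a logical service name (of the form ha-hdfs:<nameservice>) rather than a host:port, and HAUtil both builds and recognizes that form. A small sketch follows, assuming the two-argument buildTokenServiceForLogicalUri seen in Example #23; the nameservice ID "mycluster" is a placeholder.

import java.net.URI;
import org.apache.hadoop.hdfs.HAUtil;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
import org.apache.hadoop.security.token.Token;

public class LogicalTokenSketch {
  // Stamp a token with the logical service name so clients resolve it
  // through the configured failover proxy provider.
  static void stampLogicalService(Token<DelegationTokenIdentifier> token) {
    URI logicalUri = URI.create("hdfs://mycluster");
    token.setService(HAUtil.buildTokenServiceForLogicalUri(logicalUri, "hdfs"));
    // Should print true: the service now names a logical URI, not an address.
    System.out.println(HAUtil.isTokenForLogicalUri(token));
  }
}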
Example #25
Source File: NameNode.java From big-c with Apache License 2.0
/**
 * Verify that configured directories exist, then print the metadata versions
 * of the software and the image.
 *
 * @param conf configuration to use
 * @throws IOException
 */
private static boolean printMetadataVersion(Configuration conf)
    throws IOException {
  final String nsId = DFSUtil.getNamenodeNameServiceId(conf);
  final String namenodeId = HAUtil.getNameNodeId(conf, nsId);
  NameNode.initializeGenericKeys(conf, nsId, namenodeId);
  final FSImage fsImage = new FSImage(conf);
  final FSNamesystem fs = new FSNamesystem(conf, fsImage, false);
  return fsImage.recoverTransitionRead(
      StartupOption.METADATAVERSION, fs, null);
}
Example #26
Source File: DFSAdmin.java From big-c with Apache License 2.0
/**
 * Refresh the authorization policy on the {@link NameNode}.
 * @return exitcode 0 on success, non-zero on failure
 * @throws IOException
 */
public int refreshServiceAcl() throws IOException {
  // Get the current configuration
  Configuration conf = getConf();

  // for security authorization
  // server principal for this call
  // should be NN's one.
  conf.set(CommonConfigurationKeys.HADOOP_SECURITY_SERVICE_USER_NAME_KEY,
      conf.get(DFSConfigKeys.DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY, ""));
  DistributedFileSystem dfs = getDFS();
  URI dfsUri = dfs.getUri();
  boolean isHaEnabled = HAUtil.isLogicalUri(conf, dfsUri);

  if (isHaEnabled) {
    // Run refreshServiceAcl for all NNs if HA is enabled
    String nsId = dfsUri.getHost();
    List<ProxyAndInfo<RefreshAuthorizationPolicyProtocol>> proxies =
        HAUtil.getProxiesForAllNameNodesInNameservice(conf, nsId,
            RefreshAuthorizationPolicyProtocol.class);
    for (ProxyAndInfo<RefreshAuthorizationPolicyProtocol> proxy : proxies) {
      proxy.getProxy().refreshServiceAcl();
      System.out.println("Refresh service acl successful for "
          + proxy.getAddress());
    }
  } else {
    // Create the client
    RefreshAuthorizationPolicyProtocol refreshProtocol =
        NameNodeProxies.createProxy(conf, FileSystem.getDefaultUri(conf),
            RefreshAuthorizationPolicyProtocol.class).getProxy();
    // Refresh the authorization policy in-effect
    refreshProtocol.refreshServiceAcl();
    System.out.println("Refresh service acl successful");
  }

  return 0;
}
Example #27
Source File: FSImage.java From big-c with Apache License 2.0
public void initEditLog(StartupOption startOpt) throws IOException {
  Preconditions.checkState(getNamespaceID() != 0,
      "Must know namespace ID before initting edit log");
  String nameserviceId = DFSUtil.getNamenodeNameServiceId(conf);
  if (!HAUtil.isHAEnabled(conf, nameserviceId)) {
    // If this NN is not HA
    editLog.initJournalsForWrite();
    editLog.recoverUnclosedStreams();
  } else if (HAUtil.isHAEnabled(conf, nameserviceId)
      && (startOpt == StartupOption.UPGRADE
          || startOpt == StartupOption.UPGRADEONLY
          || RollingUpgradeStartupOption.ROLLBACK.matches(startOpt))) {
    // This NN is HA, but we're doing an upgrade or a rollback of rolling
    // upgrade so init the edit log for write.
    editLog.initJournalsForWrite();
    if (startOpt == StartupOption.UPGRADE
        || startOpt == StartupOption.UPGRADEONLY) {
      long sharedLogCTime = editLog.getSharedLogCTime();
      if (this.storage.getCTime() < sharedLogCTime) {
        throw new IOException("It looks like the shared log is already " +
            "being upgraded but this NN has not been upgraded yet. You " +
            "should restart this NameNode with the '" +
            StartupOption.BOOTSTRAPSTANDBY.getName() + "' option to bring " +
            "this NN in sync with the other.");
      }
    }
    editLog.recoverUnclosedStreams();
  } else {
    // This NN is HA and we're not doing an upgrade.
    editLog.initSharedJournalsForRead();
  }
}
Example #28
Source File: DFSAdmin.java From big-c with Apache License 2.0
/**
 * Refresh the user-to-groups mappings on the {@link NameNode}.
 * @return exitcode 0 on success, non-zero on failure
 * @throws IOException
 */
public int refreshUserToGroupsMappings() throws IOException {
  // Get the current configuration
  Configuration conf = getConf();

  // for security authorization
  // server principal for this call
  // should be NN's one.
  conf.set(CommonConfigurationKeys.HADOOP_SECURITY_SERVICE_USER_NAME_KEY,
      conf.get(DFSConfigKeys.DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY, ""));
  DistributedFileSystem dfs = getDFS();
  URI dfsUri = dfs.getUri();
  boolean isHaEnabled = HAUtil.isLogicalUri(conf, dfsUri);

  if (isHaEnabled) {
    // Run refreshUserToGroupsMappings for all NNs if HA is enabled
    String nsId = dfsUri.getHost();
    List<ProxyAndInfo<RefreshUserMappingsProtocol>> proxies =
        HAUtil.getProxiesForAllNameNodesInNameservice(conf, nsId,
            RefreshUserMappingsProtocol.class);
    for (ProxyAndInfo<RefreshUserMappingsProtocol> proxy : proxies) {
      proxy.getProxy().refreshUserToGroupsMappings();
      System.out.println("Refresh user to groups mapping successful for "
          + proxy.getAddress());
    }
  } else {
    // Create the client
    RefreshUserMappingsProtocol refreshProtocol =
        NameNodeProxies.createProxy(conf, FileSystem.getDefaultUri(conf),
            RefreshUserMappingsProtocol.class).getProxy();
    // Refresh the user-to-groups mappings
    refreshProtocol.refreshUserToGroupsMappings();
    System.out.println("Refresh user to groups mapping successful");
  }

  return 0;
}
Example #29
Source File: DFSAdmin.java From big-c with Apache License 2.0
/**
 * Refresh the super-user groups configuration on the {@link NameNode}.
 * @return exitcode 0 on success, non-zero on failure
 * @throws IOException
 */
public int refreshSuperUserGroupsConfiguration() throws IOException {
  // Get the current configuration
  Configuration conf = getConf();

  // for security authorization
  // server principal for this call
  // should be NAMENODE's one.
  conf.set(CommonConfigurationKeys.HADOOP_SECURITY_SERVICE_USER_NAME_KEY,
      conf.get(DFSConfigKeys.DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY, ""));
  DistributedFileSystem dfs = getDFS();
  URI dfsUri = dfs.getUri();
  boolean isHaEnabled = HAUtil.isLogicalUri(conf, dfsUri);

  if (isHaEnabled) {
    // Run refreshSuperUserGroupsConfiguration for all NNs if HA is enabled
    String nsId = dfsUri.getHost();
    List<ProxyAndInfo<RefreshUserMappingsProtocol>> proxies =
        HAUtil.getProxiesForAllNameNodesInNameservice(conf, nsId,
            RefreshUserMappingsProtocol.class);
    for (ProxyAndInfo<RefreshUserMappingsProtocol> proxy : proxies) {
      proxy.getProxy().refreshSuperUserGroupsConfiguration();
      System.out.println("Refresh super user groups configuration "
          + "successful for " + proxy.getAddress());
    }
  } else {
    // Create the client
    RefreshUserMappingsProtocol refreshProtocol =
        NameNodeProxies.createProxy(conf, FileSystem.getDefaultUri(conf),
            RefreshUserMappingsProtocol.class).getProxy();
    // Refresh the super-user groups configuration
    refreshProtocol.refreshSuperUserGroupsConfiguration();
    System.out.println("Refresh super user groups configuration successful");
  }

  return 0;
}
Example #30
Source File: DFSck.java From big-c with Apache License 2.0
/**
 * Derive the namenode http address from the current file system,
 * either default or as set by "-fs" in the generic options.
 * @return Returns http address or null if failure.
 * @throws IOException if we can't determine the active NN address
 */
private URI getCurrentNamenodeAddress(Path target) throws IOException {
  Configuration conf = getConf();

  // get the filesystem object to verify it is an HDFS system
  final FileSystem fs = target.getFileSystem(conf);
  if (!(fs instanceof DistributedFileSystem)) {
    System.err.println("FileSystem is " + fs.getUri());
    return null;
  }

  return DFSUtil.getInfoServer(HAUtil.getAddressOfActive(fs), conf,
      DFSUtil.getHttpClientScheme(conf));
}