org.apache.hadoop.fs.CommonConfigurationKeysPublic Java Examples
The following examples show how to use org.apache.hadoop.fs.CommonConfigurationKeysPublic. Each example notes its originating source file, project, and license above the code.
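Before diving in, here is a minimal sketch (illustrative only, not taken from any of the projects below) of the pattern all the examples share: each FOO_KEY constant in CommonConfigurationKeysPublic holds a core-site property name, and most keys pair with a FOO_DEFAULT constant that callers pass as the fallback value. The class name CommonKeysSketch is hypothetical.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;

public class CommonKeysSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // FS_DEFAULT_NAME_KEY is the property "fs.defaultFS";
    // FS_DEFAULT_NAME_DEFAULT is its local-filesystem fallback "file:///".
    String fsUri = conf.get(
        CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY,
        CommonConfigurationKeysPublic.FS_DEFAULT_NAME_DEFAULT);
    // HADOOP_SECURITY_AUTHORIZATION is "hadoop.security.authorization";
    // several examples below check it before refreshing service ACLs.
    boolean serviceAuthz = conf.getBoolean(
        CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHORIZATION, false);
    System.out.println(fsUri + " (service authorization: " + serviceAuthz + ")");
  }
}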
Example #1
Source File: ClientDatanodeProtocolTranslatorPB.java, from hadoop (Apache License 2.0)
static ClientDatanodeProtocolPB createClientDatanodeProtocolProxy(
    DatanodeID datanodeid, Configuration conf, int socketTimeout,
    boolean connectToDnViaHostname, LocatedBlock locatedBlock)
    throws IOException {
  final String dnAddr = datanodeid.getIpcAddr(connectToDnViaHostname);
  InetSocketAddress addr = NetUtils.createSocketAddr(dnAddr);
  if (LOG.isDebugEnabled()) {
    LOG.debug("Connecting to datanode " + dnAddr + " addr=" + addr);
  }

  // Since we're creating a new UserGroupInformation here, we know that no
  // future RPC proxies will be able to re-use the same connection. And
  // usages of this proxy tend to be one-off calls.
  //
  // This is a temporary fix: callers should really achieve this by using
  // RPC.stopProxy() on the resulting object, but this is currently not
  // working in trunk. See the discussion on HDFS-1965.
  Configuration confWithNoIpcIdle = new Configuration(conf);
  confWithNoIpcIdle.setInt(CommonConfigurationKeysPublic
      .IPC_CLIENT_CONNECTION_MAXIDLETIME_KEY, 0);

  UserGroupInformation ticket = UserGroupInformation
      .createRemoteUser(locatedBlock.getBlock().getLocalBlock().toString());
  ticket.addToken(locatedBlock.getBlockToken());
  return createClientDatanodeProtocolProxy(addr, ticket, confWithNoIpcIdle,
      NetUtils.getDefaultSocketFactory(conf), socketTimeout);
}
Example #2
Source File: TestRackResolver.java, from hadoop (Apache License 2.0)
@Test
public void testCaching() {
  Configuration conf = new Configuration();
  conf.setClass(
      CommonConfigurationKeysPublic.NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY,
      MyResolver.class, DNSToSwitchMapping.class);
  RackResolver.init(conf);
  try {
    InetAddress iaddr = InetAddress.getByName("host1");
    MyResolver.resolvedHost1 = iaddr.getHostAddress();
  } catch (UnknownHostException e) {
    // Ignore if not found
  }
  Node node = RackResolver.resolve("host1");
  Assert.assertEquals("/rack1", node.getNetworkLocation());
  node = RackResolver.resolve("host1");
  Assert.assertEquals("/rack1", node.getNetworkLocation());
  // invalidHost is a field of the enclosing test class (not shown here)
  node = RackResolver.resolve(invalidHost);
  Assert.assertEquals(NetworkTopology.DEFAULT_RACK,
      node.getNetworkLocation());
}
Example #3
Source File: TestAMRMClientContainerRequest.java, from hadoop (Apache License 2.0)
@Test
public void testFillInRacks() {
  AMRMClientImpl<ContainerRequest> client =
      new AMRMClientImpl<ContainerRequest>();
  Configuration conf = new Configuration();
  conf.setClass(
      CommonConfigurationKeysPublic.NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY,
      MyResolver.class, DNSToSwitchMapping.class);
  client.init(conf);

  Resource capability = Resource.newInstance(1024, 1, 1);
  ContainerRequest request =
      new ContainerRequest(capability,
          new String[] {"host1", "host2"},
          new String[] {"/rack2"}, Priority.newInstance(1));
  client.addContainerRequest(request);
  verifyResourceRequest(client, request, "host1", true);
  verifyResourceRequest(client, request, "host2", true);
  verifyResourceRequest(client, request, "/rack1", true);
  verifyResourceRequest(client, request, "/rack2", true);
  verifyResourceRequest(client, request, ResourceRequest.ANY, true);
}
Example #4
Source File: RackResolver.java, from hadoop (Apache License 2.0)
public synchronized static void init(Configuration conf) {
  if (initCalled) {
    return;
  } else {
    initCalled = true;
  }
  Class<? extends DNSToSwitchMapping> dnsToSwitchMappingClass =
      conf.getClass(
          CommonConfigurationKeysPublic.NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY,
          ScriptBasedMapping.class, DNSToSwitchMapping.class);
  try {
    DNSToSwitchMapping newInstance = ReflectionUtils.newInstance(
        dnsToSwitchMappingClass, conf);
    // Wrap around the configured class with the Cached implementation so as
    // to save on repetitive lookups.
    // Check if the impl is already caching, to avoid double caching.
    dnsToSwitchMapping =
        ((newInstance instanceof CachedDNSToSwitchMapping) ? newInstance
            : new CachedDNSToSwitchMapping(newInstance));
  } catch (Exception e) {
    throw new RuntimeException(e);
  }
}
Example #5
Source File: ConfiguredRMFailoverProxyProvider.java, from hadoop (Apache License 2.0)
@Override
public void init(Configuration configuration, RMProxy<T> rmProxy,
    Class<T> protocol) {
  this.rmProxy = rmProxy;
  this.protocol = protocol;
  this.rmProxy.checkAllowedProtocols(this.protocol);
  this.conf = new YarnConfiguration(configuration);
  Collection<String> rmIds = HAUtil.getRMHAIds(conf);
  this.rmServiceIds = rmIds.toArray(new String[rmIds.size()]);
  conf.set(YarnConfiguration.RM_HA_ID, rmServiceIds[currentProxyIndex]);

  conf.setInt(
      CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_MAX_RETRIES_KEY,
      conf.getInt(YarnConfiguration.CLIENT_FAILOVER_RETRIES,
          YarnConfiguration.DEFAULT_CLIENT_FAILOVER_RETRIES));

  conf.setInt(CommonConfigurationKeysPublic
          .IPC_CLIENT_CONNECT_MAX_RETRIES_ON_SOCKET_TIMEOUTS_KEY,
      conf.getInt(YarnConfiguration.CLIENT_FAILOVER_RETRIES_ON_SOCKET_TIMEOUTS,
          YarnConfiguration.DEFAULT_CLIENT_FAILOVER_RETRIES_ON_SOCKET_TIMEOUTS));
}
Example #6
Source File: TestAMRMClientContainerRequest.java, from hadoop (Apache License 2.0)
@Test(expected = InvalidContainerRequestException.class)
public void testDifferentLocalityRelaxationSamePriority() {
  AMRMClientImpl<ContainerRequest> client =
      new AMRMClientImpl<ContainerRequest>();
  Configuration conf = new Configuration();
  conf.setClass(
      CommonConfigurationKeysPublic.NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY,
      MyResolver.class, DNSToSwitchMapping.class);
  client.init(conf);

  Resource capability = Resource.newInstance(1024, 1, 1);
  ContainerRequest request1 =
      new ContainerRequest(capability, new String[] {"host1", "host2"},
          null, Priority.newInstance(1), false);
  client.addContainerRequest(request1);
  ContainerRequest request2 =
      new ContainerRequest(capability, new String[] {"host3"},
          null, Priority.newInstance(1), true);
  client.addContainerRequest(request2);
}
Example #7
Source File: TestAMRMClientContainerRequest.java, from hadoop (Apache License 2.0)
@Test(expected = InvalidContainerRequestException.class)
public void testLocalityRelaxationDifferentLevels() {
  AMRMClientImpl<ContainerRequest> client =
      new AMRMClientImpl<ContainerRequest>();
  Configuration conf = new Configuration();
  conf.setClass(
      CommonConfigurationKeysPublic.NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY,
      MyResolver.class, DNSToSwitchMapping.class);
  client.init(conf);

  Resource capability = Resource.newInstance(1024, 1, 1);
  ContainerRequest request1 =
      new ContainerRequest(capability, new String[] {"host1", "host2"},
          null, Priority.newInstance(1), false);
  client.addContainerRequest(request1);
  ContainerRequest request2 =
      new ContainerRequest(capability, null,
          new String[] {"rack1"}, Priority.newInstance(1), true);
  client.addContainerRequest(request2);
}
Example #8
Source File: ResourceLocalizationService.java, from hadoop (Apache License 2.0)
Server createServer() {
  Configuration conf = getConfig();
  YarnRPC rpc = YarnRPC.create(conf);
  if (UserGroupInformation.isSecurityEnabled()) {
    secretManager = new LocalizerTokenSecretManager();
  }

  Server server = rpc.getServer(LocalizationProtocol.class, this,
      localizationServerAddress, conf, secretManager,
      conf.getInt(YarnConfiguration.NM_LOCALIZER_CLIENT_THREAD_COUNT,
          YarnConfiguration.DEFAULT_NM_LOCALIZER_CLIENT_THREAD_COUNT));

  // Enable service authorization?
  if (conf.getBoolean(
      CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHORIZATION,
      false)) {
    server.refreshServiceAcl(conf, new NMPolicyProvider());
  }

  return server;
}
Example #9
Source File: YarnTestBase.java, from Flink-CEPplus (Apache License 2.0)
public static void populateYarnSecureConfigurations(
    Configuration conf, String principal, String keytab) {

  conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION,
      "kerberos");
  conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHORIZATION,
      "true");

  conf.set(YarnConfiguration.RM_KEYTAB, keytab);
  conf.set(YarnConfiguration.RM_PRINCIPAL, principal);
  conf.set(YarnConfiguration.NM_KEYTAB, keytab);
  conf.set(YarnConfiguration.NM_PRINCIPAL, principal);

  conf.set(YarnConfiguration.RM_WEBAPP_SPNEGO_USER_NAME_KEY, principal);
  conf.set(YarnConfiguration.RM_WEBAPP_SPNEGO_KEYTAB_FILE_KEY, keytab);
  conf.set(YarnConfiguration.NM_WEBAPP_SPNEGO_USER_NAME_KEY, principal);
  conf.set(YarnConfiguration.NM_WEBAPP_SPNEGO_KEYTAB_FILE_KEY, keytab);

  conf.set("hadoop.security.auth_to_local", "RULE:[1:$1] RULE:[2:$1]");
}
Example #10
Source File: TestRMAdminService.java, from hadoop (Apache License 2.0)
@Test
public void testServiceAclsRefreshWithLocalConfigurationProvider() {
  configuration.setBoolean(
      CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHORIZATION, true);
  ResourceManager resourceManager = null;
  try {
    resourceManager = new ResourceManager();
    resourceManager.init(configuration);
    resourceManager.start();
    resourceManager.adminService.refreshServiceAcls(
        RefreshServiceAclsRequest.newInstance());
  } catch (Exception ex) {
    fail("Using localConfigurationProvider. Should not get any exception.");
  } finally {
    if (resourceManager != null) {
      resourceManager.stop();
    }
  }
}
Example #11
Source File: TestJobHistoryEventHandler.java, from hadoop (Apache License 2.0)
@Test
public void testGetHistoryIntermediateDoneDirForUser() throws IOException {
  // Test relative path
  Configuration conf = new Configuration();
  conf.set(JHAdminConfig.MR_HISTORY_INTERMEDIATE_DONE_DIR,
      "/mapred/history/done_intermediate");
  conf.set(MRJobConfig.USER_NAME, System.getProperty("user.name"));
  String pathStr = JobHistoryUtils.getHistoryIntermediateDoneDirForUser(conf);
  Assert.assertEquals("/mapred/history/done_intermediate/" +
      System.getProperty("user.name"), pathStr);

  // Test fully qualified path
  // Create default configuration pointing to the minicluster
  conf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY,
      dfsCluster.getURI().toString());
  FileOutputStream os = new FileOutputStream(coreSitePath);
  conf.writeXml(os);
  os.close();
  // Simulate execution under a non-default namenode
  conf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY,
      "file:///");
  pathStr = JobHistoryUtils.getHistoryIntermediateDoneDirForUser(conf);
  Assert.assertEquals(dfsCluster.getURI().toString() +
      "/mapred/history/done_intermediate/" +
      System.getProperty("user.name"), pathStr);
}
Example #12
Source File: ClientServiceDelegate.java, from hadoop (Apache License 2.0)
public ClientServiceDelegate(Configuration conf, ResourceMgrDelegate rm,
    JobID jobId, MRClientProtocol historyServerProxy) {
  this.conf = new Configuration(conf); // Cloning for modifying.
  // For faster redirects from AM to HS.
  this.conf.setInt(
      CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_MAX_RETRIES_KEY,
      this.conf.getInt(MRJobConfig.MR_CLIENT_TO_AM_IPC_MAX_RETRIES,
          MRJobConfig.DEFAULT_MR_CLIENT_TO_AM_IPC_MAX_RETRIES));

  this.conf.setInt(CommonConfigurationKeysPublic
          .IPC_CLIENT_CONNECT_MAX_RETRIES_ON_SOCKET_TIMEOUTS_KEY,
      this.conf.getInt(MRJobConfig.MR_CLIENT_TO_AM_IPC_MAX_RETRIES_ON_TIMEOUTS,
          MRJobConfig.DEFAULT_MR_CLIENT_TO_AM_IPC_MAX_RETRIES_ON_TIMEOUTS));
  this.rm = rm;
  this.jobId = jobId;
  this.historyServerProxy = historyServerProxy;
  this.appId = TypeConverter.toYarn(jobId).getAppId();
  notRunningJobs = new HashMap<JobState, HashMap<String, NotRunningJob>>();
}
Example #13
Source File: TestRMWebServicesHttpStaticUserPermissions.java, from hadoop (Apache License 2.0)
private static void setupAndStartRM() throws Exception {
  Configuration rmconf = new Configuration();
  rmconf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS,
      YarnConfiguration.DEFAULT_RM_AM_MAX_ATTEMPTS);
  rmconf.setClass(YarnConfiguration.RM_SCHEDULER,
      FifoScheduler.class, ResourceScheduler.class);
  rmconf.setBoolean(YarnConfiguration.YARN_ACL_ENABLE, true);
  rmconf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION,
      "kerberos");
  rmconf.set("yarn.resourcemanager.principal", spnegoPrincipal);
  rmconf.set("yarn.resourcemanager.keytab",
      spnegoKeytabFile.getAbsolutePath());
  rmconf.setBoolean("mockrm.webapp.enabled", true);
  UserGroupInformation.setConfiguration(rmconf);
  rm = new MockRM(rmconf);
  rm.start();
}
Example #14
Source File: TestOzoneFsHAURLs.java, from hadoop-ozone (Apache License 2.0)
/**
 * Test OM HA URLs with some unqualified fs.defaultFS.
 * @throws Exception
 */
@Test
public void testOtherDefaultFS() throws Exception {
  // Test scenarios where fs.defaultFS isn't a fully qualified o3fs

  // fs.defaultFS = file:///
  testWithDefaultFS(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_DEFAULT);

  // fs.defaultFS = hdfs://ns1/
  testWithDefaultFS("hdfs://ns1/");

  // fs.defaultFS = o3fs:///
  String unqualifiedFs1 = String.format(
      "%s:///", OzoneConsts.OZONE_URI_SCHEME);
  testWithDefaultFS(unqualifiedFs1);

  // fs.defaultFS = o3fs://bucketName.volumeName/
  String unqualifiedFs2 = String.format("%s://%s.%s/",
      OzoneConsts.OZONE_URI_SCHEME, bucketName, volumeName);
  testWithDefaultFS(unqualifiedFs2);
}
Example #15
Source File: TestOzoneFsHAURLs.java, from hadoop-ozone (Apache License 2.0)
/**
 * Helper function for testOtherDefaultFS(),
 * run fs -ls o3fs:/// against different fs.defaultFS input.
 *
 * @param defaultFS Desired fs.defaultFS to be used in the test
 * @throws Exception
 */
private void testWithDefaultFS(String defaultFS) throws Exception {
  OzoneConfiguration clientConf = new OzoneConfiguration(conf);
  clientConf.setQuietMode(false);
  clientConf.set(o3fsImplKey, o3fsImplValue);
  // fs.defaultFS = file:///
  clientConf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY,
      defaultFS);

  FsShell shell = new FsShell(clientConf);
  try {
    // Test case: ozone fs -ls o3fs:///
    // Expectation: Fail. fs.defaultFS is not a qualified o3fs URI.
    int res = ToolRunner.run(shell, new String[] {"-ls", "o3fs:///"});
    Assert.assertEquals(res, -1);
  } finally {
    shell.close();
  }
}
Example #16
Source File: ApplicationMasterLauncher.java, from hadoop (Apache License 2.0)
@Override
protected void serviceInit(Configuration conf) throws Exception {
  int threadCount = conf.getInt(
      YarnConfiguration.RM_AMLAUNCHER_THREAD_COUNT,
      YarnConfiguration.DEFAULT_RM_AMLAUNCHER_THREAD_COUNT);
  ThreadFactory tf = new ThreadFactoryBuilder()
      .setNameFormat("ApplicationMasterLauncher #%d")
      .build();
  launcherPool = new ThreadPoolExecutor(threadCount, threadCount, 1,
      TimeUnit.HOURS, new LinkedBlockingQueue<Runnable>());
  launcherPool.setThreadFactory(tf);

  Configuration newConf = new YarnConfiguration(conf);
  newConf.setInt(CommonConfigurationKeysPublic
          .IPC_CLIENT_CONNECT_MAX_RETRIES_ON_SOCKET_TIMEOUTS_KEY,
      conf.getInt(YarnConfiguration.RM_NODEMANAGER_CONNECT_RETIRES,
          YarnConfiguration.DEFAULT_RM_NODEMANAGER_CONNECT_RETIRES));
  setConfig(newConf);
  super.serviceInit(newConf);
}
Example #17
Source File: Server.java, from hadoop (Apache License 2.0)
ConnectionManager() {
  this.idleScanTimer = new Timer(
      "IPC Server idle connection scanner for port " + getPort(), true);
  this.idleScanThreshold = conf.getInt(
      CommonConfigurationKeysPublic.IPC_CLIENT_IDLETHRESHOLD_KEY,
      CommonConfigurationKeysPublic.IPC_CLIENT_IDLETHRESHOLD_DEFAULT);
  this.idleScanInterval = conf.getInt(
      CommonConfigurationKeys.IPC_CLIENT_CONNECTION_IDLESCANINTERVAL_KEY,
      CommonConfigurationKeys.IPC_CLIENT_CONNECTION_IDLESCANINTERVAL_DEFAULT);
  this.maxIdleTime = 2 * conf.getInt(
      CommonConfigurationKeysPublic.IPC_CLIENT_CONNECTION_MAXIDLETIME_KEY,
      CommonConfigurationKeysPublic.IPC_CLIENT_CONNECTION_MAXIDLETIME_DEFAULT);
  this.maxIdleToClose = conf.getInt(
      CommonConfigurationKeysPublic.IPC_CLIENT_KILL_MAX_KEY,
      CommonConfigurationKeysPublic.IPC_CLIENT_KILL_MAX_DEFAULT);
  this.maxConnections = conf.getInt(
      CommonConfigurationKeysPublic.IPC_SERVER_MAX_CONNECTIONS_KEY,
      CommonConfigurationKeysPublic.IPC_SERVER_MAX_CONNECTIONS_DEFAULT);
  // create a set with concurrency -and- a thread-safe iterator, add 2
  // for listener and idle closer threads
  this.connections = Collections.newSetFromMap(
      new ConcurrentHashMap<Connection, Boolean>(
          maxQueueSize, 0.75f, readThreads + 2));
}
Example #18
Source File: Client.java, from hadoop (Apache License 2.0)
/**
 * Returns a ConnectionId object.
 * @param addr Remote address for the connection.
 * @param protocol Protocol for RPC.
 * @param ticket UGI
 * @param rpcTimeout timeout
 * @param conf Configuration object
 * @return A ConnectionId instance
 * @throws IOException
 */
static ConnectionId getConnectionId(InetSocketAddress addr,
    Class<?> protocol, UserGroupInformation ticket, int rpcTimeout,
    RetryPolicy connectionRetryPolicy, Configuration conf) throws IOException {

  if (connectionRetryPolicy == null) {
    final int max = conf.getInt(
        CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_MAX_RETRIES_KEY,
        CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_MAX_RETRIES_DEFAULT);
    final int retryInterval = conf.getInt(
        CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_RETRY_INTERVAL_KEY,
        CommonConfigurationKeysPublic
            .IPC_CLIENT_CONNECT_RETRY_INTERVAL_DEFAULT);

    connectionRetryPolicy = RetryPolicies.retryUpToMaximumCountWithFixedSleep(
        max, retryInterval, TimeUnit.MILLISECONDS);
  }

  return new ConnectionId(addr, protocol, ticket, rpcTimeout,
      connectionRetryPolicy, conf);
}
Example #19
Source File: TestOzoneFileSystem.java, from hadoop-ozone (Apache License 2.0)
private void setupOzoneFileSystem()
    throws IOException, TimeoutException, InterruptedException {
  OzoneConfiguration conf = new OzoneConfiguration();
  cluster = MiniOzoneCluster.newBuilder(conf)
      .setNumDatanodes(3)
      .build();
  cluster.waitForClusterToBeReady();
  // create a volume and a bucket to be used by OzoneFileSystem
  OzoneBucket bucket = TestDataUtil.createVolumeAndBucket(cluster);
  volumeName = bucket.getVolumeName();
  bucketName = bucket.getName();

  String rootPath = String.format("%s://%s.%s/",
      OzoneConsts.OZONE_URI_SCHEME, bucket.getName(),
      bucket.getVolumeName());

  // Set the fs.defaultFS and start the filesystem
  conf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, rootPath);
  // Set the number of keys to be processed during batch operate.
  conf.setInt(OZONE_FS_ITERATE_BATCH_SIZE, 5);
  fs = FileSystem.get(conf);
}
Example #20
Source File: TestDelegationTokenRenewer.java, from hadoop (Apache License 2.0)
@Before
public void setUp() throws Exception {
  counter = new AtomicInteger(0);
  conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION,
      "kerberos");
  UserGroupInformation.setConfiguration(conf);
  eventQueue = new LinkedBlockingQueue<Event>();
  dispatcher = new AsyncDispatcher(eventQueue);
  Renewer.reset();
  delegationTokenRenewer = createNewDelegationTokenRenewer(conf, counter);
  RMContext mockContext = mock(RMContext.class);
  ClientRMService mockClientRMService = mock(ClientRMService.class);
  when(mockContext.getSystemCredentialsForApps()).thenReturn(
      new ConcurrentHashMap<ApplicationId, ByteBuffer>());
  when(mockContext.getDelegationTokenRenewer()).thenReturn(
      delegationTokenRenewer);
  when(mockContext.getDispatcher()).thenReturn(dispatcher);
  when(mockContext.getClientRMService()).thenReturn(mockClientRMService);
  InetSocketAddress sockAddr =
      InetSocketAddress.createUnresolved("localhost", 1234);
  when(mockClientRMService.getBindAddress()).thenReturn(sockAddr);
  delegationTokenRenewer.setRMContext(mockContext);
  delegationTokenRenewer.init(conf);
  delegationTokenRenewer.start();
}
Example #21
Source File: TestDatanodeManager.java, from hadoop (Apache License 2.0)
@Test(timeout = 100000)
public void testRejectUnresolvedDatanodes() throws IOException {
  // Create the DatanodeManager which will be tested
  FSNamesystem fsn = Mockito.mock(FSNamesystem.class);
  Mockito.when(fsn.hasWriteLock()).thenReturn(true);

  Configuration conf = new Configuration();

  // Set configuration property for rejecting unresolved topology mapping
  conf.setBoolean(
      DFSConfigKeys.DFS_REJECT_UNRESOLVED_DN_TOPOLOGY_MAPPING_KEY, true);

  // set TestDatanodeManager.MyResolver to be used for topology resolving
  conf.setClass(
      CommonConfigurationKeysPublic.NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY,
      TestDatanodeManager.MyResolver.class, DNSToSwitchMapping.class);

  // create DatanodeManager
  DatanodeManager dm = new DatanodeManager(Mockito.mock(BlockManager.class),
      fsn, conf);

  // storageID to register.
  String storageID = "someStorageID-123";
  DatanodeRegistration dr = Mockito.mock(DatanodeRegistration.class);
  Mockito.when(dr.getDatanodeUuid()).thenReturn(storageID);

  try {
    // Register this node
    dm.registerDatanode(dr);
    Assert.fail("Expected an UnresolvedTopologyException");
  } catch (UnresolvedTopologyException ute) {
    LOG.info("Expected - topology is not resolved and " +
        "registration is rejected.");
  } catch (Exception e) {
    Assert.fail("Expected an UnresolvedTopologyException");
  }
}
Example #22
Source File: HistoryClientService.java, from hadoop (Apache License 2.0)
protected void serviceStart() throws Exception {
  Configuration conf = getConfig();
  YarnRPC rpc = YarnRPC.create(conf);
  initializeWebApp(conf);
  InetSocketAddress address = conf.getSocketAddr(
      JHAdminConfig.MR_HISTORY_BIND_HOST,
      JHAdminConfig.MR_HISTORY_ADDRESS,
      JHAdminConfig.DEFAULT_MR_HISTORY_ADDRESS,
      JHAdminConfig.DEFAULT_MR_HISTORY_PORT);

  server = rpc.getServer(HSClientProtocol.class, protocolHandler, address,
      conf, jhsDTSecretManager,
      conf.getInt(JHAdminConfig.MR_HISTORY_CLIENT_THREAD_COUNT,
          JHAdminConfig.DEFAULT_MR_HISTORY_CLIENT_THREAD_COUNT));

  // Enable service authorization?
  if (conf.getBoolean(
      CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHORIZATION,
      false)) {
    server.refreshServiceAcl(conf, new ClientHSPolicyProvider());
  }

  server.start();
  this.bindAddress = conf.updateConnectAddr(
      JHAdminConfig.MR_HISTORY_BIND_HOST,
      JHAdminConfig.MR_HISTORY_ADDRESS,
      JHAdminConfig.DEFAULT_MR_HISTORY_ADDRESS,
      server.getListenerAddress());
  LOG.info("Instantiated HistoryClientService at " + this.bindAddress);

  super.serviceStart();
}
Example #23
Source File: TestHSAdminServer.java, from hadoop (Apache License 2.0)
@Before
public void init() throws HadoopIllegalArgumentException, IOException {
  conf = new JobConf();
  conf.set(JHAdminConfig.JHS_ADMIN_ADDRESS, "0.0.0.0:0");
  conf.setClass("hadoop.security.group.mapping",
      MockUnixGroupsMapping.class, GroupMappingServiceProvider.class);
  conf.setLong("hadoop.security.groups.cache.secs", groupRefreshTimeoutSec);
  conf.setBoolean(
      CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHORIZATION,
      securityEnabled);
  Groups.getUserToGroupsMappingService(conf);
  jobHistoryService = mock(JobHistory.class);
  alds = mock(AggregatedLogDeletionService.class);

  hsAdminServer = new HSAdminServer(alds, jobHistoryService) {
    @Override
    protected Configuration createConf() {
      return conf;
    }
  };
  hsAdminServer.init(conf);
  hsAdminServer.start();
  conf.setSocketAddr(JHAdminConfig.JHS_ADMIN_ADDRESS,
      hsAdminServer.clientRpcServer.getListenerAddress());
  hsAdminClient = new HSAdmin(conf);
}
Example #24
Source File: TestHistoryFileManager.java, from hadoop (Apache License 2.0)
@Test
public void testCreateDirsWithAdditionalFileSystem() throws Exception {
  dfsCluster.getFileSystem().setSafeMode(
      HdfsConstants.SafeModeAction.SAFEMODE_LEAVE);
  dfsCluster2.getFileSystem().setSafeMode(
      HdfsConstants.SafeModeAction.SAFEMODE_LEAVE);
  Assert.assertFalse(dfsCluster.getFileSystem().isInSafeMode());
  Assert.assertFalse(dfsCluster2.getFileSystem().isInSafeMode());

  // Set default configuration to the first cluster
  Configuration conf = new Configuration(false);
  conf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY,
      dfsCluster.getURI().toString());
  FileOutputStream os = new FileOutputStream(coreSitePath);
  conf.writeXml(os);
  os.close();

  testTryCreateHistoryDirs(dfsCluster2.getConfiguration(0), true);

  // Directories should be created only in the default file system (dfsCluster)
  Assert.assertTrue(dfsCluster.getFileSystem()
      .exists(new Path(getDoneDirNameForTest())));
  Assert.assertTrue(dfsCluster.getFileSystem()
      .exists(new Path(getIntermediateDoneDirNameForTest())));
  Assert.assertFalse(dfsCluster2.getFileSystem()
      .exists(new Path(getDoneDirNameForTest())));
  Assert.assertFalse(dfsCluster2.getFileSystem()
      .exists(new Path(getIntermediateDoneDirNameForTest())));
}
Example #25
Source File: TestIPC.java, from hadoop (Apache License 2.0)
@Test(timeout = 60000)
public void testConnectionRetriesOnSocketTimeoutExceptions()
    throws IOException {
  Configuration conf = new Configuration();
  // set max retries to 0
  conf.setInt(CommonConfigurationKeysPublic
      .IPC_CLIENT_CONNECT_MAX_RETRIES_ON_SOCKET_TIMEOUTS_KEY, 0);
  assertRetriesOnSocketTimeouts(conf, 1);

  // set max retries to 3
  conf.setInt(CommonConfigurationKeysPublic
      .IPC_CLIENT_CONNECT_MAX_RETRIES_ON_SOCKET_TIMEOUTS_KEY, 3);
  assertRetriesOnSocketTimeouts(conf, 4);
}
Example #26
Source File: TestAMRMClientContainerRequest.java, from hadoop (Apache License 2.0)
@Test
public void testInvalidValidWhenOldRemoved() {
  AMRMClientImpl<ContainerRequest> client =
      new AMRMClientImpl<ContainerRequest>();
  Configuration conf = new Configuration();
  conf.setClass(
      CommonConfigurationKeysPublic.NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY,
      MyResolver.class, DNSToSwitchMapping.class);
  client.init(conf);

  Resource capability = Resource.newInstance(1024, 1, 1);
  ContainerRequest request1 =
      new ContainerRequest(capability, new String[] {"host1", "host2"},
          null, Priority.newInstance(1), false);
  client.addContainerRequest(request1);
  client.removeContainerRequest(request1);

  ContainerRequest request2 =
      new ContainerRequest(capability, new String[] {"host3"},
          null, Priority.newInstance(1), true);
  client.addContainerRequest(request2);
  client.removeContainerRequest(request2);

  ContainerRequest request3 =
      new ContainerRequest(capability, new String[] {"host1", "host2"},
          null, Priority.newInstance(1), false);
  client.addContainerRequest(request3);
  client.removeContainerRequest(request3);

  ContainerRequest request4 =
      new ContainerRequest(capability, null,
          new String[] {"rack1"}, Priority.newInstance(1), true);
  client.addContainerRequest(request4);
}
Example #27
Source File: TestAMAuthorization.java, from hadoop (Apache License 2.0)
@Parameters
public static Collection<Object[]> configs() {
  Configuration conf = new Configuration();
  Configuration confWithSecurity = new Configuration();
  confWithSecurity.set(
      CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION,
      UserGroupInformation.AuthenticationMethod.KERBEROS.toString());
  return Arrays.asList(new Object[][] {{ conf }, { confWithSecurity }});
}
Example #28
Source File: TestRMWebServicesDelegationTokenAuthentication.java, from hadoop (Apache License 2.0)
private static void setupAndStartRM() throws Exception {
  Configuration rmconf = new Configuration();
  rmconf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS,
      YarnConfiguration.DEFAULT_RM_AM_MAX_ATTEMPTS);
  rmconf.setClass(YarnConfiguration.RM_SCHEDULER,
      FifoScheduler.class, ResourceScheduler.class);
  rmconf.setBoolean(YarnConfiguration.YARN_ACL_ENABLE, true);
  String httpPrefix = "hadoop.http.authentication.";
  rmconf.setStrings(httpPrefix + "type", "kerberos");
  rmconf.set(httpPrefix + KerberosAuthenticationHandler.PRINCIPAL,
      httpSpnegoPrincipal);
  rmconf.set(httpPrefix + KerberosAuthenticationHandler.KEYTAB,
      httpSpnegoKeytabFile.getAbsolutePath());
  // use any file for signature secret
  rmconf.set(httpPrefix + AuthenticationFilter.SIGNATURE_SECRET + ".file",
      httpSpnegoKeytabFile.getAbsolutePath());
  rmconf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION,
      "kerberos");
  rmconf.setBoolean(YarnConfiguration.RM_WEBAPP_DELEGATION_TOKEN_AUTH_FILTER,
      true);
  rmconf.set("hadoop.http.filter.initializers",
      AuthenticationFilterInitializer.class.getName());
  rmconf.set(YarnConfiguration.RM_WEBAPP_SPNEGO_USER_NAME_KEY,
      httpSpnegoPrincipal);
  rmconf.set(YarnConfiguration.RM_KEYTAB,
      httpSpnegoKeytabFile.getAbsolutePath());
  rmconf.set(YarnConfiguration.RM_WEBAPP_SPNEGO_KEYTAB_FILE_KEY,
      httpSpnegoKeytabFile.getAbsolutePath());
  rmconf.set(YarnConfiguration.NM_WEBAPP_SPNEGO_USER_NAME_KEY,
      httpSpnegoPrincipal);
  rmconf.set(YarnConfiguration.NM_WEBAPP_SPNEGO_KEYTAB_FILE_KEY,
      httpSpnegoKeytabFile.getAbsolutePath());
  rmconf.setBoolean("mockrm.webapp.enabled", true);
  rmconf.set("yarn.resourcemanager.proxyuser.client.hosts", "*");
  rmconf.set("yarn.resourcemanager.proxyuser.client.groups", "*");
  UserGroupInformation.setConfiguration(rmconf);
  rm = new MockRM(rmconf);
  rm.start();
}
Example #29
Source File: TestRMRestart.java, from hadoop (Apache License 2.0)
@Test(timeout = 60000)
public void testAppSubmissionWithOldDelegationTokenAfterRMRestart()
    throws Exception {
  conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS, 2);
  conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION,
      "kerberos");
  conf.set(YarnConfiguration.RM_ADDRESS, "localhost:8032");
  UserGroupInformation.setConfiguration(conf);
  MemoryRMStateStore memStore = new MemoryRMStateStore();
  memStore.init(conf);

  MockRM rm1 = new TestSecurityMockRM(conf, memStore);
  rm1.start();

  GetDelegationTokenRequest request1 =
      GetDelegationTokenRequest.newInstance("renewer1");
  UserGroupInformation.getCurrentUser().setAuthenticationMethod(
      AuthMethod.KERBEROS);
  GetDelegationTokenResponse response1 =
      rm1.getClientRMService().getDelegationToken(request1);
  Token<RMDelegationTokenIdentifier> token1 =
      ConverterUtils.convertFromYarn(response1.getRMDelegationToken(), rmAddr);

  // start new RM
  MockRM rm2 = new TestSecurityMockRM(conf, memStore);
  rm2.start();

  // submit an app with the old delegation token got from previous RM.
  Credentials ts = new Credentials();
  ts.addToken(token1.getService(), token1);
  RMApp app = rm2.submitApp(200, "name", "user",
      new HashMap<ApplicationAccessType, String>(), false, "default", 1, ts);
  rm2.waitForState(app.getApplicationId(), RMAppState.ACCEPTED);
}
Example #30
Source File: HAServiceTarget.java, from hadoop (Apache License 2.0)
private HAServiceProtocol getProxyForAddress(Configuration conf,
    int timeoutMs, InetSocketAddress addr) throws IOException {
  Configuration confCopy = new Configuration(conf);
  // Lower the timeout so we quickly fail to connect
  confCopy.setInt(
      CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_MAX_RETRIES_KEY, 1);
  SocketFactory factory = NetUtils.getDefaultSocketFactory(confCopy);
  return new HAServiceProtocolClientSideTranslatorPB(
      addr, confCopy, factory, timeoutMs);
}