org.apache.hadoop.hdfs.protocol.datatransfer.TrustedChannelResolver Java Examples
The following examples show how to use
org.apache.hadoop.hdfs.protocol.datatransfer.TrustedChannelResolver.
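Before walking through the examples, the snippet below is a minimal sketch of the two entry points that every example on this page relies on: TrustedChannelResolver.getInstance(Configuration) loads the configured resolver, and the isTrusted() / isTrusted(InetAddress) checks decide whether a data-transfer channel may skip SASL negotiation. The class name TrustedChannelResolverDemo and the peer address are placeholders invented for this sketch, not part of Hadoop.

import java.net.InetAddress;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.protocol.datatransfer.TrustedChannelResolver;

public class TrustedChannelResolverDemo {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();

    // Loads whichever resolver implementation the configuration registers;
    // the stock default implementation reports every channel as untrusted,
    // so SASL negotiation is not skipped unless a custom resolver is plugged in.
    TrustedChannelResolver resolver = TrustedChannelResolver.getInstance(conf);

    // isTrusted() asks about the local end of the channel,
    // isTrusted(InetAddress) asks about a remote peer.
    boolean localTrusted = resolver.isTrusted();
    boolean peerTrusted = resolver.isTrusted(InetAddress.getByName("192.0.2.10"));

    System.out.println("local trusted: " + localTrusted
        + ", peer trusted: " + peerTrusted);
  }
}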
Example #1
Source File: Dispatcher.java From hadoop with Apache License 2.0
public Dispatcher(NameNodeConnector nnc, Set<String> includedNodes,
    Set<String> excludedNodes, long movedWinWidth, int moverThreads,
    int dispatcherThreads, int maxConcurrentMovesPerNode, Configuration conf) {
  this.nnc = nnc;
  this.excludedNodes = excludedNodes;
  this.includedNodes = includedNodes;
  this.movedBlocks = new MovedBlocks<StorageGroup>(movedWinWidth);

  this.cluster = NetworkTopology.getInstance(conf);

  this.moveExecutor = Executors.newFixedThreadPool(moverThreads);
  this.dispatchExecutor = dispatcherThreads == 0 ? null
      : Executors.newFixedThreadPool(dispatcherThreads);
  this.maxConcurrentMovesPerNode = maxConcurrentMovesPerNode;

  this.saslClient = new SaslDataTransferClient(conf,
      DataTransferSaslUtil.getSaslPropertiesResolver(conf),
      TrustedChannelResolver.getInstance(conf), nnc.fallbackToSimpleAuth);
}
Example #2
Source File: Dispatcher.java From big-c with Apache License 2.0
public Dispatcher(NameNodeConnector nnc, Set<String> includedNodes,
    Set<String> excludedNodes, long movedWinWidth, int moverThreads,
    int dispatcherThreads, int maxConcurrentMovesPerNode, Configuration conf) {
  this.nnc = nnc;
  this.excludedNodes = excludedNodes;
  this.includedNodes = includedNodes;
  this.movedBlocks = new MovedBlocks<StorageGroup>(movedWinWidth);

  this.cluster = NetworkTopology.getInstance(conf);

  this.moveExecutor = Executors.newFixedThreadPool(moverThreads);
  this.dispatchExecutor = dispatcherThreads == 0 ? null
      : Executors.newFixedThreadPool(dispatcherThreads);
  this.maxConcurrentMovesPerNode = maxConcurrentMovesPerNode;

  this.saslClient = new SaslDataTransferClient(conf,
      DataTransferSaslUtil.getSaslPropertiesResolver(conf),
      TrustedChannelResolver.getInstance(conf), nnc.fallbackToSimpleAuth);
}
Example #3
Source File: DFSClient.java From hadoop with Apache License 2.0
/**
 * Create a new DFSClient connected to the given nameNodeUri or rpcNamenode.
 * If HA is enabled and a positive value is set for
 * {@link DFSConfigKeys#DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_KEY} in the
 * configuration, the DFSClient will use {@link LossyRetryInvocationHandler}
 * as its RetryInvocationHandler. Otherwise one of nameNodeUri or rpcNamenode
 * must be null.
 */
@VisibleForTesting
public DFSClient(URI nameNodeUri, ClientProtocol rpcNamenode,
    Configuration conf, FileSystem.Statistics stats) throws IOException {
  SpanReceiverHost.get(conf, DFSConfigKeys.DFS_CLIENT_HTRACE_PREFIX);
  traceSampler = new SamplerBuilder(TraceUtils.
      wrapHadoopConf(DFSConfigKeys.DFS_CLIENT_HTRACE_PREFIX, conf)).build();
  // Copy only the required DFSClient configuration
  this.dfsClientConf = new Conf(conf);
  if (this.dfsClientConf.useLegacyBlockReaderLocal) {
    LOG.debug("Using legacy short-circuit local reads.");
  }
  this.conf = conf;
  this.stats = stats;
  this.socketFactory = NetUtils.getSocketFactory(conf, ClientProtocol.class);
  this.dtpReplaceDatanodeOnFailure = ReplaceDatanodeOnFailure.get(conf);

  this.ugi = UserGroupInformation.getCurrentUser();

  this.authority = nameNodeUri == null ? "null" : nameNodeUri.getAuthority();
  this.clientName = "DFSClient_" + dfsClientConf.taskId + "_" +
      DFSUtil.getRandom().nextInt() + "_" + Thread.currentThread().getId();
  int numResponseToDrop = conf.getInt(
      DFSConfigKeys.DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_KEY,
      DFSConfigKeys.DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_DEFAULT);
  NameNodeProxies.ProxyAndInfo<ClientProtocol> proxyInfo = null;
  AtomicBoolean nnFallbackToSimpleAuth = new AtomicBoolean(false);
  if (numResponseToDrop > 0) {
    // This case is used for testing.
    LOG.warn(DFSConfigKeys.DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_KEY
        + " is set to " + numResponseToDrop
        + ", this hacked client will proactively drop responses");
    proxyInfo = NameNodeProxies.createProxyWithLossyRetryHandler(conf,
        nameNodeUri, ClientProtocol.class, numResponseToDrop,
        nnFallbackToSimpleAuth);
  }
  if (proxyInfo != null) {
    this.dtService = proxyInfo.getDelegationTokenService();
    this.namenode = proxyInfo.getProxy();
  } else if (rpcNamenode != null) {
    // This case is used for testing.
    Preconditions.checkArgument(nameNodeUri == null);
    this.namenode = rpcNamenode;
    dtService = null;
  } else {
    Preconditions.checkArgument(nameNodeUri != null, "null URI");
    proxyInfo = NameNodeProxies.createProxy(conf, nameNodeUri,
        ClientProtocol.class, nnFallbackToSimpleAuth);
    this.dtService = proxyInfo.getDelegationTokenService();
    this.namenode = proxyInfo.getProxy();
  }

  String localInterfaces[] =
      conf.getTrimmedStrings(DFSConfigKeys.DFS_CLIENT_LOCAL_INTERFACES);
  localInterfaceAddrs = getLocalInterfaceAddrs(localInterfaces);
  if (LOG.isDebugEnabled() && 0 != localInterfaces.length) {
    LOG.debug("Using local interfaces [" +
        Joiner.on(',').join(localInterfaces) + "] with addresses [" +
        Joiner.on(',').join(localInterfaceAddrs) + "]");
  }

  Boolean readDropBehind = (conf.get(DFS_CLIENT_CACHE_DROP_BEHIND_READS) == null) ?
      null : conf.getBoolean(DFS_CLIENT_CACHE_DROP_BEHIND_READS, false);
  Long readahead = (conf.get(DFS_CLIENT_CACHE_READAHEAD) == null) ?
      null : conf.getLong(DFS_CLIENT_CACHE_READAHEAD, 0);
  Boolean writeDropBehind = (conf.get(DFS_CLIENT_CACHE_DROP_BEHIND_WRITES) == null) ?
      null : conf.getBoolean(DFS_CLIENT_CACHE_DROP_BEHIND_WRITES, false);
  this.defaultReadCachingStrategy =
      new CachingStrategy(readDropBehind, readahead);
  this.defaultWriteCachingStrategy =
      new CachingStrategy(writeDropBehind, readahead);
  this.clientContext = ClientContext.get(
      conf.get(DFS_CLIENT_CONTEXT, DFS_CLIENT_CONTEXT_DEFAULT),
      dfsClientConf);
  this.hedgedReadThresholdMillis = conf.getLong(
      DFSConfigKeys.DFS_DFSCLIENT_HEDGED_READ_THRESHOLD_MILLIS,
      DFSConfigKeys.DEFAULT_DFSCLIENT_HEDGED_READ_THRESHOLD_MILLIS);
  int numThreads = conf.getInt(
      DFSConfigKeys.DFS_DFSCLIENT_HEDGED_READ_THREADPOOL_SIZE,
      DFSConfigKeys.DEFAULT_DFSCLIENT_HEDGED_READ_THREADPOOL_SIZE);
  if (numThreads > 0) {
    this.initThreadsNumForHedgedReads(numThreads);
  }
  this.saslClient = new SaslDataTransferClient(
      conf, DataTransferSaslUtil.getSaslPropertiesResolver(conf),
      TrustedChannelResolver.getInstance(conf), nnFallbackToSimpleAuth);
}
Example #4
Source File: DNConf.java From hadoop with Apache License 2.0
public DNConf(Configuration conf) {
  this.conf = conf;
  socketTimeout = conf.getInt(DFS_CLIENT_SOCKET_TIMEOUT_KEY,
      HdfsServerConstants.READ_TIMEOUT);
  socketWriteTimeout = conf.getInt(DFS_DATANODE_SOCKET_WRITE_TIMEOUT_KEY,
      HdfsServerConstants.WRITE_TIMEOUT);
  socketKeepaliveTimeout = conf.getInt(
      DFSConfigKeys.DFS_DATANODE_SOCKET_REUSE_KEEPALIVE_KEY,
      DFSConfigKeys.DFS_DATANODE_SOCKET_REUSE_KEEPALIVE_DEFAULT);

  /* Based on results on different platforms, we might need set the default
   * to false on some of them. */
  transferToAllowed = conf.getBoolean(
      DFS_DATANODE_TRANSFERTO_ALLOWED_KEY,
      DFS_DATANODE_TRANSFERTO_ALLOWED_DEFAULT);

  writePacketSize = conf.getInt(DFS_CLIENT_WRITE_PACKET_SIZE_KEY,
      DFS_CLIENT_WRITE_PACKET_SIZE_DEFAULT);

  readaheadLength = conf.getLong(
      DFSConfigKeys.DFS_DATANODE_READAHEAD_BYTES_KEY,
      DFSConfigKeys.DFS_DATANODE_READAHEAD_BYTES_DEFAULT);
  dropCacheBehindWrites = conf.getBoolean(
      DFSConfigKeys.DFS_DATANODE_DROP_CACHE_BEHIND_WRITES_KEY,
      DFSConfigKeys.DFS_DATANODE_DROP_CACHE_BEHIND_WRITES_DEFAULT);
  syncBehindWrites = conf.getBoolean(
      DFSConfigKeys.DFS_DATANODE_SYNC_BEHIND_WRITES_KEY,
      DFSConfigKeys.DFS_DATANODE_SYNC_BEHIND_WRITES_DEFAULT);
  syncBehindWritesInBackground = conf.getBoolean(
      DFSConfigKeys.DFS_DATANODE_SYNC_BEHIND_WRITES_IN_BACKGROUND_KEY,
      DFSConfigKeys.DFS_DATANODE_SYNC_BEHIND_WRITES_IN_BACKGROUND_DEFAULT);
  dropCacheBehindReads = conf.getBoolean(
      DFSConfigKeys.DFS_DATANODE_DROP_CACHE_BEHIND_READS_KEY,
      DFSConfigKeys.DFS_DATANODE_DROP_CACHE_BEHIND_READS_DEFAULT);
  connectToDnViaHostname = conf.getBoolean(
      DFSConfigKeys.DFS_DATANODE_USE_DN_HOSTNAME,
      DFSConfigKeys.DFS_DATANODE_USE_DN_HOSTNAME_DEFAULT);
  this.blockReportInterval = conf.getLong(DFS_BLOCKREPORT_INTERVAL_MSEC_KEY,
      DFS_BLOCKREPORT_INTERVAL_MSEC_DEFAULT);
  this.blockReportSplitThreshold = conf.getLong(DFS_BLOCKREPORT_SPLIT_THRESHOLD_KEY,
      DFS_BLOCKREPORT_SPLIT_THRESHOLD_DEFAULT);
  this.cacheReportInterval = conf.getLong(DFS_CACHEREPORT_INTERVAL_MSEC_KEY,
      DFS_CACHEREPORT_INTERVAL_MSEC_DEFAULT);

  this.dfsclientSlowIoWarningThresholdMs = conf.getLong(
      DFSConfigKeys.DFS_CLIENT_SLOW_IO_WARNING_THRESHOLD_KEY,
      DFSConfigKeys.DFS_CLIENT_SLOW_IO_WARNING_THRESHOLD_DEFAULT);
  this.datanodeSlowIoWarningThresholdMs = conf.getLong(
      DFSConfigKeys.DFS_DATANODE_SLOW_IO_WARNING_THRESHOLD_KEY,
      DFSConfigKeys.DFS_DATANODE_SLOW_IO_WARNING_THRESHOLD_DEFAULT);

  long initBRDelay = conf.getLong(
      DFS_BLOCKREPORT_INITIAL_DELAY_KEY,
      DFS_BLOCKREPORT_INITIAL_DELAY_DEFAULT) * 1000L;
  if (initBRDelay >= blockReportInterval) {
    initBRDelay = 0;
    DataNode.LOG.info("dfs.blockreport.initialDelay is greater than " +
        "dfs.blockreport.intervalMsec." + " Setting initial delay to 0 msec:");
  }
  initialBlockReportDelay = initBRDelay;

  heartBeatInterval = conf.getLong(DFS_HEARTBEAT_INTERVAL_KEY,
      DFS_HEARTBEAT_INTERVAL_DEFAULT) * 1000L;
  this.deleteReportInterval = 100 * heartBeatInterval;

  // do we need to sync block file contents to disk when blockfile is closed?
  this.syncOnClose = conf.getBoolean(DFS_DATANODE_SYNCONCLOSE_KEY,
      DFS_DATANODE_SYNCONCLOSE_DEFAULT);

  this.minimumNameNodeVersion = conf.get(DFS_DATANODE_MIN_SUPPORTED_NAMENODE_VERSION_KEY,
      DFS_DATANODE_MIN_SUPPORTED_NAMENODE_VERSION_DEFAULT);

  this.encryptDataTransfer = conf.getBoolean(DFS_ENCRYPT_DATA_TRANSFER_KEY,
      DFS_ENCRYPT_DATA_TRANSFER_DEFAULT);
  this.encryptionAlgorithm = conf.get(DFS_DATA_ENCRYPTION_ALGORITHM_KEY);
  this.trustedChannelResolver = TrustedChannelResolver.getInstance(conf);
  this.saslPropsResolver = DataTransferSaslUtil.getSaslPropertiesResolver(
      conf);
  this.ignoreSecurePortsForTesting = conf.getBoolean(
      IGNORE_SECURE_PORTS_FOR_TESTING_KEY,
      IGNORE_SECURE_PORTS_FOR_TESTING_DEFAULT);

  this.xceiverStopTimeout = conf.getLong(
      DFS_DATANODE_XCEIVER_STOP_TIMEOUT_MILLIS_KEY,
      DFS_DATANODE_XCEIVER_STOP_TIMEOUT_MILLIS_DEFAULT);

  this.maxLockedMemory = conf.getLong(
      DFS_DATANODE_MAX_LOCKED_MEMORY_KEY,
      DFS_DATANODE_MAX_LOCKED_MEMORY_DEFAULT);

  this.restartReplicaExpiry = conf.getLong(
      DFS_DATANODE_RESTART_REPLICA_EXPIRY_KEY,
      DFS_DATANODE_RESTART_REPLICA_EXPIRY_DEFAULT) * 1000L;
}
Example #5
Source File: DFSClient.java From big-c with Apache License 2.0
/**
 * Create a new DFSClient connected to the given nameNodeUri or rpcNamenode.
 * If HA is enabled and a positive value is set for
 * {@link DFSConfigKeys#DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_KEY} in the
 * configuration, the DFSClient will use {@link LossyRetryInvocationHandler}
 * as its RetryInvocationHandler. Otherwise one of nameNodeUri or rpcNamenode
 * must be null.
 */
@VisibleForTesting
public DFSClient(URI nameNodeUri, ClientProtocol rpcNamenode,
    Configuration conf, FileSystem.Statistics stats) throws IOException {
  SpanReceiverHost.get(conf, DFSConfigKeys.DFS_CLIENT_HTRACE_PREFIX);
  traceSampler = new SamplerBuilder(TraceUtils.
      wrapHadoopConf(DFSConfigKeys.DFS_CLIENT_HTRACE_PREFIX, conf)).build();
  // Copy only the required DFSClient configuration
  this.dfsClientConf = new Conf(conf);
  if (this.dfsClientConf.useLegacyBlockReaderLocal) {
    LOG.debug("Using legacy short-circuit local reads.");
  }
  this.conf = conf;
  this.stats = stats;
  this.socketFactory = NetUtils.getSocketFactory(conf, ClientProtocol.class);
  this.dtpReplaceDatanodeOnFailure = ReplaceDatanodeOnFailure.get(conf);

  this.ugi = UserGroupInformation.getCurrentUser();

  this.authority = nameNodeUri == null ? "null" : nameNodeUri.getAuthority();
  this.clientName = "DFSClient_" + dfsClientConf.taskId + "_" +
      DFSUtil.getRandom().nextInt() + "_" + Thread.currentThread().getId();
  int numResponseToDrop = conf.getInt(
      DFSConfigKeys.DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_KEY,
      DFSConfigKeys.DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_DEFAULT);
  NameNodeProxies.ProxyAndInfo<ClientProtocol> proxyInfo = null;
  AtomicBoolean nnFallbackToSimpleAuth = new AtomicBoolean(false);
  if (numResponseToDrop > 0) {
    // This case is used for testing.
    LOG.warn(DFSConfigKeys.DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_KEY
        + " is set to " + numResponseToDrop
        + ", this hacked client will proactively drop responses");
    proxyInfo = NameNodeProxies.createProxyWithLossyRetryHandler(conf,
        nameNodeUri, ClientProtocol.class, numResponseToDrop,
        nnFallbackToSimpleAuth);
  }
  if (proxyInfo != null) {
    this.dtService = proxyInfo.getDelegationTokenService();
    this.namenode = proxyInfo.getProxy();
  } else if (rpcNamenode != null) {
    // This case is used for testing.
    Preconditions.checkArgument(nameNodeUri == null);
    this.namenode = rpcNamenode;
    dtService = null;
  } else {
    Preconditions.checkArgument(nameNodeUri != null, "null URI");
    proxyInfo = NameNodeProxies.createProxy(conf, nameNodeUri,
        ClientProtocol.class, nnFallbackToSimpleAuth);
    this.dtService = proxyInfo.getDelegationTokenService();
    this.namenode = proxyInfo.getProxy();
  }

  String localInterfaces[] =
      conf.getTrimmedStrings(DFSConfigKeys.DFS_CLIENT_LOCAL_INTERFACES);
  localInterfaceAddrs = getLocalInterfaceAddrs(localInterfaces);
  if (LOG.isDebugEnabled() && 0 != localInterfaces.length) {
    LOG.debug("Using local interfaces [" +
        Joiner.on(',').join(localInterfaces) + "] with addresses [" +
        Joiner.on(',').join(localInterfaceAddrs) + "]");
  }

  Boolean readDropBehind = (conf.get(DFS_CLIENT_CACHE_DROP_BEHIND_READS) == null) ?
      null : conf.getBoolean(DFS_CLIENT_CACHE_DROP_BEHIND_READS, false);
  Long readahead = (conf.get(DFS_CLIENT_CACHE_READAHEAD) == null) ?
      null : conf.getLong(DFS_CLIENT_CACHE_READAHEAD, 0);
  Boolean writeDropBehind = (conf.get(DFS_CLIENT_CACHE_DROP_BEHIND_WRITES) == null) ?
      null : conf.getBoolean(DFS_CLIENT_CACHE_DROP_BEHIND_WRITES, false);
  this.defaultReadCachingStrategy =
      new CachingStrategy(readDropBehind, readahead);
  this.defaultWriteCachingStrategy =
      new CachingStrategy(writeDropBehind, readahead);
  this.clientContext = ClientContext.get(
      conf.get(DFS_CLIENT_CONTEXT, DFS_CLIENT_CONTEXT_DEFAULT),
      dfsClientConf);
  this.hedgedReadThresholdMillis = conf.getLong(
      DFSConfigKeys.DFS_DFSCLIENT_HEDGED_READ_THRESHOLD_MILLIS,
      DFSConfigKeys.DEFAULT_DFSCLIENT_HEDGED_READ_THRESHOLD_MILLIS);
  int numThreads = conf.getInt(
      DFSConfigKeys.DFS_DFSCLIENT_HEDGED_READ_THREADPOOL_SIZE,
      DFSConfigKeys.DEFAULT_DFSCLIENT_HEDGED_READ_THREADPOOL_SIZE);
  if (numThreads > 0) {
    this.initThreadsNumForHedgedReads(numThreads);
  }
  this.saslClient = new SaslDataTransferClient(
      conf, DataTransferSaslUtil.getSaslPropertiesResolver(conf),
      TrustedChannelResolver.getInstance(conf), nnFallbackToSimpleAuth);
}
Example #6
Source File: DNConf.java From big-c with Apache License 2.0
public DNConf(Configuration conf) {
  this.conf = conf;
  socketTimeout = conf.getInt(DFS_CLIENT_SOCKET_TIMEOUT_KEY,
      HdfsServerConstants.READ_TIMEOUT);
  socketWriteTimeout = conf.getInt(DFS_DATANODE_SOCKET_WRITE_TIMEOUT_KEY,
      HdfsServerConstants.WRITE_TIMEOUT);
  socketKeepaliveTimeout = conf.getInt(
      DFSConfigKeys.DFS_DATANODE_SOCKET_REUSE_KEEPALIVE_KEY,
      DFSConfigKeys.DFS_DATANODE_SOCKET_REUSE_KEEPALIVE_DEFAULT);

  /* Based on results on different platforms, we might need set the default
   * to false on some of them. */
  transferToAllowed = conf.getBoolean(
      DFS_DATANODE_TRANSFERTO_ALLOWED_KEY,
      DFS_DATANODE_TRANSFERTO_ALLOWED_DEFAULT);

  writePacketSize = conf.getInt(DFS_CLIENT_WRITE_PACKET_SIZE_KEY,
      DFS_CLIENT_WRITE_PACKET_SIZE_DEFAULT);

  readaheadLength = conf.getLong(
      DFSConfigKeys.DFS_DATANODE_READAHEAD_BYTES_KEY,
      DFSConfigKeys.DFS_DATANODE_READAHEAD_BYTES_DEFAULT);
  dropCacheBehindWrites = conf.getBoolean(
      DFSConfigKeys.DFS_DATANODE_DROP_CACHE_BEHIND_WRITES_KEY,
      DFSConfigKeys.DFS_DATANODE_DROP_CACHE_BEHIND_WRITES_DEFAULT);
  syncBehindWrites = conf.getBoolean(
      DFSConfigKeys.DFS_DATANODE_SYNC_BEHIND_WRITES_KEY,
      DFSConfigKeys.DFS_DATANODE_SYNC_BEHIND_WRITES_DEFAULT);
  syncBehindWritesInBackground = conf.getBoolean(
      DFSConfigKeys.DFS_DATANODE_SYNC_BEHIND_WRITES_IN_BACKGROUND_KEY,
      DFSConfigKeys.DFS_DATANODE_SYNC_BEHIND_WRITES_IN_BACKGROUND_DEFAULT);
  dropCacheBehindReads = conf.getBoolean(
      DFSConfigKeys.DFS_DATANODE_DROP_CACHE_BEHIND_READS_KEY,
      DFSConfigKeys.DFS_DATANODE_DROP_CACHE_BEHIND_READS_DEFAULT);
  connectToDnViaHostname = conf.getBoolean(
      DFSConfigKeys.DFS_DATANODE_USE_DN_HOSTNAME,
      DFSConfigKeys.DFS_DATANODE_USE_DN_HOSTNAME_DEFAULT);
  this.blockReportInterval = conf.getLong(DFS_BLOCKREPORT_INTERVAL_MSEC_KEY,
      DFS_BLOCKREPORT_INTERVAL_MSEC_DEFAULT);
  this.blockReportSplitThreshold = conf.getLong(DFS_BLOCKREPORT_SPLIT_THRESHOLD_KEY,
      DFS_BLOCKREPORT_SPLIT_THRESHOLD_DEFAULT);
  this.cacheReportInterval = conf.getLong(DFS_CACHEREPORT_INTERVAL_MSEC_KEY,
      DFS_CACHEREPORT_INTERVAL_MSEC_DEFAULT);

  this.dfsclientSlowIoWarningThresholdMs = conf.getLong(
      DFSConfigKeys.DFS_CLIENT_SLOW_IO_WARNING_THRESHOLD_KEY,
      DFSConfigKeys.DFS_CLIENT_SLOW_IO_WARNING_THRESHOLD_DEFAULT);
  this.datanodeSlowIoWarningThresholdMs = conf.getLong(
      DFSConfigKeys.DFS_DATANODE_SLOW_IO_WARNING_THRESHOLD_KEY,
      DFSConfigKeys.DFS_DATANODE_SLOW_IO_WARNING_THRESHOLD_DEFAULT);

  long initBRDelay = conf.getLong(
      DFS_BLOCKREPORT_INITIAL_DELAY_KEY,
      DFS_BLOCKREPORT_INITIAL_DELAY_DEFAULT) * 1000L;
  if (initBRDelay >= blockReportInterval) {
    initBRDelay = 0;
    DataNode.LOG.info("dfs.blockreport.initialDelay is greater than " +
        "dfs.blockreport.intervalMsec." + " Setting initial delay to 0 msec:");
  }
  initialBlockReportDelay = initBRDelay;

  heartBeatInterval = conf.getLong(DFS_HEARTBEAT_INTERVAL_KEY,
      DFS_HEARTBEAT_INTERVAL_DEFAULT) * 1000L;
  this.deleteReportInterval = 100 * heartBeatInterval;

  // do we need to sync block file contents to disk when blockfile is closed?
  this.syncOnClose = conf.getBoolean(DFS_DATANODE_SYNCONCLOSE_KEY,
      DFS_DATANODE_SYNCONCLOSE_DEFAULT);

  this.minimumNameNodeVersion = conf.get(DFS_DATANODE_MIN_SUPPORTED_NAMENODE_VERSION_KEY,
      DFS_DATANODE_MIN_SUPPORTED_NAMENODE_VERSION_DEFAULT);

  this.encryptDataTransfer = conf.getBoolean(DFS_ENCRYPT_DATA_TRANSFER_KEY,
      DFS_ENCRYPT_DATA_TRANSFER_DEFAULT);
  this.encryptionAlgorithm = conf.get(DFS_DATA_ENCRYPTION_ALGORITHM_KEY);
  this.trustedChannelResolver = TrustedChannelResolver.getInstance(conf);
  this.saslPropsResolver = DataTransferSaslUtil.getSaslPropertiesResolver(
      conf);
  this.ignoreSecurePortsForTesting = conf.getBoolean(
      IGNORE_SECURE_PORTS_FOR_TESTING_KEY,
      IGNORE_SECURE_PORTS_FOR_TESTING_DEFAULT);

  this.xceiverStopTimeout = conf.getLong(
      DFS_DATANODE_XCEIVER_STOP_TIMEOUT_MILLIS_KEY,
      DFS_DATANODE_XCEIVER_STOP_TIMEOUT_MILLIS_DEFAULT);

  this.maxLockedMemory = conf.getLong(
      DFS_DATANODE_MAX_LOCKED_MEMORY_KEY,
      DFS_DATANODE_MAX_LOCKED_MEMORY_DEFAULT);

  this.restartReplicaExpiry = conf.getLong(
      DFS_DATANODE_RESTART_REPLICA_EXPIRY_KEY,
      DFS_DATANODE_RESTART_REPLICA_EXPIRY_DEFAULT) * 1000L;
}
Example #7
Source File: FanOutOneBlockAsyncDFSOutputSaslHelper.java From hbase with Apache License 2.0
static void trySaslNegotiate(Configuration conf, Channel channel, DatanodeInfo dnInfo,
    int timeoutMs, DFSClient client, Token<BlockTokenIdentifier> accessToken,
    Promise<Void> saslPromise) throws IOException {
  SaslDataTransferClient saslClient = client.getSaslDataTransferClient();
  SaslPropertiesResolver saslPropsResolver = SASL_ADAPTOR.getSaslPropsResolver(saslClient);
  TrustedChannelResolver trustedChannelResolver =
      SASL_ADAPTOR.getTrustedChannelResolver(saslClient);
  AtomicBoolean fallbackToSimpleAuth = SASL_ADAPTOR.getFallbackToSimpleAuth(saslClient);
  InetAddress addr = ((InetSocketAddress) channel.remoteAddress()).getAddress();
  if (trustedChannelResolver.isTrusted() || trustedChannelResolver.isTrusted(addr)) {
    saslPromise.trySuccess(null);
    return;
  }
  DataEncryptionKey encryptionKey = client.newDataEncryptionKey();
  if (encryptionKey != null) {
    if (LOG.isDebugEnabled()) {
      LOG.debug(
        "SASL client doing encrypted handshake for addr = " + addr + ", datanodeId = " + dnInfo);
    }
    doSaslNegotiation(conf, channel, timeoutMs, getUserNameFromEncryptionKey(encryptionKey),
      encryptionKeyToPassword(encryptionKey.encryptionKey),
      createSaslPropertiesForEncryption(encryptionKey.encryptionAlgorithm), saslPromise, client);
  } else if (!UserGroupInformation.isSecurityEnabled()) {
    if (LOG.isDebugEnabled()) {
      LOG.debug("SASL client skipping handshake in unsecured configuration for addr = " + addr
        + ", datanodeId = " + dnInfo);
    }
    saslPromise.trySuccess(null);
  } else if (dnInfo.getXferPort() < 1024) {
    if (LOG.isDebugEnabled()) {
      LOG.debug("SASL client skipping handshake in secured configuration with "
        + "privileged port for addr = " + addr + ", datanodeId = " + dnInfo);
    }
    saslPromise.trySuccess(null);
  } else if (fallbackToSimpleAuth != null && fallbackToSimpleAuth.get()) {
    if (LOG.isDebugEnabled()) {
      LOG.debug("SASL client skipping handshake in secured configuration with "
        + "unsecured cluster for addr = " + addr + ", datanodeId = " + dnInfo);
    }
    saslPromise.trySuccess(null);
  } else if (saslPropsResolver != null) {
    if (LOG.isDebugEnabled()) {
      LOG.debug(
        "SASL client doing general handshake for addr = " + addr + ", datanodeId = " + dnInfo);
    }
    doSaslNegotiation(conf, channel, timeoutMs, buildUsername(accessToken),
      buildClientPassword(accessToken), saslPropsResolver.getClientProperties(addr), saslPromise,
      client);
  } else {
    // It's a secured cluster using non-privileged ports, but no SASL. The only way this can
    // happen is if the DataNode has ignore.secure.ports.for.testing configured, so this is a rare
    // edge case.
    if (LOG.isDebugEnabled()) {
      LOG.debug("SASL client skipping handshake in secured configuration with no SASL "
        + "protection configured for addr = " + addr + ", datanodeId = " + dnInfo);
    }
    saslPromise.trySuccess(null);
  }
}
Example #8
Source File: SaslDataTransferClient.java From hadoop with Apache License 2.0
/**
 * Creates a new SaslDataTransferClient.
 *
 * @param conf the configuration
 * @param saslPropsResolver for determining properties of SASL negotiation
 * @param trustedChannelResolver for identifying trusted connections that do
 *          not require SASL negotiation
 * @param fallbackToSimpleAuth checked on each attempt at general SASL
 *          handshake, if true forces use of simple auth
 */
public SaslDataTransferClient(Configuration conf,
    SaslPropertiesResolver saslPropsResolver,
    TrustedChannelResolver trustedChannelResolver,
    AtomicBoolean fallbackToSimpleAuth) {
  this.conf = conf;
  this.fallbackToSimpleAuth = fallbackToSimpleAuth;
  this.saslPropsResolver = saslPropsResolver;
  this.trustedChannelResolver = trustedChannelResolver;
}
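The trustedChannelResolver parameter above is what lets a deployment declare certain connections as already secure. As a hypothetical illustration (the class name and subnet prefix below are invented for this sketch, not part of Hadoop), a resolver that trusts only peers inside one data-center subnet could override the two isTrusted methods used throughout the examples on this page:

import java.net.InetAddress;

import org.apache.hadoop.hdfs.protocol.datatransfer.TrustedChannelResolver;

// Hypothetical resolver: trusts the local host and any peer in 10.20.0.0/16.
public class SubnetTrustedChannelResolver extends TrustedChannelResolver {

  private static final String TRUSTED_PREFIX = "10.20.";

  @Override
  public boolean isTrusted() {
    // Assume the local process always runs inside the trusted subnet.
    return true;
  }

  @Override
  public boolean isTrusted(InetAddress peerAddress) {
    // Trust remote peers whose address falls inside the configured prefix.
    return peerAddress.getHostAddress().startsWith(TRUSTED_PREFIX);
  }
}

A resolver like this can be passed directly to the constructor above, or picked up through TrustedChannelResolver.getInstance(conf) once registered in the configuration.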
Example #9
Source File: SaslDataTransferClient.java From big-c with Apache License 2.0
/**
 * Creates a new SaslDataTransferClient.
 *
 * @param conf the configuration
 * @param saslPropsResolver for determining properties of SASL negotiation
 * @param trustedChannelResolver for identifying trusted connections that do
 *          not require SASL negotiation
 * @param fallbackToSimpleAuth checked on each attempt at general SASL
 *          handshake, if true forces use of simple auth
 */
public SaslDataTransferClient(Configuration conf,
    SaslPropertiesResolver saslPropsResolver,
    TrustedChannelResolver trustedChannelResolver,
    AtomicBoolean fallbackToSimpleAuth) {
  this.conf = conf;
  this.fallbackToSimpleAuth = fallbackToSimpleAuth;
  this.saslPropsResolver = saslPropsResolver;
  this.trustedChannelResolver = trustedChannelResolver;
}
Example #10
Source File: SaslDataTransferClient.java From hadoop with Apache License 2.0
/**
 * Creates a new SaslDataTransferClient. This constructor is used in cases
 * where it is not relevant to track if a secure client did a fallback to
 * simple auth. For intra-cluster connections between data nodes in the same
 * cluster, we can assume that all run under the same security configuration.
 *
 * @param conf the configuration
 * @param saslPropsResolver for determining properties of SASL negotiation
 * @param trustedChannelResolver for identifying trusted connections that do
 *          not require SASL negotiation
 */
public SaslDataTransferClient(Configuration conf,
    SaslPropertiesResolver saslPropsResolver,
    TrustedChannelResolver trustedChannelResolver) {
  this(conf, saslPropsResolver, trustedChannelResolver, null);
}
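In the style of the Dispatcher and DFSClient examples earlier on this page, a minimal sketch of how this three-argument form could be wired up follows; the factory class name is a placeholder invented for the sketch, while the helper calls are the same ones used in the examples above.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.protocol.datatransfer.TrustedChannelResolver;
import org.apache.hadoop.hdfs.protocol.datatransfer.sasl.DataTransferSaslUtil;
import org.apache.hadoop.hdfs.protocol.datatransfer.sasl.SaslDataTransferClient;

public class IntraClusterSaslClientFactory {

  // Intra-cluster wiring: both resolvers come from the same Configuration and
  // no fallback flag is tracked, so the three-argument constructor is used
  // (the fourth parameter of the full constructor stays null).
  static SaslDataTransferClient newSaslClient(Configuration conf) {
    return new SaslDataTransferClient(
        conf,
        DataTransferSaslUtil.getSaslPropertiesResolver(conf),
        TrustedChannelResolver.getInstance(conf));
  }
}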
Example #11
Source File: DNConf.java From hadoop with Apache License 2.0
/**
 * Returns the TrustedChannelResolver configured for use with
 * DataTransferProtocol, or null if not configured.
 *
 * @return TrustedChannelResolver configured for use with DataTransferProtocol
 */
public TrustedChannelResolver getTrustedChannelResolver() {
  return trustedChannelResolver;
}
Example #12
Source File: SaslDataTransferClient.java From big-c with Apache License 2.0
/**
 * Creates a new SaslDataTransferClient. This constructor is used in cases
 * where it is not relevant to track if a secure client did a fallback to
 * simple auth. For intra-cluster connections between data nodes in the same
 * cluster, we can assume that all run under the same security configuration.
 *
 * @param conf the configuration
 * @param saslPropsResolver for determining properties of SASL negotiation
 * @param trustedChannelResolver for identifying trusted connections that do
 *          not require SASL negotiation
 */
public SaslDataTransferClient(Configuration conf,
    SaslPropertiesResolver saslPropsResolver,
    TrustedChannelResolver trustedChannelResolver) {
  this(conf, saslPropsResolver, trustedChannelResolver, null);
}
Example #13
Source File: DNConf.java From big-c with Apache License 2.0
/**
 * Returns the TrustedChannelResolver configured for use with
 * DataTransferProtocol, or null if not configured.
 *
 * @return TrustedChannelResolver configured for use with DataTransferProtocol
 */
public TrustedChannelResolver getTrustedChannelResolver() {
  return trustedChannelResolver;
}
Example #14
Source File: FanOutOneBlockAsyncDFSOutputSaslHelper.java From hbase with Apache License 2.0
TrustedChannelResolver getTrustedChannelResolver(SaslDataTransferClient saslClient);
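All of the examples above obtain their resolver through TrustedChannelResolver.getInstance(conf), so swapping in a custom implementation is purely a configuration change. The sketch below is hedged: the property name dfs.trustedchannel.resolver.class is recalled from DFSConfigKeys and should be verified against the Hadoop version in use, and SubnetTrustedChannelResolver is the hypothetical class sketched after Example #8.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.protocol.datatransfer.TrustedChannelResolver;

public class TrustedChannelResolverRegistration {
  public static void main(String[] args) {
    Configuration conf = new Configuration();

    // Register a hypothetical custom resolver. The key name is assumed to
    // match DFSConfigKeys ("dfs.trustedchannel.resolver.class"); verify it
    // before relying on it.
    conf.setClass("dfs.trustedchannel.resolver.class",
        SubnetTrustedChannelResolver.class, TrustedChannelResolver.class);

    // Dispatcher, DFSClient, DNConf, and the HBase SASL helper shown above
    // would all pick this resolver up through getInstance.
    TrustedChannelResolver resolver = TrustedChannelResolver.getInstance(conf);
    System.out.println("resolver class: " + resolver.getClass().getName());
  }
}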