Java Code Examples for org.apache.hadoop.net.NetUtils#getSocketFactory()
The following examples show how to use org.apache.hadoop.net.NetUtils#getSocketFactory().
Each example is taken from an open-source project; the source file, project, and license are noted above the code.
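Before the project examples, a minimal self-contained sketch of the usual pattern may help. The class name, namenode host, port, and timeout below are placeholders, not taken from any example; the configuration key names in the comments match the Hadoop versions these examples come from, but verify them against your version. getSocketFactory(conf, clazz) returns the socket factory configured for that protocol, falling back to the cluster's default factory, and the socket it creates is then connected with NetUtils.connect:

import java.net.InetSocketAddress;
import java.net.Socket;
import javax.net.SocketFactory;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.protocol.ClientProtocol;
import org.apache.hadoop.net.NetUtils;

public class GetSocketFactoryDemo {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();

    // Returns the SocketFactory configured for ClientProtocol via
    // "hadoop.rpc.socket.factory.class.ClientProtocol", falling back to
    // "hadoop.rpc.socket.factory.class.default" (StandardSocketFactory).
    SocketFactory factory = NetUtils.getSocketFactory(conf, ClientProtocol.class);

    // Placeholder endpoint and timeout; substitute a real namenode address.
    Socket socket = factory.createSocket();
    try {
      NetUtils.connect(socket,
          new InetSocketAddress("namenode.example.com", 8020), 10000);
    } finally {
      socket.close();
    }
  }
}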
Example 1
Source File: FileChecksumServlets.java, from the hadoop-gpu project (Apache License 2.0)
/** {@inheritDoc} */
public void doGet(HttpServletRequest request, HttpServletResponse response
    ) throws ServletException, IOException {
  final UnixUserGroupInformation ugi = getUGI(request);
  final PrintWriter out = response.getWriter();
  final String filename = getFilename(request, response);
  final XMLOutputter xml = new XMLOutputter(out, "UTF-8");
  xml.declaration();

  final Configuration conf = new Configuration(DataNode.getDataNode().getConf());
  final int socketTimeout = conf.getInt("dfs.socket.timeout",
      HdfsConstants.READ_TIMEOUT);
  final SocketFactory socketFactory =
      NetUtils.getSocketFactory(conf, ClientProtocol.class);
  UnixUserGroupInformation.saveToConf(conf,
      UnixUserGroupInformation.UGI_PROPERTY_NAME, ugi);
  final ClientProtocol nnproxy = DFSClient.createNamenode(conf);

  try {
    final MD5MD5CRC32FileChecksum checksum = DFSClient.getFileChecksum(
        filename, nnproxy, socketFactory, socketTimeout);
    MD5MD5CRC32FileChecksum.write(xml, checksum);
  } catch (IOException ioe) {
    new RemoteException(ioe.getClass().getName(), ioe.getMessage()
        ).writeXml(filename, xml);
  }
  xml.endDocument();
}
Example 2
Source File: HadoopConnectingFileSystemProvider.java, from the CloverETL-Engine project (GNU Lesser General Public License v2.1)
private void doTestConnection(URI host, Configuration hadoopConfiguration)
    throws IOException {
  SocketFactory socketFactory =
      NetUtils.getSocketFactory(hadoopConfiguration, ClientProtocol.class);
  Socket socket = socketFactory.createSocket();
  socket.setTcpNoDelay(false);
  SocketAddress address = new InetSocketAddress(host.getHost(), host.getPort());
  try {
    NetUtils.connect(socket, address, CONNECTION_TEST_TIMEOUT);
  } finally {
    try {
      socket.close();
    } catch (IOException e) {
      // best-effort close; a failure here does not affect the test result
    }
  }
}
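The reason this connection test asks NetUtils for a factory instead of calling new Socket() directly is that the factory is pluggable per protocol. A hedged configuration sketch follows; the proxy address is a placeholder, and the key names match org.apache.hadoop.net.SocksSocketFactory in the Hadoop versions these examples target, so verify them against your version:

Configuration conf = new Configuration();
// Send ClientProtocol traffic through a SOCKS proxy instead of direct TCP.
conf.set("hadoop.rpc.socket.factory.class.ClientProtocol",
    "org.apache.hadoop.net.SocksSocketFactory");
conf.set("hadoop.socks.server", "socks-gateway.example.com:1080"); // placeholder
// getSocketFactory now returns the SOCKS factory, so a connection test like
// doTestConnection() above exercises the proxied path without code changes.
SocketFactory factory = NetUtils.getSocketFactory(conf, ClientProtocol.class);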
Example 3
Source File: FileChecksumServlets.java, from the RDFS project (Apache License 2.0)
/** {@inheritDoc} */
public void doGet(HttpServletRequest request, HttpServletResponse response
    ) throws ServletException, IOException {
  final UnixUserGroupInformation ugi = getUGI(request);
  final PrintWriter out = response.getWriter();
  final String filename = getFilename(request, response);
  final XMLOutputter xml = new XMLOutputter(out, "UTF-8");
  xml.declaration();

  Configuration daemonConf = (Configuration) getServletContext()
      .getAttribute(HttpServer.CONF_CONTEXT_ATTRIBUTE);
  final Configuration conf = (daemonConf == null) ? new Configuration()
      : new Configuration(daemonConf);
  final int socketTimeout = conf.getInt("dfs.socket.timeout",
      HdfsConstants.READ_TIMEOUT);
  final SocketFactory socketFactory =
      NetUtils.getSocketFactory(conf, ClientProtocol.class);
  UnixUserGroupInformation.saveToConf(conf,
      UnixUserGroupInformation.UGI_PROPERTY_NAME, ugi);
  final ProtocolProxy<ClientProtocol> nnproxy = DFSClient.createRPCNamenode(conf);

  try {
    final MD5MD5CRC32FileChecksum checksum = DFSClient.getFileChecksum(
        DataTransferProtocol.DATA_TRANSFER_VERSION,
        filename, nnproxy.getProxy(), nnproxy, socketFactory, socketTimeout);
    MD5MD5CRC32FileChecksum.write(xml, checksum);
  } catch (IOException ioe) {
    new RemoteException(ioe.getClass().getName(), ioe.getMessage()
        ).writeXml(filename, xml);
  }
  xml.endDocument();
}
Example 4
Source File: DFSClient.java, from the hadoop project (Apache License 2.0)
/**
 * Create a new DFSClient connected to the given nameNodeUri or rpcNamenode.
 * If HA is enabled and a positive value is set for
 * {@link DFSConfigKeys#DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_KEY} in the
 * configuration, the DFSClient will use {@link LossyRetryInvocationHandler}
 * as its RetryInvocationHandler. Otherwise one of nameNodeUri or rpcNamenode
 * must be null.
 */
@VisibleForTesting
public DFSClient(URI nameNodeUri, ClientProtocol rpcNamenode,
    Configuration conf, FileSystem.Statistics stats)
    throws IOException {
  SpanReceiverHost.get(conf, DFSConfigKeys.DFS_CLIENT_HTRACE_PREFIX);
  traceSampler = new SamplerBuilder(TraceUtils.
      wrapHadoopConf(DFSConfigKeys.DFS_CLIENT_HTRACE_PREFIX, conf)).build();
  // Copy only the required DFSClient configuration
  this.dfsClientConf = new Conf(conf);
  if (this.dfsClientConf.useLegacyBlockReaderLocal) {
    LOG.debug("Using legacy short-circuit local reads.");
  }
  this.conf = conf;
  this.stats = stats;
  this.socketFactory = NetUtils.getSocketFactory(conf, ClientProtocol.class);
  this.dtpReplaceDatanodeOnFailure = ReplaceDatanodeOnFailure.get(conf);

  this.ugi = UserGroupInformation.getCurrentUser();

  this.authority = nameNodeUri == null ? "null" : nameNodeUri.getAuthority();
  this.clientName = "DFSClient_" + dfsClientConf.taskId + "_" +
      DFSUtil.getRandom().nextInt() + "_" + Thread.currentThread().getId();
  int numResponseToDrop = conf.getInt(
      DFSConfigKeys.DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_KEY,
      DFSConfigKeys.DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_DEFAULT);
  NameNodeProxies.ProxyAndInfo<ClientProtocol> proxyInfo = null;
  AtomicBoolean nnFallbackToSimpleAuth = new AtomicBoolean(false);
  if (numResponseToDrop > 0) {
    // This case is used for testing.
    LOG.warn(DFSConfigKeys.DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_KEY
        + " is set to " + numResponseToDrop
        + ", this hacked client will proactively drop responses");
    proxyInfo = NameNodeProxies.createProxyWithLossyRetryHandler(conf,
        nameNodeUri, ClientProtocol.class, numResponseToDrop,
        nnFallbackToSimpleAuth);
  }

  if (proxyInfo != null) {
    this.dtService = proxyInfo.getDelegationTokenService();
    this.namenode = proxyInfo.getProxy();
  } else if (rpcNamenode != null) {
    // This case is used for testing.
    Preconditions.checkArgument(nameNodeUri == null);
    this.namenode = rpcNamenode;
    dtService = null;
  } else {
    Preconditions.checkArgument(nameNodeUri != null, "null URI");
    proxyInfo = NameNodeProxies.createProxy(conf, nameNodeUri,
        ClientProtocol.class, nnFallbackToSimpleAuth);
    this.dtService = proxyInfo.getDelegationTokenService();
    this.namenode = proxyInfo.getProxy();
  }

  String localInterfaces[] =
      conf.getTrimmedStrings(DFSConfigKeys.DFS_CLIENT_LOCAL_INTERFACES);
  localInterfaceAddrs = getLocalInterfaceAddrs(localInterfaces);
  if (LOG.isDebugEnabled() && 0 != localInterfaces.length) {
    LOG.debug("Using local interfaces [" +
        Joiner.on(',').join(localInterfaces) + "] with addresses [" +
        Joiner.on(',').join(localInterfaceAddrs) + "]");
  }

  Boolean readDropBehind =
      (conf.get(DFS_CLIENT_CACHE_DROP_BEHIND_READS) == null) ?
          null : conf.getBoolean(DFS_CLIENT_CACHE_DROP_BEHIND_READS, false);
  Long readahead = (conf.get(DFS_CLIENT_CACHE_READAHEAD) == null) ?
      null : conf.getLong(DFS_CLIENT_CACHE_READAHEAD, 0);
  Boolean writeDropBehind =
      (conf.get(DFS_CLIENT_CACHE_DROP_BEHIND_WRITES) == null) ?
          null : conf.getBoolean(DFS_CLIENT_CACHE_DROP_BEHIND_WRITES, false);
  this.defaultReadCachingStrategy =
      new CachingStrategy(readDropBehind, readahead);
  this.defaultWriteCachingStrategy =
      new CachingStrategy(writeDropBehind, readahead);
  this.clientContext = ClientContext.get(
      conf.get(DFS_CLIENT_CONTEXT, DFS_CLIENT_CONTEXT_DEFAULT),
      dfsClientConf);
  this.hedgedReadThresholdMillis = conf.getLong(
      DFSConfigKeys.DFS_DFSCLIENT_HEDGED_READ_THRESHOLD_MILLIS,
      DFSConfigKeys.DEFAULT_DFSCLIENT_HEDGED_READ_THRESHOLD_MILLIS);
  int numThreads = conf.getInt(
      DFSConfigKeys.DFS_DFSCLIENT_HEDGED_READ_THREADPOOL_SIZE,
      DFSConfigKeys.DEFAULT_DFSCLIENT_HEDGED_READ_THREADPOOL_SIZE);
  if (numThreads > 0) {
    this.initThreadsNumForHedgedReads(numThreads);
  }
  this.saslClient = new SaslDataTransferClient(
      conf, DataTransferSaslUtil.getSaslPropertiesResolver(conf),
      TrustedChannelResolver.getInstance(conf), nnFallbackToSimpleAuth);
}
Example 5
Source File: DFSClient.java, from the big-c project (Apache License 2.0)
The constructor shown here is byte-for-byte identical to the one in Example 4 above (big-c carries the same DFSClient.java as Apache Hadoop), so the code is not repeated.
Example 6
Source File: DFSClient.java, from the RDFS project (Apache License 2.0)
/**
 * Create a new DFSClient connected to the given nameNodeAddr or rpcNamenode.
 * Exactly one of nameNodeAddr or rpcNamenode must be null.
 */
DFSClient(InetSocketAddress nameNodeAddr, ClientProtocol rpcNamenode,
    Configuration conf, FileSystem.Statistics stats)
    throws IOException {
  this.conf = conf;
  this.stats = stats;
  this.socketTimeout = conf.getInt("dfs.socket.timeout",
      HdfsConstants.READ_TIMEOUT);
  this.socketReadExtentionTimeout = conf.getInt(
      HdfsConstants.DFS_DATANODE_READ_EXTENSION,
      HdfsConstants.READ_TIMEOUT_EXTENSION);
  this.timeoutValue = this.socketTimeout;
  this.datanodeWriteTimeout = conf.getInt("dfs.datanode.socket.write.timeout",
      HdfsConstants.WRITE_TIMEOUT);
  this.datanodeWriteExtentionTimeout = conf.getInt(
      HdfsConstants.DFS_DATANODE_WRITE_EXTENTSION,
      HdfsConstants.WRITE_TIMEOUT_EXTENSION);
  this.socketFactory = NetUtils.getSocketFactory(conf, ClientProtocol.class);
  // dfs.write.packet.size is an internal config variable
  this.writePacketSize = conf.getInt("dfs.write.packet.size", 64 * 1024);
  this.minReadSpeedBps = conf.getLong("dfs.min.read.speed.bps", -1);
  this.maxBlockAcquireFailures = getMaxBlockAcquireFailures(conf);
  this.localHost = InetAddress.getLocalHost();

  // fetch network location of localhost
  this.pseuDatanodeInfoForLocalhost = new DatanodeInfo(new DatanodeID(
      this.localHost.getHostAddress()));
  this.dnsToSwitchMapping = ReflectionUtils.newInstance(
      conf.getClass("topology.node.switch.mapping.impl", ScriptBasedMapping.class,
          DNSToSwitchMapping.class), conf);
  ArrayList<String> tempList = new ArrayList<String>();
  tempList.add(this.localHost.getHostName());
  List<String> retList = dnsToSwitchMapping.resolve(tempList);
  if (retList != null && retList.size() > 0) {
    localhostNetworkLocation = retList.get(0);
    this.pseuDatanodeInfoForLocalhost.setNetworkLocation(localhostNetworkLocation);
  }

  // The hdfsTimeout is currently the same as the ipc timeout
  this.hdfsTimeout = Client.getTimeout(conf);
  this.closeFileTimeout = conf.getLong("dfs.client.closefile.timeout",
      this.hdfsTimeout);

  try {
    this.ugi = UnixUserGroupInformation.login(conf, true);
  } catch (LoginException e) {
    throw (IOException) (new IOException().initCause(e));
  }

  String taskId = conf.get("mapred.task.id");
  if (taskId != null) {
    this.clientName = "DFSClient_" + taskId + "_" + r.nextInt()
        + "_" + Thread.currentThread().getId();
  } else {
    this.clientName = "DFSClient_" + r.nextInt();
  }
  defaultBlockSize = conf.getLong("dfs.block.size", DEFAULT_BLOCK_SIZE);
  defaultReplication = (short) conf.getInt("dfs.replication", 3);

  if (nameNodeAddr != null && rpcNamenode == null) {
    this.nameNodeAddr = nameNodeAddr;
    getNameNode();
  } else if (nameNodeAddr == null && rpcNamenode != null) {
    // This case is used for testing.
    if (rpcNamenode instanceof NameNode) {
      this.namenodeProtocolProxy = createRPCNamenode(
          ((NameNode) rpcNamenode).getNameNodeAddress(), conf, ugi);
    }
    this.namenode = this.rpcNamenode = rpcNamenode;
  } else {
    throw new IllegalArgumentException(
        "Expecting exactly one of nameNodeAddr and rpcNamenode being null: "
            + "nameNodeAddr=" + nameNodeAddr + ", rpcNamenode=" + rpcNamenode);
  }
  // read directly from the block file if configured.
  this.shortCircuitLocalReads = conf.getBoolean("dfs.read.shortcircuit", false);
  if (this.shortCircuitLocalReads) {
    LOG.debug("Configured to shortcircuit reads to " + localHost);
  }
  this.leasechecker = new LeaseChecker(this.clientName, this.conf);
  // by default, if the ipTosValue is less than 0 (for example -1),
  // we will not set it in the socket.
  this.ipTosValue = conf.getInt("dfs.client.tos.value",
      NetUtils.NOT_SET_IP_TOS);
  if (this.ipTosValue > NetUtils.IP_TOS_MAX_VALUE) {
    LOG.warn("dfs.client.tos.value " + ipTosValue
        + " exceeds the max allowed value " + NetUtils.IP_TOS_MAX_VALUE
        + ", will not take effect");
    this.ipTosValue = NetUtils.NOT_SET_IP_TOS;
  }
}
Example 7
Source File: DFSClient.java, from the hadoop-gpu project (Apache License 2.0)
/**
 * Create a new DFSClient connected to the given nameNodeAddr or rpcNamenode.
 * Exactly one of nameNodeAddr or rpcNamenode must be null.
 */
DFSClient(InetSocketAddress nameNodeAddr, ClientProtocol rpcNamenode,
    Configuration conf, FileSystem.Statistics stats)
    throws IOException {
  this.conf = conf;
  this.stats = stats;
  this.socketTimeout = conf.getInt("dfs.socket.timeout",
      HdfsConstants.READ_TIMEOUT);
  this.datanodeWriteTimeout = conf.getInt("dfs.datanode.socket.write.timeout",
      HdfsConstants.WRITE_TIMEOUT);
  this.socketFactory = NetUtils.getSocketFactory(conf, ClientProtocol.class);
  // dfs.write.packet.size is an internal config variable
  this.writePacketSize = conf.getInt("dfs.write.packet.size", 64 * 1024);
  this.maxBlockAcquireFailures =
      conf.getInt("dfs.client.max.block.acquire.failures",
          MAX_BLOCK_ACQUIRE_FAILURES);

  try {
    this.ugi = UnixUserGroupInformation.login(conf, true);
  } catch (LoginException e) {
    throw (IOException) (new IOException().initCause(e));
  }

  String taskId = conf.get("mapred.task.id");
  if (taskId != null) {
    this.clientName = "DFSClient_" + taskId;
  } else {
    this.clientName = "DFSClient_" + r.nextInt();
  }
  defaultBlockSize = conf.getLong("dfs.block.size", DEFAULT_BLOCK_SIZE);
  defaultReplication = (short) conf.getInt("dfs.replication", 3);

  if (nameNodeAddr != null && rpcNamenode == null) {
    this.rpcNamenode = createRPCNamenode(nameNodeAddr, conf, ugi);
    this.namenode = createNamenode(this.rpcNamenode);
  } else if (nameNodeAddr == null && rpcNamenode != null) {
    // This case is used for testing.
    this.namenode = this.rpcNamenode = rpcNamenode;
  } else {
    throw new IllegalArgumentException(
        "Expecting exactly one of nameNodeAddr and rpcNamenode being null: "
            + "nameNodeAddr=" + nameNodeAddr + ", rpcNamenode=" + rpcNamenode);
  }
}