Java Code Examples for org.apache.hadoop.conf.Configuration#getTrimmedStrings()
The following examples show how to use org.apache.hadoop.conf.Configuration#getTrimmedStrings().
The method reads a comma-delimited configuration property and returns its values as a String[], trimming leading and trailing whitespace from each entry. Each example is taken verbatim from an open-source project; the project and source file are noted above the code.
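Before the project examples, here is a minimal, self-contained sketch of the trimming behavior. The property names "demo.hosts" and "demo.codecs" are made up for illustration; only Configuration#set and the two getTrimmedStrings overloads are used.

import org.apache.hadoop.conf.Configuration;

public class GetTrimmedStringsDemo {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Hypothetical key with messy, comma-delimited values.
    conf.set("demo.hosts",
        " host1.example.com , host2.example.com ,host3.example.com ");

    // Each token comes back with leading/trailing whitespace removed.
    for (String host : conf.getTrimmedStrings("demo.hosts")) {
      System.out.println("[" + host + "]");   // e.g. [host1.example.com]
    }

    // The varargs overload falls back to the supplied defaults when the key is unset.
    String[] codecs = conf.getTrimmedStrings("demo.codecs", "gzip", "snappy");
    System.out.println(String.join(",", codecs)); // gzip,snappy
  }
}

When the key is absent and no default is supplied, getTrimmedStrings(String) returns an empty array rather than null, which is why several of the examples below iterate over the result without a null check.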
Example 1
Source File: TopConf.java From hadoop with Apache License 2.0 (an identical copy appears in big-c)
public TopConf(Configuration conf) {
  isEnabled = conf.getBoolean(DFSConfigKeys.NNTOP_ENABLED_KEY,
      DFSConfigKeys.NNTOP_ENABLED_DEFAULT);
  String[] periodsStr = conf.getTrimmedStrings(
      DFSConfigKeys.NNTOP_WINDOWS_MINUTES_KEY,
      DFSConfigKeys.NNTOP_WINDOWS_MINUTES_DEFAULT);
  nntopReportingPeriodsMs = new int[periodsStr.length];
  for (int i = 0; i < periodsStr.length; i++) {
    nntopReportingPeriodsMs[i] = Ints.checkedCast(
        TimeUnit.MINUTES.toMillis(Integer.parseInt(periodsStr[i])));
  }
  for (int aPeriodMs : nntopReportingPeriodsMs) {
    Preconditions.checkArgument(aPeriodMs >= TimeUnit.MINUTES.toMillis(1),
        "minimum reporting period is 1 min!");
  }
}
Example 2
Source File: SerializationFactory.java From big-c with Apache License 2.0
/**
 * <p>
 * Serializations are found by reading the <code>io.serializations</code>
 * property from <code>conf</code>, which is a comma-delimited list of
 * classnames.
 * </p>
 */
public SerializationFactory(Configuration conf) {
  super(conf);
  for (String serializerName : conf.getTrimmedStrings(
      CommonConfigurationKeys.IO_SERIALIZATIONS_KEY,
      new String[]{WritableSerialization.class.getName(),
          AvroSpecificSerialization.class.getName(),
          AvroReflectSerialization.class.getName()})) {
    add(conf, serializerName);
  }
}
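Since the javadoc above describes io.serializations as a comma-delimited list of class names, a short configuration-side sketch may help; the two serializer classes named here are the stock Hadoop Writable and Avro-reflect serializers, and the exact list you register is up to you.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.serializer.SerializationFactory;

public class SerializationConfigSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Comma-delimited class names; the stray whitespace is harmless because
    // the factory reads the property with getTrimmedStrings().
    conf.set("io.serializations",
        "org.apache.hadoop.io.serializer.WritableSerialization, "
            + "org.apache.hadoop.io.serializer.avro.AvroReflectSerialization");
    SerializationFactory factory = new SerializationFactory(conf);
    // Text is a Writable, so the Writable serialization should be found.
    System.out.println(factory.getSerialization(org.apache.hadoop.io.Text.class) != null); // true
  }
}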
Example 3
Source File: SaslPropertiesResolver.java From hadoop with Apache License 2.0 (an identical copy appears in big-c)
@Override
public void setConf(Configuration conf) {
  this.conf = conf;
  properties = new TreeMap<String,String>();
  String[] qop = conf.getTrimmedStrings(
      CommonConfigurationKeysPublic.HADOOP_RPC_PROTECTION,
      QualityOfProtection.AUTHENTICATION.toString());
  for (int i = 0; i < qop.length; i++) {
    qop[i] = QualityOfProtection.valueOf(
        StringUtils.toUpperCase(qop[i])).getSaslQop();
  }
  properties.put(Sasl.QOP, StringUtils.join(",", qop));
  properties.put(Sasl.SERVER_AUTH, "true");
}
Example 4
Source File: ProxyServers.java From hadoop with Apache License 2.0 (an identical copy appears in big-c)
public static void refresh(Configuration conf) {
  Collection<String> tempServers = new HashSet<String>();
  // trusted proxy servers such as http proxies
  for (String host : conf.getTrimmedStrings(CONF_HADOOP_PROXYSERVERS)) {
    InetSocketAddress addr = new InetSocketAddress(host, 0);
    if (!addr.isUnresolved()) {
      tempServers.add(addr.getAddress().getHostAddress());
    }
  }
  proxyServers = tempServers;
}
Example 5
Source File: SpanReceiverHost.java From hadoop with Apache License 2.0 (an identical copy appears in big-c)
/**
 * Reads the names of classes specified in the
 * "hadoop.htrace.spanreceiver.classes" property and instantiates and registers
 * them with the Tracer as SpanReceiver's.
 *
 * The nullary constructor is called during construction, but if the classes
 * specified implement the Configurable interface, setConfiguration() will be
 * called on them. This allows SpanReceivers to use values from the Hadoop
 * configuration.
 */
public synchronized void loadSpanReceivers(Configuration conf) {
  config = new Configuration(conf);
  String receiverKey = confPrefix + SPAN_RECEIVERS_CONF_SUFFIX;
  String[] receiverNames = config.getTrimmedStrings(receiverKey);
  if (receiverNames == null || receiverNames.length == 0) {
    if (LOG.isTraceEnabled()) {
      LOG.trace("No span receiver names found in " + receiverKey + ".");
    }
    return;
  }
  // It's convenient to have each daemon log to a random trace file when
  // testing.
  String pathKey = confPrefix + LOCAL_FILE_SPAN_RECEIVER_PATH_SUFFIX;
  if (config.get(pathKey) == null) {
    String uniqueFile = getUniqueLocalTraceFileName();
    config.set(pathKey, uniqueFile);
    if (LOG.isTraceEnabled()) {
      LOG.trace("Set " + pathKey + " to " + uniqueFile);
    }
  }
  for (String className : receiverNames) {
    try {
      SpanReceiver rcvr = loadInstance(className, EMPTY);
      Trace.addReceiver(rcvr);
      receivers.put(highestId++, rcvr);
      LOG.info("Loaded SpanReceiver " + className + " successfully.");
    } catch (IOException e) {
      LOG.error("Failed to load SpanReceiver", e);
    }
  }
}
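The javadoc above refers to the "hadoop.htrace.spanreceiver.classes" property. A small sketch of populating it follows; the LocalFileSpanReceiver class name comes from the HTrace library bundled with Hadoop 2.x and should be treated as illustrative.

import org.apache.hadoop.conf.Configuration;

public class SpanReceiverConfigSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Comma-delimited receiver class names, later read back via getTrimmedStrings().
    conf.set("hadoop.htrace.spanreceiver.classes",
        "org.apache.htrace.impl.LocalFileSpanReceiver");
    for (String name : conf.getTrimmedStrings("hadoop.htrace.spanreceiver.classes")) {
      System.out.println(name);
    }
  }
}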
Example 6
Source File: SnapshotCodecProvider.java From phoenix-tephra with Apache License 2.0
/**
 * Register all codec specified in the configuration with this provider.
 * There can only be one codec for a given version.
 */
private void initialize(Configuration configuration) {
  String[] codecClassNames =
      configuration.getTrimmedStrings(TxConstants.Persist.CFG_TX_SNAPHOT_CODEC_CLASSES);
  List<Class> codecClasses = Lists.newArrayList();
  if (codecClassNames != null) {
    for (String clsName : codecClassNames) {
      try {
        codecClasses.add(Class.forName(clsName));
      } catch (ClassNotFoundException cnfe) {
        LOG.warn("Unable to load class configured for "
            + TxConstants.Persist.CFG_TX_SNAPHOT_CODEC_CLASSES + ": " + clsName, cnfe);
      }
    }
  }
  if (codecClasses.size() == 0) {
    codecClasses.addAll(Arrays.asList(TxConstants.Persist.DEFAULT_TX_SNAPHOT_CODEC_CLASSES));
  }
  for (Class<?> codecClass : codecClasses) {
    try {
      SnapshotCodec codec = (SnapshotCodec) (codecClass.newInstance());
      codecs.put(codec.getVersion(), codec);
      LOG.debug("Using snapshot codec {} for snapshots of version {}",
          codecClass.getName(), codec.getVersion());
    } catch (Exception e) {
      LOG.warn("Error instantiating snapshot codec {}. Skipping.", codecClass.getName(), e);
    }
  }
}
Example 7
Source File: KDEJobRunner.java From geowave with Apache License 2.0
protected void addJobClasspathDependencies(final Job job, final Configuration conf)
    throws IOException, URISyntaxException {
  final String[] jars = conf.getTrimmedStrings(GEOWAVE_CLASSPATH_JARS);
  if (jars != null) {
    for (final String jarPath : jars) {
      job.addArchiveToClassPath(new Path(new URI(jarPath)));
    }
  }
}
Example 8
Source File: TableInputFormat.java From hbase with Apache License 2.0
/**
 * Sets up a {@link Scan} instance, applying settings from the configuration property
 * constants defined in {@code TableInputFormat}. This allows specifying things such as:
 * <ul>
 *   <li>start and stop rows</li>
 *   <li>column qualifiers or families</li>
 *   <li>timestamps or timerange</li>
 *   <li>scanner caching and batch size</li>
 * </ul>
 */
public static Scan createScanFromConfiguration(Configuration conf) throws IOException {
  Scan scan = new Scan();
  if (conf.get(SCAN_ROW_START) != null) {
    scan.withStartRow(Bytes.toBytesBinary(conf.get(SCAN_ROW_START)));
  }
  if (conf.get(SCAN_ROW_STOP) != null) {
    scan.withStopRow(Bytes.toBytesBinary(conf.get(SCAN_ROW_STOP)));
  }
  if (conf.get(SCAN_COLUMNS) != null) {
    addColumns(scan, conf.get(SCAN_COLUMNS));
  }
  for (String columnFamily : conf.getTrimmedStrings(SCAN_COLUMN_FAMILY)) {
    scan.addFamily(Bytes.toBytes(columnFamily));
  }
  if (conf.get(SCAN_TIMESTAMP) != null) {
    scan.setTimestamp(Long.parseLong(conf.get(SCAN_TIMESTAMP)));
  }
  if (conf.get(SCAN_TIMERANGE_START) != null && conf.get(SCAN_TIMERANGE_END) != null) {
    scan.setTimeRange(
        Long.parseLong(conf.get(SCAN_TIMERANGE_START)),
        Long.parseLong(conf.get(SCAN_TIMERANGE_END)));
  }
  if (conf.get(SCAN_MAXVERSIONS) != null) {
    scan.readVersions(Integer.parseInt(conf.get(SCAN_MAXVERSIONS)));
  }
  if (conf.get(SCAN_CACHEDROWS) != null) {
    scan.setCaching(Integer.parseInt(conf.get(SCAN_CACHEDROWS)));
  }
  if (conf.get(SCAN_BATCHSIZE) != null) {
    scan.setBatch(Integer.parseInt(conf.get(SCAN_BATCHSIZE)));
  }
  // false by default, full table scans generate too much BC churn
  scan.setCacheBlocks((conf.getBoolean(SCAN_CACHEBLOCKS, false)));
  return scan;
}
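For context, the driver side of a job would typically populate these keys before TableInputFormat builds its Scan. Below is a rough sketch using the public SCAN_* constants; the column family names and the caching value are placeholders.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.mapreduce.TableInputFormat;

public class ScanConfigSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    // Comma-delimited families; createScanFromConfiguration() reads them with
    // getTrimmedStrings(), so the space after the comma is harmless.
    conf.set(TableInputFormat.SCAN_COLUMN_FAMILY, "cf1, cf2");
    conf.set(TableInputFormat.SCAN_CACHEDROWS, "500");
    Scan scan = TableInputFormat.createScanFromConfiguration(conf);
    System.out.println(scan.getFamilyMap().size()); // 2
  }
}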
Example 9
Source File: MRApps.java From hadoop with Apache License 2.0
@VisibleForTesting
static String[] getSystemClasses(Configuration conf) {
  return conf.getTrimmedStrings(
      MRJobConfig.MAPREDUCE_JOB_CLASSLOADER_SYSTEM_CLASSES);
}
Example 10
Source File: TezTestServiceTaskSchedulerService.java From tez with Apache License 2.0
public TezTestServiceTaskSchedulerService(TaskSchedulerContext taskSchedulerContext) {
  // Accepting configuration here to allow setting up fields as final
  super(taskSchedulerContext);
  this.serviceHosts = new LinkedList<String>();
  this.containerFactory = new ContainerFactory(taskSchedulerContext.getApplicationAttemptId(),
      taskSchedulerContext.getCustomClusterIdentifier());

  Configuration conf = null;
  try {
    conf = TezUtils.createConfFromUserPayload(taskSchedulerContext.getInitialUserPayload());
  } catch (IOException e) {
    throw new TezUncheckedException(e);
  }

  this.memoryPerInstance = conf
      .getInt(TezTestServiceConfConstants.TEZ_TEST_SERVICE_MEMORY_PER_INSTANCE_MB, -1);
  Preconditions.checkArgument(memoryPerInstance > 0,
      TezTestServiceConfConstants.TEZ_TEST_SERVICE_MEMORY_PER_INSTANCE_MB
          + " must be configured");

  this.executorsPerInstance = conf.getInt(
      TezTestServiceConfConstants.TEZ_TEST_SERVICE_NUM_EXECUTORS_PER_INSTANCE, -1);
  Preconditions.checkArgument(executorsPerInstance > 0,
      TezTestServiceConfConstants.TEZ_TEST_SERVICE_NUM_EXECUTORS_PER_INSTANCE
          + " must be configured");

  this.coresPerInstance = conf
      .getInt(TezTestServiceConfConstants.TEZ_TEST_SERVICE_VCPUS_PER_INSTANCE,
          executorsPerInstance);

  this.containerPort = conf.getInt(TezTestServiceConfConstants.TEZ_TEST_SERVICE_RPC_PORT, -1);
  Preconditions.checkArgument(executorsPerInstance > 0,
      TezTestServiceConfConstants.TEZ_TEST_SERVICE_RPC_PORT + " must be configured");

  int memoryPerContainer = (int) (memoryPerInstance / (float) executorsPerInstance);
  int coresPerContainer = (int) (coresPerInstance / (float) executorsPerInstance);
  this.resourcePerContainer = Resource.newInstance(memoryPerContainer, coresPerContainer);

  String[] hosts = conf.getTrimmedStrings(TezTestServiceConfConstants.TEZ_TEST_SERVICE_HOSTS);
  if (hosts == null || hosts.length == 0) {
    hosts = new String[]{"localhost"};
  }
  for (String host : hosts) {
    serviceHosts.add(host);
  }

  LOG.info("Running with configuration: "
      + "memoryPerInstance=" + memoryPerInstance
      + ", vcoresPerInstance=" + coresPerInstance
      + ", executorsPerInstance=" + executorsPerInstance
      + ", resourcePerContainerInferred=" + resourcePerContainer
      + ", hosts=" + serviceHosts.toString());
}
Example 11
Source File: ShortCircuitRegistry.java From hadoop with Apache License 2.0 (an identical copy appears in big-c)
public ShortCircuitRegistry(Configuration conf) throws IOException {
  boolean enabled = false;
  SharedFileDescriptorFactory shmFactory = null;
  DomainSocketWatcher watcher = null;
  try {
    int interruptCheck = conf.getInt(
        DFS_SHORT_CIRCUIT_SHARED_MEMORY_WATCHER_INTERRUPT_CHECK_MS,
        DFS_SHORT_CIRCUIT_SHARED_MEMORY_WATCHER_INTERRUPT_CHECK_MS_DEFAULT);
    if (interruptCheck <= 0) {
      throw new IOException(
          DFS_SHORT_CIRCUIT_SHARED_MEMORY_WATCHER_INTERRUPT_CHECK_MS +
          " was set to " + interruptCheck);
    }
    String shmPaths[] =
        conf.getTrimmedStrings(DFS_DATANODE_SHARED_FILE_DESCRIPTOR_PATHS);
    if (shmPaths.length == 0) {
      shmPaths = DFS_DATANODE_SHARED_FILE_DESCRIPTOR_PATHS_DEFAULT.split(",");
    }
    shmFactory = SharedFileDescriptorFactory.
        create("HadoopShortCircuitShm_", shmPaths);
    String dswLoadingFailure = DomainSocketWatcher.getLoadingFailureReason();
    if (dswLoadingFailure != null) {
      throw new IOException(dswLoadingFailure);
    }
    watcher = new DomainSocketWatcher(interruptCheck, "datanode");
    enabled = true;
    if (LOG.isDebugEnabled()) {
      LOG.debug("created new ShortCircuitRegistry with interruptCheck=" +
          interruptCheck + ", shmPath=" + shmFactory.getPath());
    }
  } catch (IOException e) {
    if (LOG.isDebugEnabled()) {
      LOG.debug("Disabling ShortCircuitRegistry", e);
    }
  } finally {
    this.enabled = enabled;
    this.shmFactory = shmFactory;
    this.watcher = watcher;
  }
}
Example 12
Source File: FsDatasetImpl.java From hadoop with Apache License 2.0 (an identical copy appears in big-c)
/**
 * An FSDataset has a directory where it loads its data files.
 */
FsDatasetImpl(DataNode datanode, DataStorage storage, Configuration conf
    ) throws IOException {
  this.fsRunning = true;
  this.datanode = datanode;
  this.dataStorage = storage;
  this.conf = conf;
  // The number of volumes required for operation is the total number
  // of volumes minus the number of failed volumes we can tolerate.
  final int volFailuresTolerated =
      conf.getInt(DFSConfigKeys.DFS_DATANODE_FAILED_VOLUMES_TOLERATED_KEY,
          DFSConfigKeys.DFS_DATANODE_FAILED_VOLUMES_TOLERATED_DEFAULT);

  String[] dataDirs = conf.getTrimmedStrings(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY);
  Collection<StorageLocation> dataLocations = DataNode.getStorageLocations(conf);
  List<VolumeFailureInfo> volumeFailureInfos = getInitialVolumeFailureInfos(
      dataLocations, storage);

  int volsConfigured = (dataDirs == null) ? 0 : dataDirs.length;
  int volsFailed = volumeFailureInfos.size();
  this.validVolsRequired = volsConfigured - volFailuresTolerated;

  if (volFailuresTolerated < 0 || volFailuresTolerated >= volsConfigured) {
    throw new DiskErrorException("Invalid volume failure "
        + " config value: " + volFailuresTolerated);
  }
  if (volsFailed > volFailuresTolerated) {
    throw new DiskErrorException("Too many failed volumes - "
        + "current valid volumes: " + storage.getNumStorageDirs()
        + ", volumes configured: " + volsConfigured
        + ", volumes failed: " + volsFailed
        + ", volume failures tolerated: " + volFailuresTolerated);
  }

  storageMap = new ConcurrentHashMap<String, DatanodeStorage>();
  volumeMap = new ReplicaMap(this);
  ramDiskReplicaTracker = RamDiskReplicaTracker.getInstance(conf, this);

  @SuppressWarnings("unchecked")
  final VolumeChoosingPolicy<FsVolumeImpl> blockChooserImpl =
      ReflectionUtils.newInstance(conf.getClass(
          DFSConfigKeys.DFS_DATANODE_FSDATASET_VOLUME_CHOOSING_POLICY_KEY,
          RoundRobinVolumeChoosingPolicy.class,
          VolumeChoosingPolicy.class), conf);
  volumes = new FsVolumeList(volumeFailureInfos, datanode.getBlockScanner(),
      blockChooserImpl);
  asyncDiskService = new FsDatasetAsyncDiskService(datanode, this);
  asyncLazyPersistService = new RamDiskAsyncLazyPersistService(datanode);
  deletingBlock = new HashMap<String, Set<Long>>();

  for (int idx = 0; idx < storage.getNumStorageDirs(); idx++) {
    addVolume(dataLocations, storage.getStorageDir(idx));
  }
  setupAsyncLazyPersistThreads();

  cacheManager = new FsDatasetCache(this);

  // Start the lazy writer once we have built the replica maps.
  lazyWriter = new Daemon(new LazyWriter(conf));
  lazyWriter.start();
  registerMBean(datanode.getDatanodeUuid());
  localFS = FileSystem.getLocal(conf);
  blockPinningEnabled = conf.getBoolean(
      DFSConfigKeys.DFS_DATANODE_BLOCK_PINNING_ENABLED,
      DFSConfigKeys.DFS_DATANODE_BLOCK_PINNING_ENABLED_DEFAULT);
}
Example 13
Source File: DFSClient.java From hadoop with Apache License 2.0 (an identical copy appears in big-c)
/**
 * Create a new DFSClient connected to the given nameNodeUri or rpcNamenode.
 * If HA is enabled and a positive value is set for
 * {@link DFSConfigKeys#DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_KEY} in the
 * configuration, the DFSClient will use {@link LossyRetryInvocationHandler}
 * as its RetryInvocationHandler. Otherwise one of nameNodeUri or rpcNamenode
 * must be null.
 */
@VisibleForTesting
public DFSClient(URI nameNodeUri, ClientProtocol rpcNamenode,
    Configuration conf, FileSystem.Statistics stats)
    throws IOException {
  SpanReceiverHost.get(conf, DFSConfigKeys.DFS_CLIENT_HTRACE_PREFIX);
  traceSampler = new SamplerBuilder(TraceUtils.
      wrapHadoopConf(DFSConfigKeys.DFS_CLIENT_HTRACE_PREFIX, conf)).build();
  // Copy only the required DFSClient configuration
  this.dfsClientConf = new Conf(conf);
  if (this.dfsClientConf.useLegacyBlockReaderLocal) {
    LOG.debug("Using legacy short-circuit local reads.");
  }
  this.conf = conf;
  this.stats = stats;
  this.socketFactory = NetUtils.getSocketFactory(conf, ClientProtocol.class);
  this.dtpReplaceDatanodeOnFailure = ReplaceDatanodeOnFailure.get(conf);

  this.ugi = UserGroupInformation.getCurrentUser();

  this.authority = nameNodeUri == null? "null": nameNodeUri.getAuthority();
  this.clientName = "DFSClient_" + dfsClientConf.taskId + "_" +
      DFSUtil.getRandom().nextInt() + "_" + Thread.currentThread().getId();
  int numResponseToDrop = conf.getInt(
      DFSConfigKeys.DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_KEY,
      DFSConfigKeys.DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_DEFAULT);
  NameNodeProxies.ProxyAndInfo<ClientProtocol> proxyInfo = null;
  AtomicBoolean nnFallbackToSimpleAuth = new AtomicBoolean(false);
  if (numResponseToDrop > 0) {
    // This case is used for testing.
    LOG.warn(DFSConfigKeys.DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_KEY
        + " is set to " + numResponseToDrop
        + ", this hacked client will proactively drop responses");
    proxyInfo = NameNodeProxies.createProxyWithLossyRetryHandler(conf,
        nameNodeUri, ClientProtocol.class, numResponseToDrop,
        nnFallbackToSimpleAuth);
  }

  if (proxyInfo != null) {
    this.dtService = proxyInfo.getDelegationTokenService();
    this.namenode = proxyInfo.getProxy();
  } else if (rpcNamenode != null) {
    // This case is used for testing.
    Preconditions.checkArgument(nameNodeUri == null);
    this.namenode = rpcNamenode;
    dtService = null;
  } else {
    Preconditions.checkArgument(nameNodeUri != null,
        "null URI");
    proxyInfo = NameNodeProxies.createProxy(conf, nameNodeUri,
        ClientProtocol.class, nnFallbackToSimpleAuth);
    this.dtService = proxyInfo.getDelegationTokenService();
    this.namenode = proxyInfo.getProxy();
  }

  String localInterfaces[] =
      conf.getTrimmedStrings(DFSConfigKeys.DFS_CLIENT_LOCAL_INTERFACES);
  localInterfaceAddrs = getLocalInterfaceAddrs(localInterfaces);
  if (LOG.isDebugEnabled() && 0 != localInterfaces.length) {
    LOG.debug("Using local interfaces [" +
        Joiner.on(',').join(localInterfaces) + "] with addresses [" +
        Joiner.on(',').join(localInterfaceAddrs) + "]");
  }

  Boolean readDropBehind = (conf.get(DFS_CLIENT_CACHE_DROP_BEHIND_READS) == null) ?
      null : conf.getBoolean(DFS_CLIENT_CACHE_DROP_BEHIND_READS, false);
  Long readahead = (conf.get(DFS_CLIENT_CACHE_READAHEAD) == null) ?
      null : conf.getLong(DFS_CLIENT_CACHE_READAHEAD, 0);
  Boolean writeDropBehind = (conf.get(DFS_CLIENT_CACHE_DROP_BEHIND_WRITES) == null) ?
      null : conf.getBoolean(DFS_CLIENT_CACHE_DROP_BEHIND_WRITES, false);
  this.defaultReadCachingStrategy =
      new CachingStrategy(readDropBehind, readahead);
  this.defaultWriteCachingStrategy =
      new CachingStrategy(writeDropBehind, readahead);
  this.clientContext = ClientContext.get(
      conf.get(DFS_CLIENT_CONTEXT, DFS_CLIENT_CONTEXT_DEFAULT),
      dfsClientConf);
  this.hedgedReadThresholdMillis = conf.getLong(
      DFSConfigKeys.DFS_DFSCLIENT_HEDGED_READ_THRESHOLD_MILLIS,
      DFSConfigKeys.DEFAULT_DFSCLIENT_HEDGED_READ_THRESHOLD_MILLIS);
  int numThreads = conf.getInt(
      DFSConfigKeys.DFS_DFSCLIENT_HEDGED_READ_THREADPOOL_SIZE,
      DFSConfigKeys.DEFAULT_DFSCLIENT_HEDGED_READ_THREADPOOL_SIZE);
  if (numThreads > 0) {
    this.initThreadsNumForHedgedReads(numThreads);
  }
  this.saslClient = new SaslDataTransferClient(
      conf, DataTransferSaslUtil.getSaslPropertiesResolver(conf),
      TrustedChannelResolver.getInstance(conf), nnFallbackToSimpleAuth);
}