Java Code Examples for org.apache.hadoop.conf.Configuration#getStrings()
The following examples show how to use org.apache.hadoop.conf.Configuration#getStrings().
Each example is taken from an open-source project; the originating project, source file, and license are noted above each example.
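Before the project examples, here is a minimal, self-contained sketch of the getStrings() behavior the examples below rely on. The property names (demo.codecs, demo.not.set) are hypothetical and not taken from any of the projects listed here: getStrings(name) splits a comma-separated value into a String[] and returns null when the property is unset, while getStrings(name, defaults...) falls back to the supplied defaults.

import org.apache.hadoop.conf.Configuration;

public class GetStringsDemo {
  public static void main(String[] args) {
    // false: do not load the default *-site.xml resources for this sketch
    Configuration conf = new Configuration(false);

    // Comma-separated list, as it would appear in a configuration file.
    conf.set("demo.codecs", "gz,snappy,lz4");

    String[] codecs = conf.getStrings("demo.codecs");        // ["gz", "snappy", "lz4"]
    String[] missing = conf.getStrings("demo.not.set");      // null: property is absent
    String[] withDefault = conf.getStrings("demo.not.set",
        "defaultA", "defaultB");                             // falls back to the defaults

    System.out.println(codecs.length + " codecs configured");
    System.out.println(missing == null ? "unset property -> null" : "unexpected");
    System.out.println(String.join(",", withDefault));
  }
}

Most of the examples below follow one of these two patterns: either checking the returned array for null, or passing an explicit default such as new String[] {}.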
Example 1
Source File: SSLFactory.java From tez with Apache License 2.0
/**
 * Creates an SSLFactory.
 *
 * @param mode SSLFactory mode, client or server.
 * @param conf Hadoop configuration from where the SSLFactory configuration
 *             will be read.
 */
public SSLFactory(Mode mode, Configuration conf) {
  this.conf = conf;
  if (mode == null) {
    throw new IllegalArgumentException("mode cannot be NULL");
  }
  this.mode = mode;
  requireClientCert = conf.getBoolean(SSL_REQUIRE_CLIENT_CERT_KEY,
      DEFAULT_SSL_REQUIRE_CLIENT_CERT);
  // Rest of ssl configs are pre-populated in incoming conf payload
  conf.setBoolean(SSL_REQUIRE_CLIENT_CERT_KEY, requireClientCert);
  Class<? extends KeyStoresFactory> klass =
      conf.getClass(KEYSTORES_FACTORY_CLASS_KEY,
          FileBasedKeyStoresFactory.class, KeyStoresFactory.class);
  keystoresFactory = ReflectionUtils.newInstance(klass, conf);
  enabledProtocols = conf.getStrings(SSL_ENABLED_PROTOCOLS,
      DEFAULT_SSL_ENABLED_PROTOCOLS);
}
Example 2
Source File: NodeManager.java From hadoop with Apache License 2.0
public static NodeHealthScriptRunner getNodeHealthScriptRunner(Configuration conf) {
  String nodeHealthScript =
      conf.get(YarnConfiguration.NM_HEALTH_CHECK_SCRIPT_PATH);
  if (!NodeHealthScriptRunner.shouldRun(nodeHealthScript)) {
    LOG.info("Abey khali");
    return null;
  }
  long nmCheckintervalTime = conf.getLong(
      YarnConfiguration.NM_HEALTH_CHECK_INTERVAL_MS,
      YarnConfiguration.DEFAULT_NM_HEALTH_CHECK_INTERVAL_MS);
  long scriptTimeout = conf.getLong(
      YarnConfiguration.NM_HEALTH_CHECK_SCRIPT_TIMEOUT_MS,
      YarnConfiguration.DEFAULT_NM_HEALTH_CHECK_SCRIPT_TIMEOUT_MS);
  String[] scriptArgs = conf.getStrings(
      YarnConfiguration.NM_HEALTH_CHECK_SCRIPT_OPTS, new String[] {});
  return new NodeHealthScriptRunner(nodeHealthScript,
      nmCheckintervalTime, scriptTimeout, scriptArgs);
}
Example 3
Source File: DeprecatedQueueConfigurationParser.java From hadoop with Apache License 2.0
private List<Queue> createQueues(Configuration conf) {
  String[] queueNameValues = conf.getStrings(MAPRED_QUEUE_NAMES_KEY);
  List<Queue> list = new ArrayList<Queue>();
  for (String name : queueNameValues) {
    try {
      Map<String, AccessControlList> acls = getQueueAcls(name, conf);
      QueueState state = getQueueState(name, conf);
      Queue q = new Queue(name, acls, state);
      list.add(q);
    } catch (Throwable t) {
      LOG.warn("Not able to initialize queue " + name);
    }
  }
  return list;
}
Example 4
Source File: TezYARNUtils.java From incubator-tez with Apache License 2.0
public static String getFrameworkClasspath(Configuration conf) {
  Map<String, String> environment = new HashMap<String, String>();

  TezYARNUtils.addToEnvironment(environment,
      Environment.CLASSPATH.name(),
      Environment.PWD.$(),
      File.pathSeparator);

  TezYARNUtils.addToEnvironment(environment,
      Environment.CLASSPATH.name(),
      Environment.PWD.$() + File.separator + "*",
      File.pathSeparator);

  // Add YARN/COMMON/HDFS jars and conf locations to path
  for (String c : conf.getStrings(
      YarnConfiguration.YARN_APPLICATION_CLASSPATH,
      YarnConfiguration.DEFAULT_YARN_APPLICATION_CLASSPATH)) {
    TezYARNUtils.addToEnvironment(environment, Environment.CLASSPATH.name(),
        c.trim(), File.pathSeparator);
  }
  return StringInterner.weakIntern(environment.get(Environment.CLASSPATH.name()));
}
Example 5
Source File: DerivedColumnTransformationTest.java From incubator-pinot with Apache License 2.0
private void resetAvroSerialization() throws IOException {
  Configuration conf = mapDriver.getConfiguration();
  conf.set("io.serializations",
      "org.apache.hadoop.io.serializer.JavaSerialization,"
          + "org.apache.hadoop.io.serializer.WritableSerialization");
  Schema outputSchema = new Schema.Parser()
      .parse(ClassLoader.getSystemResourceAsStream(TRANSFORMATION_SCHEMA));

  String[] currentSerializations = conf.getStrings(HADOOP_IO_SERIALIZATION);
  String[] finalSerializations = new String[currentSerializations.length + 1];
  System.arraycopy(currentSerializations, 0, finalSerializations, 0,
      currentSerializations.length);
  finalSerializations[finalSerializations.length - 1] =
      AvroSerialization.class.getName();
  mapDriver.getConfiguration().setStrings(HADOOP_IO_SERIALIZATION,
      finalSerializations);

  AvroSerialization.addToConfiguration(conf);
  AvroSerialization.setKeyWriterSchema(conf, outputSchema);
  AvroSerialization.setValueWriterSchema(conf, Schema.create(Schema.Type.NULL));
}
Example 6
Source File: DeprecatedQueueConfigurationParser.java From hadoop with Apache License 2.0
/**
 * Check if queue properties are configured in the passed in
 * configuration. If yes, print out deprecation warning messages.
 */
private boolean deprecatedConf(Configuration conf) {
  String[] queues = null;
  String queueNameValues = getQueueNames(conf);
  if (queueNameValues == null) {
    return false;
  } else {
    LOG.warn(
        "Configuring \"" + MAPRED_QUEUE_NAMES_KEY + "\" in mapred-site.xml or "
            + "hadoop-site.xml is deprecated and will overshadow "
            + QUEUE_CONF_FILE_NAME + ". Remove this property and configure "
            + "queue hierarchy in " + QUEUE_CONF_FILE_NAME);
    // store queues so we can check if ACLs are also configured
    // in the deprecated files.
    queues = conf.getStrings(MAPRED_QUEUE_NAMES_KEY);
  }

  // check if acls are defined
  if (queues != null) {
    for (String queue : queues) {
      for (QueueACL qAcl : QueueACL.values()) {
        String key = toFullPropertyName(queue, qAcl.getAclName());
        String aclString = conf.get(key);
        if (aclString != null) {
          LOG.warn(
              "Configuring queue ACLs in mapred-site.xml or "
                  + "hadoop-site.xml is deprecated. Configure queue ACLs in "
                  + QUEUE_CONF_FILE_NAME);
          // even if one string is configured, it is enough for printing
          // the warning. so we can return from here.
          return true;
        }
      }
    }
  }
  return true;
}
Example 7
Source File: MRRSleepJob.java From tez with Apache License 2.0
protected void setup(Context context)
    throws IOException, InterruptedException {
  Configuration conf = context.getConfiguration();
  this.mapSleepCount = conf.getInt(MAP_SLEEP_COUNT, mapSleepCount);
  this.mapSleepDuration = mapSleepCount == 0 ? 0 :
      conf.getLong(MAP_SLEEP_TIME, 100) / mapSleepCount;
  vertexName = conf.get(
      org.apache.tez.mapreduce.hadoop.MRJobConfig.VERTEX_NAME);

  TaskAttemptID taId = context.getTaskAttemptID();

  String[] taskIds = conf.getStrings(MAP_ERROR_TASK_IDS);
  if (taId.getId() + 1 >= context.getMaxMapAttempts()) {
    finalAttempt = true;
  }
  boolean found = false;
  if (taskIds != null) {
    if (taskIds.length == 1 && taskIds[0].equals("*")) {
      found = true;
    }
    if (!found) {
      for (String taskId : taskIds) {
        if (Integer.parseInt(taskId) == taId.getTaskID().getId()) {
          found = true;
          break;
        }
      }
    }
  }
  if (found) {
    if (!finalAttempt) {
      throwError = conf.getBoolean(MAP_THROW_ERROR, false);
    }
    throwFatal = conf.getBoolean(MAP_FATAL_ERROR, false);
  }
}
Example 8
Source File: MetadataIndexer.java From anthelion with Apache License 2.0
public void setConf(Configuration conf) {
  this.conf = conf;
  dbFieldnames = conf.getStrings(db_CONF_PROPERTY);
  parseFieldnames = conf.getStrings(parse_CONF_PROPERTY);
  contentFieldnames = conf.getStrings(content_CONF_PROPERTY);

  // TODO check conflict between field names e.g. could have same label
  // from different sources
}
Example 9
Source File: ClassName.java From big-c with Apache License 2.0
@Override
protected boolean needsAnonymization(Configuration conf) {
  String[] preserves = conf.getStrings(CLASSNAME_PRESERVE_CONFIG);
  if (preserves != null) {
    // do a simple starts with check
    for (String p : preserves) {
      if (className.startsWith(p)) {
        return false;
      }
    }
  }
  return true;
}
Example 10
Source File: HRegionServer.java From hbase with Apache License 2.0
/**
 * Run test on configured codecs to make sure supporting libs are in place.
 */
private static void checkCodecs(final Configuration c) throws IOException {
  // check to see if the codec list is available:
  String[] codecs = c.getStrings(REGIONSERVER_CODEC, (String[]) null);
  if (codecs == null) return;
  for (String codec : codecs) {
    if (!CompressionTest.testCompression(codec)) {
      throw new IOException("Compression codec " + codec +
          " not supported, aborting RS construction");
    }
  }
}
Example 11
Source File: GridMixRunner.java From RDFS with Apache License 2.0
private static int[] getInts(Configuration conf, String name, int defaultV) {
  String[] vals = conf.getStrings(name, String.valueOf(defaultV));
  int[] results = new int[vals.length];
  for (int i = 0; i < vals.length; ++i) {
    results[i] = Integer.parseInt(vals[i]);
  }
  return results;
}
Example 12
Source File: TezUtilsInternal.java From tez with Apache License 2.0
public static <T extends Enum<T>> Set<T> getEnums(Configuration conf,
    String confName, Class<T> enumType, String defaultValues) {
  String[] names = conf.getStrings(confName);
  if (names == null) {
    names = StringUtils.getStrings(defaultValues);
  }
  if (names == null) {
    return null;
  }
  Set<T> enums = new HashSet<>();
  for (String name : names) {
    enums.add(Enum.valueOf(enumType, name));
  }
  return enums;
}
Example 13
Source File: HTMLLanguageParser.java From nutch-htmlunit with Apache License 2.0
public void setConf(Configuration conf) {
  this.conf = conf;
  contentMaxlength = conf.getInt("lang.analyze.max.length", -1);
  onlyCertain = conf.getBoolean("lang.identification.only.certain", false);
  String[] policy = conf.getStrings("lang.extraction.policy");
  for (int i = 0; i < policy.length; i++) {
    if (policy[i].equals("detect")) {
      detect = i;
    } else if (policy[i].equals("identify")) {
      identify = i;
    }
  }
}
Example 14
Source File: ConfigUtils.java From rya with Apache License 2.0
protected static Set<IRI> getPredicates(final Configuration conf,
    final String confName) {
  final String[] validPredicateStrings = conf.getStrings(confName, new String[] {});
  final Set<IRI> predicates = new HashSet<>();
  for (final String prediateString : validPredicateStrings) {
    predicates.add(SimpleValueFactory.getInstance().createIRI(prediateString));
  }
  return predicates;
}
Example 15
Source File: Task.java From RDFS with Apache License 2.0
/**
 * Load the static resolutions from configuration. This is required for junit
 * to work on testcases that simulate multiple nodes on a single physical
 * node.
 * @param conf The configuration.
 */
public static void loadStaticResolutions(Configuration conf) {
  String hostToResolved[] = conf.getStrings("hadoop.net.static.resolutions");
  if (hostToResolved != null) {
    for (String str : hostToResolved) {
      String name = str.substring(0, str.indexOf('='));
      String resolvedName = str.substring(str.indexOf('=') + 1);
      NetUtils.addStaticResolution(name, resolvedName);
    }
  }
}
Example 16
Source File: CoprocessorHost.java From hbase with Apache License 2.0
/**
 * Load system coprocessors once only. Read the class names from configuration.
 * Called by constructor.
 */
protected void loadSystemCoprocessors(Configuration conf, String confKey) {
  boolean coprocessorsEnabled = conf.getBoolean(COPROCESSORS_ENABLED_CONF_KEY,
      DEFAULT_COPROCESSORS_ENABLED);
  if (!coprocessorsEnabled) {
    return;
  }

  Class<?> implClass;

  // load default coprocessors from configure file
  String[] defaultCPClasses = conf.getStrings(confKey);
  if (defaultCPClasses == null || defaultCPClasses.length == 0) return;

  int currentSystemPriority = Coprocessor.PRIORITY_SYSTEM;
  for (String className : defaultCPClasses) {
    String[] classNameAndPriority = className.split("\\|");
    boolean hasPriorityOverride = false;
    className = classNameAndPriority[0];
    int overridePriority = Coprocessor.PRIORITY_SYSTEM;
    if (classNameAndPriority.length > 1) {
      overridePriority = Integer.parseInt(classNameAndPriority[1]);
      hasPriorityOverride = true;
    }
    className = className.trim();
    if (findCoprocessor(className) != null) {
      // If already loaded will just continue
      LOG.warn("Attempted duplicate loading of " + className + "; skipped");
      continue;
    }
    ClassLoader cl = this.getClass().getClassLoader();
    Thread.currentThread().setContextClassLoader(cl);
    try {
      implClass = cl.loadClass(className);
      int coprocPriority = hasPriorityOverride ? overridePriority : currentSystemPriority;
      // Add coprocessors as we go to guard against case where a coprocessor is specified twice
      // in the configuration
      E env = checkAndLoadInstance(implClass, coprocPriority, conf);
      if (env != null) {
        this.coprocEnvironments.add(env);
        LOG.info("System coprocessor {} loaded, priority={}.", className, coprocPriority);
        if (!hasPriorityOverride) {
          ++currentSystemPriority;
        }
      }
    } catch (Throwable t) {
      // We always abort if system coprocessors cannot be loaded
      abortServer(className, t);
    }
  }
}
Example 17
Source File: RecordIterator.java From datawave with Apache License 2.0
public RecordIterator(final TabletSplitSplit fileSplit,
    AccumuloConfiguration acuTableConf, Configuration conf) {
  super();
  this.conf = conf;

  this.conf.setInt("dfs.client.socket-timeout", 10 * 1000);

  failureMax = conf.getInt(RECORDITER_FAILURE_COUNT_MAX, FAILURE_MAX_DEFAULT);
  failureSleep = conf.getLong(RECORDITER_FAILURE_SLEEP_INTERVAL, DEFAULT_FAILURE_SLEEP);

  String[] authStrings = conf.getStrings("recorditer.auth.string");

  List<ByteBuffer> authBuffer = Lists.newArrayList();
  if (null != authStrings) {
    for (String authString : authStrings) {
      authBuffer.add(ByteBuffer.wrap(authString.getBytes()));
    }
  }

  auths = new Authorizations(authBuffer);

  this.fileSplit = fileSplit;
  this.acuTableConf = acuTableConf;

  executor = Executors.newFixedThreadPool(READ_AHEAD_THREADS,
      new RecordIteratorFactory("RecordIterator "));
  try {
    fileRangeSplits = buildRangeSplits(fileSplit);
    if (log.isTraceEnabled()) {
      log.trace("Iterator over the following files: " + fileRangeSplits);
    }
    initialize(conf, true);

    if (rangeQueue.isEmpty()) {
      throw new RuntimeException("Queue of ranges is empty");
    }

    seekToNextKey();
  } catch (Exception e) {
    try {
      close();
    } catch (IOException e1) {
      // ignore
    }
    throw new RuntimeException(e);
  }
}
Example 18
Source File: SqoopHCatImportHelper.java From aliyun-maxcompute-data-collectors with Apache License 2.0
public SqoopHCatImportHelper(Configuration conf) throws IOException,
    InterruptedException {
  String inputJobInfoStr = conf.get(HCatConstants.HCAT_KEY_JOB_INFO);
  jobInfo = (InputJobInfo) HCatUtil.deserialize(inputJobInfoStr);
  dataColsSchema = jobInfo.getTableInfo().getDataColumns();
  partitionSchema = jobInfo.getTableInfo().getPartitionColumns();
  StringBuilder storerInfoStr = new StringBuilder(1024);
  StorerInfo storerInfo = jobInfo.getTableInfo().getStorerInfo();
  storerInfoStr.append("HCatalog Storer Info : ").append("\n\tHandler = ")
      .append(storerInfo.getStorageHandlerClass())
      .append("\n\tInput format class = ").append(storerInfo.getIfClass())
      .append("\n\tOutput format class = ").append(storerInfo.getOfClass())
      .append("\n\tSerde class = ").append(storerInfo.getSerdeClass());
  Properties storerProperties = storerInfo.getProperties();
  if (!storerProperties.isEmpty()) {
    storerInfoStr.append("\nStorer properties ");
    for (Map.Entry<Object, Object> entry : storerProperties.entrySet()) {
      String key = (String) entry.getKey();
      Object val = entry.getValue();
      storerInfoStr.append("\n\t").append(key).append('=').append(val);
    }
  }
  storerInfoStr.append("\n");
  LOG.info(storerInfoStr);

  hCatFullTableSchema = new HCatSchema(dataColsSchema.getFields());
  for (HCatFieldSchema hfs : partitionSchema.getFields()) {
    hCatFullTableSchema.append(hfs);
  }
  fieldCount = hCatFullTableSchema.size();
  lobLoader = new LargeObjectLoader(conf,
      new Path(jobInfo.getTableInfo().getTableLocation()));
  bigDecimalFormatString = conf.getBoolean(
      ImportJobBase.PROPERTY_BIGDECIMAL_FORMAT,
      ImportJobBase.PROPERTY_BIGDECIMAL_FORMAT_DEFAULT);
  debugHCatImportMapper = conf.getBoolean(
      SqoopHCatUtilities.DEBUG_HCAT_IMPORT_MAPPER_PROP, false);
  IntWritable[] delimChars = DefaultStringifier.loadArray(conf,
      SqoopHCatUtilities.HIVE_DELIMITERS_TO_REPLACE_PROP, IntWritable.class);
  hiveDelimiters = new DelimiterSet((char) delimChars[0].get(),
      (char) delimChars[1].get(), (char) delimChars[2].get(),
      (char) delimChars[3].get(), delimChars[4].get() == 1 ? true : false);
  hiveDelimsReplacement = conf
      .get(SqoopHCatUtilities.HIVE_DELIMITERS_REPLACEMENT_PROP);
  if (hiveDelimsReplacement == null) {
    hiveDelimsReplacement = "";
  }
  doHiveDelimsReplacement = Boolean.valueOf(conf
      .get(SqoopHCatUtilities.HIVE_DELIMITERS_REPLACEMENT_ENABLED_PROP));

  IntWritable[] fPos = DefaultStringifier.loadArray(conf,
      SqoopHCatUtilities.HCAT_FIELD_POSITIONS_PROP, IntWritable.class);
  hCatFieldPositions = new int[fPos.length];
  for (int i = 0; i < fPos.length; ++i) {
    hCatFieldPositions[i] = fPos[i].get();
  }

  LOG.debug("Hive delims replacement enabled : " + doHiveDelimsReplacement);
  LOG.debug("Hive Delimiters : " + hiveDelimiters.toString());
  LOG.debug("Hive delimiters replacement : " + hiveDelimsReplacement);
  staticPartitionKeys = conf
      .getStrings(SqoopHCatUtilities.HCAT_STATIC_PARTITION_KEY_PROP);
  String partKeysString = staticPartitionKeys == null ? ""
      : Arrays.toString(staticPartitionKeys);
  LOG.debug("Static partition key used : " + partKeysString);
}
Example 19
Source File: InputSplitPruneUtil.java From incubator-retired-blur with Apache License 2.0
public static long getBlurLookupRowIdFromIndexCount(Configuration configuration,
    String table, int shard) {
  String[] strings = configuration.getStrings(getBlurLookupRowIdFromIndexCountName(table));
  return getCount(strings, shard);
}
Example 20
Source File: RDFWritable.java From marklogic-contentpump with Apache License 2.0
@Override
public Content getContent(Configuration conf, ContentCreateOptions options,
    String uri) {
  String[] collections = conf.getStrings(MarkLogicConstants.OUTPUT_COLLECTION);
  String outputGraph = conf.get(MarkLogicConstants.OUTPUT_GRAPH);
  String outputOverrideGraph = conf.get(MarkLogicConstants.OUTPUT_OVERRIDE_GRAPH);

  if (collections != null) {
    List<String> optionList = new ArrayList<String>();
    if (graphUri == null) { // no graph specified in quad
      if (outputGraph != null) // output_graph is set
        optionList.add(outputGraph.trim());
      else if (outputOverrideGraph != null) {
        optionList.add(outputOverrideGraph.trim());
      }
    } else {
      if (outputOverrideGraph != null)
        optionList.add(outputOverrideGraph);
      else
        optionList.add(graphUri); // use quad's graph
    }
    // collections are always added
    Collections.addAll(optionList, collections);
    collections = optionList.toArray(new String[0]);
    for (int i = 0; i < collections.length; i++) {
      collections[i] = collections[i].trim();
    }
    options.setCollections(collections);
  } else {
    if (graphUri == null) {
      if (outputOverrideGraph != null) {
        graphUri = outputOverrideGraph;
      } else if (outputGraph != null) {
        graphUri = outputGraph;
      } else {
        graphUri = "http://marklogic.com/semantics#default-graph";
      }
    }
    String[] col = new String[1];
    col[0] = graphUri;
    options.setCollections(col);
  }
  options.setGraph(graphUri);
  // permissions
  if (permissions != null)
    options.setPermissions(permissions);
  Content content = null;
  if (value instanceof Text) {
    content = ContentFactory.newContent(uri, ((Text) value).toString(), options);
  } else if (value instanceof MarkLogicNode) {
    content = ContentFactory.newContent(uri, ((MarkLogicNode) value).get(), options);
  } else if (value instanceof BytesWritable) {
    content = ContentFactory.newContent(uri, ((BytesWritable) value).getBytes(),
        0, ((BytesWritable) value).getLength(), options);
  }
  return content;
}