Java Code Examples for org.elasticsearch.common.settings.Settings#getAsList()
The following examples show how to use org.elasticsearch.common.settings.Settings#getAsList(), which reads a setting as a list of strings and, in its two-argument form, returns a supplied default when the key is not configured. All snippets come from open-source projects; the project and license are noted above each example.
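Before the project examples, here is a minimal standalone sketch (not taken from any of the projects below; the setting keys are invented for illustration) showing the two common call patterns: reading a list-valued setting directly, and supplying a default for a key that is absent.

```java
import java.util.Collections;
import java.util.List;

import org.elasticsearch.common.settings.Settings;

public class GetAsListSketch {

    public static void main(String[] args) {
        // Build a Settings instance with a list-valued key (key name is hypothetical).
        Settings settings = Settings.builder()
                .putList("my_plugin.endpoints", "endpoint1", "endpoint2")
                .build();

        // Without a default: returns the configured values, or an empty list if the key is absent.
        List<String> endpoints = settings.getAsList("my_plugin.endpoints");

        // With a default: the default is returned when the key is not present.
        List<String> fallback = settings.getAsList("my_plugin.missing_key",
                Collections.singletonList("default-endpoint"));

        System.out.println(endpoints); // [endpoint1, endpoint2]
        System.out.println(fallback);  // [default-endpoint]
    }
}
```

The examples that follow use the same two overloads with project-specific keys and defaults such as Collections.emptyList() or Collections.singletonList("*").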
Example 1
Source File: AuditMessageRouter.java (from deprecated-security-advanced-modules, Apache License 2.0)

```java
private final List<AuditLogSink> createSinksForCategory(Category category, Settings configuration) {
    List<AuditLogSink> sinksForCategory = new LinkedList<>();
    List<String> sinks = configuration.getAsList("endpoints");
    if (sinks == null || sinks.isEmpty()) {
        log.error("No endpoints configured for category {}", category);
        return sinksForCategory;
    }
    for (String sinkName : sinks) {
        AuditLogSink sink = sinkProvider.getSink(sinkName);
        if (sink != null && !sinksForCategory.contains(sink)) {
            sinksForCategory.add(sink);
        } else {
            log.error("Configured endpoint '{}' not available", sinkName);
        }
    }
    return sinksForCategory;
}
```
Example 2
Source File: SSLConfigConstants.java (from deprecated-security-ssl, Apache License 2.0)

```java
public static final String[] getSecureSSLProtocols(Settings settings, boolean http) {
    List<String> configuredProtocols = null;

    if (settings != null) {
        if (http) {
            configuredProtocols = settings.getAsList(OPENDISTRO_SECURITY_SSL_HTTP_ENABLED_PROTOCOLS, Collections.emptyList());
        } else {
            configuredProtocols = settings.getAsList(OPENDISTRO_SECURITY_SSL_TRANSPORT_ENABLED_PROTOCOLS, Collections.emptyList());
        }
    }

    if (configuredProtocols != null && configuredProtocols.size() > 0) {
        return configuredProtocols.toArray(new String[0]);
    }

    return _SECURE_SSL_PROTOCOLS.clone();
}
```
Example 3
Source File: SSLConfigConstants.java (from deprecated-security-ssl, Apache License 2.0)

```java
public static final List<String> getSecureSSLCiphers(Settings settings, boolean http) {
    List<String> configuredCiphers = null;

    if (settings != null) {
        if (http) {
            configuredCiphers = settings.getAsList(OPENDISTRO_SECURITY_SSL_HTTP_ENABLED_CIPHERS, Collections.emptyList());
        } else {
            configuredCiphers = settings.getAsList(OPENDISTRO_SECURITY_SSL_TRANSPORT_ENABLED_CIPHERS, Collections.emptyList());
        }
    }

    if (configuredCiphers != null && configuredCiphers.size() > 0) {
        return configuredCiphers;
    }

    return Collections.unmodifiableList(Arrays.asList(_SECURE_SSL_CIPHERS));
}
```
Example 4
Source File: ChangesFeedPlugin.java (from es-change-feed-plugin, Apache License 2.0)

```java
public ChangesFeedPlugin(Settings settings) {
    log.info("Starting Changes Plugin");

    enabled = !settings.getAsBoolean(SETTING_DISABLE, false);
    filter = settings.getAsList(SETTING_FILTER, Collections.singletonList("*"));

    if (enabled) {
        int port = settings.getAsInt(SETTING_PORT, 9400);

        List<String> sourcesStr = settings.getAsList(SETTING_LISTEN_SOURCE, Collections.singletonList("*"));
        this.sources = sourcesStr.stream()
                .map(Source::new)
                .collect(Collectors.toSet());

        WebSocketServer server = new WebSocketServer(port);
        server.start();
    } else {
        sources = null;
    }
}
```
Example 5
Source File: NGramSynonymTokenizerFactory.java (from elasticsearch-analysis-synonym, Apache License 2.0)

```java
public NGramSynonymTokenizerFactory(final IndexSettings indexSettings, final Environment env,
        final String name, final Settings settings) {
    super(indexSettings, name, settings);
    ignoreCase = settings.getAsBoolean("ignore_case", true);
    n = settings.getAsInt("n", NGramSynonymTokenizer.DEFAULT_N_SIZE);
    delimiters = settings.get("delimiters", NGramSynonymTokenizer.DEFAULT_DELIMITERS);
    expand = settings.getAsBoolean("expand", true);

    synonymLoader = new SynonymLoader(env, settings, expand, SynonymLoader.getAnalyzer(ignoreCase));
    if (synonymLoader.getSynonymMap() == null) {
        if (settings.getAsList("synonyms", null) != null) {
            logger.warn("synonyms values are empty.");
        } else if (settings.get("synonyms_path") != null) {
            logger.warn("synonyms_path[{}] is empty.", settings.get("synonyms_path"));
        } else {
            logger.debug("No synonym data.");
        }
    }
}
```
Example 6
Source File: CJKBigramFilterFactory.java (from crate, Apache License 2.0)

```java
CJKBigramFilterFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
    super(indexSettings, name, settings);
    outputUnigrams = settings.getAsBoolean("output_unigrams", false);
    final List<String> asArray = settings.getAsList("ignored_scripts");
    Set<String> scripts = new HashSet<>(Arrays.asList("han", "hiragana", "katakana", "hangul"));
    if (asArray != null) {
        scripts.removeAll(asArray);
    }
    int flags = 0;
    for (String script : scripts) {
        if ("han".equals(script)) {
            flags |= CJKBigramFilter.HAN;
        } else if ("hiragana".equals(script)) {
            flags |= CJKBigramFilter.HIRAGANA;
        } else if ("katakana".equals(script)) {
            flags |= CJKBigramFilter.KATAKANA;
        } else if ("hangul".equals(script)) {
            flags |= CJKBigramFilter.HANGUL;
        }
    }
    this.flags = flags;
}
```
Example 7
Source File: StandardnumberService.java (from elasticsearch-plugin-bundle, GNU Affero General Public License v3.0)

```java
/**
 * Called from {@link StandardnumberTokenFilter}.
 *
 * @param settings settings
 * @param content content
 * @return a collection of variants of the detected standard number or an empty collection
 */
public Collection<CharSequence> lookup(Settings settings, CharSequence content) {
    List<String> stdnums = settings.getAsList("standardnumbers", DEFAULT_STANDARD_NUMBERS);
    if (stdnums.isEmpty()) {
        stdnums = DEFAULT_STANDARD_NUMBERS;
    }
    Collection<CharSequence> variants = new LinkedList<>();
    for (String stdnum : stdnums) {
        try {
            StandardNumber standardNumber = StandardNumber.getInstance(stdnum);
            if (standardNumber instanceof ISBN) {
                handleISBN((ISBN) standardNumber, content, variants);
            } else {
                standardNumber = standardNumber.set(content).normalize();
                if (standardNumber.isValid()) {
                    for (String variant : standardNumber.getTypedVariants()) {
                        if (variant != null) {
                            variants.add(variant);
                        }
                    }
                }
            }
        } catch (NoSuchStandardNumberException e) {
            logger.error(e.getMessage(), e);
        }
    }
    return variants;
}
```
Example 8
Source File: KeepWordFilterFactory.java (from crate, Apache License 2.0)

```java
KeepWordFilterFactory(IndexSettings indexSettings, Environment env, String name, Settings settings) {
    super(indexSettings, name, settings);

    final List<String> arrayKeepWords = settings.getAsList(KEEP_WORDS_KEY, null);
    final String keepWordsPath = settings.get(KEEP_WORDS_PATH_KEY, null);
    if ((arrayKeepWords == null && keepWordsPath == null) || (arrayKeepWords != null && keepWordsPath != null)) {
        // we don't allow both or none
        throw new IllegalArgumentException(
                "keep requires either `" + KEEP_WORDS_KEY + "` or `" + KEEP_WORDS_PATH_KEY + "` to be configured");
    }
    if (settings.get(ENABLE_POS_INC_KEY) != null) {
        throw new IllegalArgumentException(
                ENABLE_POS_INC_KEY + " is not supported anymore. Please fix your analysis chain");
    }
    this.keepWords = Analysis.getWordSet(env, settings, KEEP_WORDS_KEY);
}
```
Example 9
Source File: CharGroupTokenizerFactory.java (from crate, Apache License 2.0)

```java
public CharGroupTokenizerFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
    super(indexSettings, name, settings);

    for (final String c : settings.getAsList("tokenize_on_chars")) {
        if (c == null || c.length() == 0) {
            throw new RuntimeException("[tokenize_on_chars] cannot contain empty characters");
        }

        if (c.length() == 1) {
            // single literal character
            tokenizeOnChars.add((int) c.charAt(0));
        } else if (c.charAt(0) == '\\') {
            // escape sequence such as \n, resolved by parseEscapedChar
            tokenizeOnChars.add((int) parseEscapedChar(c));
        } else {
            // named character classes
            switch (c) {
                case "letter":
                    tokenizeOnLetter = true;
                    break;
                case "digit":
                    tokenizeOnDigit = true;
                    break;
                case "whitespace":
                    tokenizeOnSpace = true;
                    break;
                case "punctuation":
                    tokenizeOnPunctuation = true;
                    break;
                case "symbol":
                    tokenizeOnSymbol = true;
                    break;
                default:
                    throw new RuntimeException("Invalid escaped char in [" + c + "]");
            }
        }
    }
}
```
Example 10
Source File: PatternCaptureGroupTokenFilterFactory.java (from crate, Apache License 2.0)

```java
PatternCaptureGroupTokenFilterFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
    super(indexSettings, name, settings);
    List<String> regexes = settings.getAsList(PATTERNS_KEY, null, false);
    if (regexes == null) {
        throw new IllegalArgumentException(
                "required setting '" + PATTERNS_KEY + "' is missing for token filter [" + name + "]");
    }
    patterns = new Pattern[regexes.size()];
    for (int i = 0; i < regexes.size(); i++) {
        patterns[i] = Pattern.compile(regexes.get(i));
    }

    preserveOriginal = settings.getAsBoolean(PRESERVE_ORIG_KEY, true);
}
```
Example 11
Source File: KeepTypesFilterFactory.java (from crate, Apache License 2.0)

```java
KeepTypesFilterFactory(IndexSettings indexSettings, Environment env, String name, Settings settings) {
    super(indexSettings, name, settings);

    final List<String> arrayKeepTypes = settings.getAsList(KEEP_TYPES_KEY, null);
    if (arrayKeepTypes == null) {
        throw new IllegalArgumentException("keep_types requires `" + KEEP_TYPES_KEY + "` to be configured");
    }

    this.includeMode = KeepTypesMode.fromString(settings.get(KEEP_TYPES_MODE_KEY, "include"));
    this.keepTypes = new HashSet<>(arrayKeepTypes);
}
```
Example 12
Source File: DropAnalyzerPlan.java (from crate, Apache License 2.0)

```java
@VisibleForTesting
public static ClusterUpdateSettingsRequest createRequest(String analyzerName,
                                                         FulltextAnalyzerResolver ftResolver) {
    Settings.Builder builder = Settings.builder();
    builder.putNull(ANALYZER.buildSettingName(analyzerName));
    Settings settings = ftResolver.getCustomAnalyzer(analyzerName);

    String tokenizerName = settings.get(ANALYZER.buildSettingChildName(analyzerName, TOKENIZER.getName()));
    if (tokenizerName != null
        && ftResolver.hasCustomThingy(tokenizerName, FulltextAnalyzerResolver.CustomType.TOKENIZER)) {
        builder.putNull(TOKENIZER.buildSettingName(tokenizerName));
    }

    for (String tokenFilterName : settings
        .getAsList(ANALYZER.buildSettingChildName(analyzerName, TOKEN_FILTER.getName()))) {
        if (ftResolver.hasCustomThingy(tokenFilterName, FulltextAnalyzerResolver.CustomType.TOKEN_FILTER)) {
            builder.putNull(TOKEN_FILTER.buildSettingName(tokenFilterName));
        }
    }

    for (String charFilterName : settings
        .getAsList(ANALYZER.buildSettingChildName(analyzerName, CHAR_FILTER.getName()))) {
        if (ftResolver.hasCustomThingy(charFilterName, FulltextAnalyzerResolver.CustomType.CHAR_FILTER)) {
            builder.putNull(CHAR_FILTER.buildSettingName(charFilterName));
        }
    }

    return new ClusterUpdateSettingsRequest()
        .persistentSettings(builder.build());
}
```
Example 13
Source File: Analysis.java (from crate, Apache License 2.0)

```java
public static CharArraySet parseStemExclusion(Settings settings, CharArraySet defaultStemExclusion) {
    String value = settings.get("stem_exclusion");
    if ("_none_".equals(value)) {
        return CharArraySet.EMPTY_SET;
    }
    List<String> stemExclusion = settings.getAsList("stem_exclusion", null);
    if (stemExclusion != null) {
        // LUCENE 4 UPGRADE: Should be settings.getAsBoolean("stem_exclusion_case", false)?
        return new CharArraySet(stemExclusion, false);
    } else {
        return defaultStemExclusion;
    }
}
```
Example 14
Source File: HtmlStripCharFilterFactory.java (from crate, Apache License 2.0)

```java
HtmlStripCharFilterFactory(IndexSettings indexSettings, Environment env, String name, Settings settings) {
    super(indexSettings, name);
    List<String> escapedTags = settings.getAsList("escaped_tags");
    if (escapedTags.size() > 0) {
        this.escapedTags = unmodifiableSet(newHashSet(escapedTags));
    } else {
        this.escapedTags = null;
    }
}
```
Example 15
Source File: FstDecompoundTokenFilterFactory.java (from elasticsearch-plugin-bundle, GNU Affero General Public License v3.0)

```java
private FstDecompounder createDecompounder(Settings settings) {
    try {
        String words = settings.get("fst", "words.fst");
        List<String> glueMorphs = settings.getAsList("glue_morphs");
        return new FstDecompounder(getClass().getResourceAsStream(words), glueMorphs);
    } catch (IOException e) {
        throw new IllegalArgumentException("fst decompounder resources in settings not found: " + settings, e);
    }
}
```
Example 16
Source File: DynamicRanker.java (from elasticsearch-dynarank, Apache License 2.0)

```java
ScriptInfo(final String script, final String lang, final String scriptType,
        final Settings settings, final int reorderSize) {
    this.script = script;
    this.lang = lang;
    this.reorderSize = reorderSize;
    this.settings = new HashMap<>();
    // copy every setting key into the map, with each value read as a String[]
    for (final String name : settings.keySet()) {
        final List<String> list = settings.getAsList(name);
        this.settings.put(name, list.toArray(new String[list.size()]));
    }
    if ("STORED".equalsIgnoreCase(scriptType)) {
        this.scriptType = ScriptType.STORED;
    } else {
        this.scriptType = ScriptType.INLINE;
    }
}
```
Example 17
Source File: LDAPAuthenticationBackend.java (from deprecated-security-advanced-modules, Apache License 2.0)

```java
public LDAPAuthenticationBackend(final Settings settings, final Path configPath) {
    this.settings = settings;
    this.configPath = configPath;
    this.userBaseSettings = getUserBaseSettings(settings);

    customAttrMaxValueLen = settings.getAsInt(ConfigConstants.LDAP_CUSTOM_ATTR_MAXVAL_LEN, 36);
    whitelistedAttributes = settings.getAsList(ConfigConstants.LDAP_CUSTOM_ATTR_WHITELIST, null);
}
```
Example 18
Source File: FulltextAnalyzerResolver.java (from crate, Apache License 2.0)

```java
/**
 * Resolve the full settings necessary for the custom analyzer with name ``name``
 * to be included in index-settings to get applied on an index.
 * <p>
 * Resolves all custom tokenizer, token-filter and char-filter settings and includes them.
 *
 * @param name the name of the analyzer to resolve
 * @return Settings ready for inclusion into a CreateIndexRequest
 * @throws AnalyzerInvalidException if no custom analyzer with name ``name`` could be found
 */
public Settings resolveFullCustomAnalyzerSettings(String name) throws AnalyzerInvalidException {
    Settings.Builder builder = Settings.builder();
    Settings analyzerSettings = getCustomAnalyzer(name);
    if (analyzerSettings != null) {
        builder.put(analyzerSettings);

        String tokenizerName = analyzerSettings.get(ANALYZER.buildSettingChildName(name, TOKENIZER.getName()));
        if (tokenizerName != null) {
            Settings customTokenizerSettings = getCustomTokenizer(tokenizerName);
            if (customTokenizerSettings != null) {
                builder.put(customTokenizerSettings);
            } else if (!hasBuiltInTokenizer(tokenizerName)) {
                throw new AnalyzerInvalidException(String.format(Locale.ENGLISH,
                    "Invalid Analyzer: could not resolve tokenizer '%s'", tokenizerName));
            }
        }

        List<String> tokenFilterNames = analyzerSettings.getAsList(ANALYZER.buildSettingChildName(name, TOKEN_FILTER.getName()));
        for (String tokenFilterName : tokenFilterNames) {
            Settings customTokenFilterSettings = getCustomTokenFilter(tokenFilterName);
            if (customTokenFilterSettings != null) {
                builder.put(customTokenFilterSettings);
            } else if (!hasBuiltInTokenFilter(tokenFilterName)) {
                throw new AnalyzerInvalidException(String.format(Locale.ENGLISH,
                    "Invalid Analyzer: could not resolve token-filter '%s'", tokenFilterName));
            }
        }

        List<String> charFilterNames = analyzerSettings.getAsList(ANALYZER.buildSettingChildName(name, CHAR_FILTER.getName()));
        for (String charFilterName : charFilterNames) {
            Settings customCharFilterSettings = getCustomCharFilter(charFilterName);
            if (customCharFilterSettings != null) {
                builder.put(customCharFilterSettings);
            } else if (!hasBuiltInCharFilter(charFilterName)) {
                throw new AnalyzerInvalidException(String.format(Locale.ENGLISH,
                    "Invalid Analyzer: could not resolve char-filter '%s'", charFilterName));
            }
        }
    } else {
        throw new AnalyzerUnknownException(name);
    }
    return builder.build();
}
```
Example 19
Source File: MultiplexerTokenFilterFactory.java (from crate, Apache License 2.0)

```java
public MultiplexerTokenFilterFactory(IndexSettings indexSettings, Environment env, String name, Settings settings) throws IOException {
    super(indexSettings, name, settings);
    this.filterNames = settings.getAsList("filters");
    this.preserveOriginal = settings.getAsBoolean("preserve_original", true);
}
```
Example 20
Source File: SynonymTokenFilterFactory.java (from elasticsearch-analysis-synonym, Apache License 2.0)

```java
public SynonymTokenFilterFactory(final IndexSettings indexSettings, final Environment environment,
        final String name, final Settings settings, final AnalysisRegistry analysisRegistry) throws IOException {
    super(indexSettings, name, settings);

    this.ignoreCase = settings.getAsBoolean("ignore_case", false);
    final boolean expand = settings.getAsBoolean("expand", true);
    final String tokenizerName = settings.get("tokenizer", "whitespace");

    AnalysisModule.AnalysisProvider<TokenizerFactory> tokenizerFactoryFactory = null;
    if (analysisRegistry != null) {
        tokenizerFactoryFactory = analysisRegistry.getTokenizerProvider(tokenizerName, indexSettings);
        if (tokenizerFactoryFactory == null) {
            throw new IllegalArgumentException("failed to find tokenizer [" + tokenizerName + "] for synonym token filter");
        }
    }
    final TokenizerFactory tokenizerFactory = tokenizerFactoryFactory == null ? null
            : tokenizerFactoryFactory.get(indexSettings, environment, tokenizerName, AnalysisRegistry
                    .getSettingsFromIndexSettings(indexSettings,
                            AnalysisRegistry.INDEX_ANALYSIS_TOKENIZER + "." + tokenizerName));

    final Analyzer analyzer = new Analyzer() {
        @Override
        protected TokenStreamComponents createComponents(final String fieldName) {
            final Tokenizer tokenizer = tokenizerFactory == null ? new WhitespaceTokenizer() : tokenizerFactory.create();
            final TokenStream stream = ignoreCase ? new LowerCaseFilter(tokenizer) : tokenizer;
            return new TokenStreamComponents(tokenizer, stream);
        }
    };

    synonymLoader = new SynonymLoader(environment, settings, expand, analyzer);
    if (synonymLoader.getSynonymMap() == null) {
        if (settings.getAsList("synonyms", null) != null) {
            logger.warn("synonyms values are empty.");
        } else if (settings.get("synonyms_path") != null) {
            logger.warn("synonyms_path[{}] is empty.", settings.get("synonyms_path"));
        } else {
            throw new IllegalArgumentException("synonym requires either `synonyms` or `synonyms_path` to be configured");
        }
    }
}
```