org.elasticsearch.common.inject.assistedinject.Assisted Java Examples
The following examples show how to use
org.elasticsearch.common.inject.assistedinject.Assisted.
In each example, @Assisted marks the constructor parameters (typically the component name and its Settings) that are supplied at creation time by the caller rather than resolved by the injector.
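All of the examples below share that shape: the injector provides framework dependencies such as the Index and IndexSettingsService, while the @Assisted parameters come from the factory call. The following is a minimal, generic sketch of the pattern (not taken from the Elasticsearch sources); ConfigService and ExampleFilterFactory are hypothetical names used only for illustration.

import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.inject.assistedinject.Assisted;

public class AssistedSketch {

    // Hypothetical injected dependency.
    public static class ConfigService {}

    public static class ExampleFilter {
        private final ConfigService configService; // provided by the injector
        private final String name;                 // provided by the caller
        private final int bufferSize;              // provided by the caller

        @Inject
        public ExampleFilter(ConfigService configService,
                             @Assisted String name,
                             @Assisted int bufferSize) {
            this.configService = configService;
            this.name = name;
            this.bufferSize = bufferSize;
        }
    }

    // Hypothetical factory interface; an assisted-inject factory binding would
    // implement it automatically, matching the @Assisted parameters to the
    // factory method arguments and filling in ConfigService from the injector.
    public interface ExampleFilterFactory {
        ExampleFilter create(String name, int bufferSize);
    }
}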
Example #1
Source File: PathHierarchyTokenizerFactory.java From Elasticsearch with Apache License 2.0 | 7 votes |
@Inject
public PathHierarchyTokenizerFactory(Index index, IndexSettingsService indexSettingsService,
                                     @Assisted String name, @Assisted Settings settings) {
    super(index, indexSettingsService.getSettings(), name, settings);
    bufferSize = settings.getAsInt("buffer_size", 1024);

    String delimiter = settings.get("delimiter");
    if (delimiter == null) {
        this.delimiter = PathHierarchyTokenizer.DEFAULT_DELIMITER;
    } else if (delimiter.length() > 1) {
        throw new IllegalArgumentException("delimiter can only be a one char value");
    } else {
        this.delimiter = delimiter.charAt(0);
    }

    String replacement = settings.get("replacement");
    if (replacement == null) {
        this.replacement = this.delimiter;
    } else if (replacement.length() > 1) {
        throw new IllegalArgumentException("replacement can only be a one char value");
    } else {
        this.replacement = replacement.charAt(0);
    }

    this.skip = settings.getAsInt("skip", PathHierarchyTokenizer.DEFAULT_SKIP);
    this.reverse = settings.getAsBoolean("reverse", false);
}
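For reference, a sketch of the settings keys this constructor reads, built programmatically. It assumes the ES 2.x Settings API (Settings.settingsBuilder()); the builder entry point differs in other versions, and the values shown are arbitrary examples.

import org.elasticsearch.common.settings.Settings;

public class PathHierarchySettingsSketch {
    public static void main(String[] args) {
        Settings settings = Settings.settingsBuilder()
                .put("buffer_size", 1024)
                .put("delimiter", "/")     // must be a single character
                .put("replacement", "-")   // emitted in place of the delimiter; defaults to the delimiter
                .put("skip", 1)            // number of leading path components to drop
                .put("reverse", false)
                .build();
        // The factory above reads these via settings.get(...), getAsInt(...), getAsBoolean(...).
        System.out.println(settings.getAsMap());
    }
}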
Example #2
Source File: PatternAnalyzerProvider.java From Elasticsearch with Apache License 2.0 | 6 votes |
@Inject
public PatternAnalyzerProvider(Index index, IndexSettingsService indexSettingsService, Environment env,
                               @Assisted String name, @Assisted Settings settings) {
    super(index, indexSettingsService.getSettings(), name, settings);

    Version esVersion = Version.indexCreated(indexSettingsService.getSettings());
    final CharArraySet defaultStopwords;
    if (esVersion.onOrAfter(Version.V_1_0_0_RC1)) {
        defaultStopwords = CharArraySet.EMPTY_SET;
    } else {
        defaultStopwords = StopAnalyzer.ENGLISH_STOP_WORDS_SET;
    }

    boolean lowercase = settings.getAsBoolean("lowercase", true);
    CharArraySet stopWords = Analysis.parseStopWords(env, settings, defaultStopwords);

    String sPattern = settings.get("pattern", "\\W+" /*PatternAnalyzer.NON_WORD_PATTERN*/);
    if (sPattern == null) {
        throw new IllegalArgumentException("Analyzer [" + name + "] of type pattern must have a `pattern` set");
    }
    Pattern pattern = Regex.compile(sPattern, settings.get("flags"));

    analyzer = new PatternAnalyzer(pattern, lowercase, stopWords);
}
Example #3
Source File: CJKBigramFilterFactory.java From Elasticsearch with Apache License 2.0 | 6 votes |
@Inject
public CJKBigramFilterFactory(Index index, IndexSettingsService indexSettingsService,
                              @Assisted String name, @Assisted Settings settings) {
    super(index, indexSettingsService.getSettings(), name, settings);
    outputUnigrams = settings.getAsBoolean("output_unigrams", false);
    final String[] asArray = settings.getAsArray("ignored_scripts");
    Set<String> scripts = new HashSet<>(Arrays.asList("han", "hiragana", "katakana", "hangul"));
    if (asArray != null) {
        scripts.removeAll(Arrays.asList(asArray));
    }
    int flags = 0;
    for (String script : scripts) {
        if ("han".equals(script)) {
            flags |= CJKBigramFilter.HAN;
        } else if ("hiragana".equals(script)) {
            flags |= CJKBigramFilter.HIRAGANA;
        } else if ("katakana".equals(script)) {
            flags |= CJKBigramFilter.KATAKANA;
        } else if ("hangul".equals(script)) {
            flags |= CJKBigramFilter.HANGUL;
        }
    }
    this.flags = flags;
}
Example #4
Source File: HyphenationCompoundWordTokenFilterFactory.java From Elasticsearch with Apache License 2.0 | 6 votes |
@Inject
public HyphenationCompoundWordTokenFilterFactory(Index index, IndexSettingsService indexSettingsService, Environment env,
                                                 @Assisted String name, @Assisted Settings settings) {
    super(index, indexSettingsService.getSettings(), env, name, settings);

    String hyphenationPatternsPath = settings.get("hyphenation_patterns_path", null);
    if (hyphenationPatternsPath == null) {
        throw new IllegalArgumentException("hyphenation_patterns_path is a required setting.");
    }

    Path hyphenationPatternsFile = env.configFile().resolve(hyphenationPatternsPath);

    try {
        hyphenationTree = HyphenationCompoundWordTokenFilter.getHyphenationTree(
                new InputSource(Files.newInputStream(hyphenationPatternsFile)));
    } catch (Exception e) {
        throw new IllegalArgumentException("Exception while reading hyphenation_patterns_path: " + e.getMessage());
    }
}
Example #5
Source File: HunspellTokenFilterFactory.java From Elasticsearch with Apache License 2.0 | 6 votes |
@Inject
public HunspellTokenFilterFactory(Index index, IndexSettingsService indexSettingsService,
                                  @Assisted String name, @Assisted Settings settings,
                                  HunspellService hunspellService) {
    super(index, indexSettingsService.getSettings(), name, settings);

    String locale = settings.get("locale", settings.get("language", settings.get("lang", null)));
    if (locale == null) {
        throw new IllegalArgumentException("missing [locale | language | lang] configuration for hunspell token filter");
    }

    dictionary = hunspellService.getDictionary(locale);
    if (dictionary == null) {
        throw new IllegalArgumentException(String.format(Locale.ROOT, "Unknown hunspell dictionary for locale [%s]", locale));
    }

    dedup = settings.getAsBoolean("dedup", true);
    longestOnly = settings.getAsBoolean("longest_only", false);
}
Example #6
Source File: KeepWordFilterFactory.java From Elasticsearch with Apache License 2.0 | 6 votes |
@Inject
public KeepWordFilterFactory(Index index, IndexSettingsService indexSettingsService, Environment env,
                             @Assisted String name, @Assisted Settings settings) {
    super(index, indexSettingsService.getSettings(), name, settings);

    final String[] arrayKeepWords = settings.getAsArray(KEEP_WORDS_KEY, null);
    final String keepWordsPath = settings.get(KEEP_WORDS_PATH_KEY, null);
    if ((arrayKeepWords == null && keepWordsPath == null) || (arrayKeepWords != null && keepWordsPath != null)) {
        // we don't allow both or none
        throw new IllegalArgumentException("keep requires either `" + KEEP_WORDS_KEY + "` or `"
                + KEEP_WORDS_PATH_KEY + "` to be configured");
    }
    if (version.onOrAfter(Version.LUCENE_4_4) && settings.get(ENABLE_POS_INC_KEY) != null) {
        throw new IllegalArgumentException(ENABLE_POS_INC_KEY + " is not supported anymore. Please fix your analysis chain or use"
                + " an older compatibility version (<=4.3) but beware that it might cause highlighting bugs.");
    }
    enablePositionIncrements = version.onOrAfter(Version.LUCENE_4_4) ? true : settings.getAsBoolean(ENABLE_POS_INC_KEY, true);

    this.keepWords = Analysis.getWordSet(env, settings, KEEP_WORDS_KEY);
}
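The constructor requires exactly one of the two keep-word sources to be configured. A short sketch of both cases, assuming KEEP_WORDS_KEY and KEEP_WORDS_PATH_KEY resolve to "keep_words" and "keep_words_path" and using the ES 2.x Settings.settingsBuilder() API; the values are illustrative only.

import org.elasticsearch.common.settings.Settings;

public class KeepWordSettingsSketch {
    public static void main(String[] args) {
        // Accepted: only the inline word list is set.
        Settings inlineOnly = Settings.settingsBuilder()
                .putArray("keep_words", "elasticsearch", "lucene")
                .build();

        // Rejected by the factory above: both the inline list and a path are set.
        Settings both = Settings.settingsBuilder()
                .putArray("keep_words", "elasticsearch")
                .put("keep_words_path", "analysis/keep_words.txt")
                .build();

        System.out.println(inlineOnly.getAsMap());
        System.out.println(both.getAsMap());
    }
}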
Example #7
Source File: TrimTokenFilterFactory.java From Elasticsearch with Apache License 2.0 | 5 votes |
@Inject
public TrimTokenFilterFactory(Index index, IndexSettingsService indexSettingsService, Environment env,
                              @Assisted String name, @Assisted Settings settings) {
    super(index, indexSettingsService.getSettings(), name, settings);
    if (version.onOrAfter(Version.LUCENE_4_4_0) && settings.get(UPDATE_OFFSETS_KEY) != null) {
        throw new IllegalArgumentException(UPDATE_OFFSETS_KEY + " is not supported anymore. Please fix your analysis chain or use"
                + " an older compatibility version (<=4.3) but beware that it might cause highlighting bugs.");
    }
    this.updateOffsets = settings.getAsBoolean("update_offsets", false);
}
Example #8
Source File: PatternReplaceTokenFilterFactory.java From Elasticsearch with Apache License 2.0 | 5 votes |
@Inject
public PatternReplaceTokenFilterFactory(Index index, IndexSettingsService indexSettingsService,
                                        @Assisted String name, @Assisted Settings settings) {
    super(index, indexSettingsService.getSettings(), name, settings);

    String sPattern = settings.get("pattern", null);
    if (sPattern == null) {
        throw new IllegalArgumentException("pattern is missing for [" + name + "] token filter of type 'pattern_replace'");
    }

    this.pattern = Regex.compile(sPattern, settings.get("flags"));
    this.replacement = settings.get("replacement", "");
    this.all = settings.getAsBoolean("all", true);
}
Example #9
Source File: BrazilianAnalyzerProvider.java From Elasticsearch with Apache License 2.0 | 5 votes |
@Inject
public BrazilianAnalyzerProvider(Index index, IndexSettingsService indexSettingsService, Environment env,
                                 @Assisted String name, @Assisted Settings settings) {
    super(index, indexSettingsService.getSettings(), name, settings);
    analyzer = new BrazilianAnalyzer(Analysis.parseStopWords(env, settings, BrazilianAnalyzer.getDefaultStopSet()),
            Analysis.parseStemExclusion(settings, CharArraySet.EMPTY_SET));
    analyzer.setVersion(version);
}
Example #10
Source File: CommonGramsTokenFilterFactory.java From Elasticsearch with Apache License 2.0 | 5 votes |
@Inject
public CommonGramsTokenFilterFactory(Index index, IndexSettingsService indexSettingsService, Environment env,
                                     @Assisted String name, @Assisted Settings settings) {
    super(index, indexSettingsService.getSettings(), name, settings);
    this.ignoreCase = settings.getAsBoolean("ignore_case", false);
    this.queryMode = settings.getAsBoolean("query_mode", false);
    this.words = Analysis.parseCommonWords(env, settings, null, ignoreCase);

    if (this.words == null) {
        throw new IllegalArgumentException("missing or empty [common_words] or [common_words_path] configuration for common_grams token filter");
    }
}
Example #11
Source File: SpanishAnalyzerProvider.java From Elasticsearch with Apache License 2.0 | 5 votes |
@Inject
public SpanishAnalyzerProvider(Index index, IndexSettingsService indexSettingsService, Environment env,
                               @Assisted String name, @Assisted Settings settings) {
    super(index, indexSettingsService.getSettings(), name, settings);
    analyzer = new SpanishAnalyzer(Analysis.parseStopWords(env, settings, SpanishAnalyzer.getDefaultStopSet()),
            Analysis.parseStemExclusion(settings, CharArraySet.EMPTY_SET));
    analyzer.setVersion(version);
}
Example #12
Source File: StandardHtmlStripAnalyzerProvider.java From Elasticsearch with Apache License 2.0 | 5 votes |
@Inject
public StandardHtmlStripAnalyzerProvider(Index index, IndexSettingsService indexSettingsService, Environment env,
                                         @Assisted String name, @Assisted Settings settings) {
    super(index, indexSettingsService.getSettings(), name, settings);
    this.esVersion = Version.indexCreated(indexSettingsService.getSettings());
    final CharArraySet defaultStopwords;
    if (esVersion.onOrAfter(Version.V_1_0_0_RC1)) {
        defaultStopwords = CharArraySet.EMPTY_SET;
    } else {
        defaultStopwords = StopAnalyzer.ENGLISH_STOP_WORDS_SET;
    }
    CharArraySet stopWords = Analysis.parseStopWords(env, settings, defaultStopwords);
    analyzer = new StandardHtmlStripAnalyzer(stopWords);
    analyzer.setVersion(version);
}
Example #13
Source File: BulgarianAnalyzerProvider.java From Elasticsearch with Apache License 2.0 | 5 votes |
@Inject
public BulgarianAnalyzerProvider(Index index, IndexSettingsService indexSettingsService, Environment env,
                                 @Assisted String name, @Assisted Settings settings) {
    super(index, indexSettingsService.getSettings(), name, settings);
    analyzer = new BulgarianAnalyzer(Analysis.parseStopWords(env, settings, BulgarianAnalyzer.getDefaultStopSet()),
            Analysis.parseStemExclusion(settings, CharArraySet.EMPTY_SET));
    analyzer.setVersion(version);
}
Example #14
Source File: HtmlStripCharFilterFactory.java From Elasticsearch with Apache License 2.0 | 5 votes |
@Inject
public HtmlStripCharFilterFactory(Index index, IndexSettingsService indexSettingsService,
                                  @Assisted String name, @Assisted Settings settings) {
    super(index, indexSettingsService.getSettings(), name);
    String[] escapedTags = settings.getAsArray("escaped_tags");
    if (escapedTags.length > 0) {
        this.escapedTags = ImmutableSet.copyOf(escapedTags);
    } else {
        this.escapedTags = null;
    }
}
Example #15
Source File: IndonesianAnalyzerProvider.java From Elasticsearch with Apache License 2.0 | 5 votes |
@Inject
public IndonesianAnalyzerProvider(Index index, IndexSettingsService indexSettingsService, Environment env,
                                  @Assisted String name, @Assisted Settings settings) {
    super(index, indexSettingsService.getSettings(), name, settings);
    analyzer = new IndonesianAnalyzer(Analysis.parseStopWords(env, settings, IndonesianAnalyzer.getDefaultStopSet()),
            Analysis.parseStemExclusion(settings, CharArraySet.EMPTY_SET));
    analyzer.setVersion(version);
}
Example #16
Source File: TruncateTokenFilterFactory.java From Elasticsearch with Apache License 2.0 | 5 votes |
@Inject
public TruncateTokenFilterFactory(Index index, IndexSettingsService indexSettingsService,
                                  @Assisted String name, @Assisted Settings settings) {
    super(index, indexSettingsService.getSettings(), name, settings);
    this.length = settings.getAsInt("length", -1);
    if (length <= 0) {
        throw new IllegalArgumentException("length parameter must be provided");
    }
}
Example #17
Source File: StopTokenFilterFactory.java From Elasticsearch with Apache License 2.0 | 5 votes |
@Inject
public StopTokenFilterFactory(Index index, IndexSettingsService indexSettingsService, Environment env,
                              @Assisted String name, @Assisted Settings settings) {
    super(index, indexSettingsService.getSettings(), name, settings);
    this.ignoreCase = settings.getAsBoolean("ignore_case", false);
    this.removeTrailing = settings.getAsBoolean("remove_trailing", true);
    this.stopWords = Analysis.parseStopWords(env, settings, StopAnalyzer.ENGLISH_STOP_WORDS_SET, ignoreCase);
    if (version.onOrAfter(Version.LUCENE_4_4) && settings.get("enable_position_increments") != null) {
        throw new IllegalArgumentException("enable_position_increments is not supported anymore as of Lucene 4.4 as it can create broken token streams."
                + " Please fix your analysis chain or use an older compatibility version (<= 4.3).");
    }
    this.enablePositionIncrements = settings.getAsBoolean("enable_position_increments", true);
}
Example #18
Source File: MappingCharFilterFactory.java From Elasticsearch with Apache License 2.0 | 5 votes |
@Inject
public MappingCharFilterFactory(Index index, IndexSettingsService indexSettingsService, Environment env,
                                @Assisted String name, @Assisted Settings settings) {
    super(index, indexSettingsService.getSettings(), name);

    List<String> rules = Analysis.getWordList(env, settings, "mappings");
    if (rules == null) {
        throw new IllegalArgumentException("mapping requires either `mappings` or `mappings_path` to be configured");
    }

    NormalizeCharMap.Builder normMapBuilder = new NormalizeCharMap.Builder();
    parseRules(rules, normMapBuilder);
    normMap = normMapBuilder.build();
}
Example #19
Source File: EnglishAnalyzerProvider.java From Elasticsearch with Apache License 2.0 | 5 votes |
@Inject
public EnglishAnalyzerProvider(Index index, IndexSettingsService indexSettingsService, Environment env,
                               @Assisted String name, @Assisted Settings settings) {
    super(index, indexSettingsService.getSettings(), name, settings);
    analyzer = new EnglishAnalyzer(Analysis.parseStopWords(env, settings, EnglishAnalyzer.getDefaultStopSet()),
            Analysis.parseStemExclusion(settings, CharArraySet.EMPTY_SET));
    analyzer.setVersion(version);
}
Example #20
Source File: LithuanianAnalyzerProvider.java From Elasticsearch with Apache License 2.0 | 5 votes |
@Inject
public LithuanianAnalyzerProvider(Index index, IndexSettingsService indexSettingsService, Environment env,
                                  @Assisted String name, @Assisted Settings settings) {
    super(index, indexSettingsService.getSettings(), name, settings);
    analyzer = new LithuanianAnalyzer(Analysis.parseStopWords(env, settings, LithuanianAnalyzer.getDefaultStopSet()),
            Analysis.parseStemExclusion(settings, CharArraySet.EMPTY_SET));
    analyzer.setVersion(version);
}
Example #21
Source File: KeepTypesFilterFactory.java From Elasticsearch with Apache License 2.0 | 5 votes |
@Inject
public KeepTypesFilterFactory(Index index, IndexSettingsService indexSettingsService, Environment env,
                              @Assisted String name, @Assisted Settings settings) {
    super(index, indexSettingsService.getSettings(), name, settings);

    final String[] arrayKeepTypes = settings.getAsArray(KEEP_TYPES_KEY, null);
    if (arrayKeepTypes == null) {
        throw new IllegalArgumentException("keep_types requires `" + KEEP_TYPES_KEY + "` to be configured");
    }

    this.keepTypes = new HashSet<>(Arrays.asList(arrayKeepTypes));
}
Example #22
Source File: PortugueseAnalyzerProvider.java From Elasticsearch with Apache License 2.0 | 5 votes |
@Inject
public PortugueseAnalyzerProvider(Index index, IndexSettingsService indexSettingsService, Environment env,
                                  @Assisted String name, @Assisted Settings settings) {
    super(index, indexSettingsService.getSettings(), name, settings);
    analyzer = new PortugueseAnalyzer(Analysis.parseStopWords(env, settings, PortugueseAnalyzer.getDefaultStopSet()),
            Analysis.parseStemExclusion(settings, CharArraySet.EMPTY_SET));
    analyzer.setVersion(version);
}
Example #23
Source File: ShingleTokenFilterFactory.java From Elasticsearch with Apache License 2.0 | 5 votes |
@Inject
public ShingleTokenFilterFactory(Index index, IndexSettingsService indexSettingsService,
                                 @Assisted String name, @Assisted Settings settings) {
    super(index, indexSettingsService.getSettings(), name, settings);
    Integer maxShingleSize = settings.getAsInt("max_shingle_size", ShingleFilter.DEFAULT_MAX_SHINGLE_SIZE);
    Integer minShingleSize = settings.getAsInt("min_shingle_size", ShingleFilter.DEFAULT_MIN_SHINGLE_SIZE);
    Boolean outputUnigrams = settings.getAsBoolean("output_unigrams", true);
    Boolean outputUnigramsIfNoShingles = settings.getAsBoolean("output_unigrams_if_no_shingles", false);
    String tokenSeparator = settings.get("token_separator", ShingleFilter.DEFAULT_TOKEN_SEPARATOR);
    String fillerToken = settings.get("filler_token", ShingleFilter.DEFAULT_FILLER_TOKEN);
    factory = new Factory("shingle", minShingleSize, maxShingleSize,
            outputUnigrams, outputUnigramsIfNoShingles, tokenSeparator, fillerToken);
}
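A sketch of the shingle settings consumed above, again assuming the ES 2.x Settings.settingsBuilder() API; the values are illustrative, and any key left out falls back to the ShingleFilter default used in the constructor.

import org.elasticsearch.common.settings.Settings;

public class ShingleSettingsSketch {
    public static void main(String[] args) {
        Settings settings = Settings.settingsBuilder()
                .put("min_shingle_size", 2)
                .put("max_shingle_size", 3)
                .put("output_unigrams", true)
                .put("output_unigrams_if_no_shingles", false)
                .put("token_separator", " ")
                .put("filler_token", "_")
                .build();
        System.out.println(settings.getAsMap());
    }
}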
Example #24
Source File: DanishAnalyzerProvider.java From Elasticsearch with Apache License 2.0 | 5 votes |
@Inject
public DanishAnalyzerProvider(Index index, IndexSettingsService indexSettingsService, Environment env,
                              @Assisted String name, @Assisted Settings settings) {
    super(index, indexSettingsService.getSettings(), name, settings);
    analyzer = new DanishAnalyzer(Analysis.parseStopWords(env, settings, DanishAnalyzer.getDefaultStopSet()),
            Analysis.parseStemExclusion(settings, CharArraySet.EMPTY_SET));
    analyzer.setVersion(version);
}
Example #25
Source File: GalicianAnalyzerProvider.java From Elasticsearch with Apache License 2.0 | 5 votes |
@Inject
public GalicianAnalyzerProvider(Index index, IndexSettingsService indexSettingsService, Environment env,
                                @Assisted String name, @Assisted Settings settings) {
    super(index, indexSettingsService.getSettings(), name, settings);
    analyzer = new GalicianAnalyzer(Analysis.parseStopWords(env, settings, GalicianAnalyzer.getDefaultStopSet()),
            Analysis.parseStemExclusion(settings, CharArraySet.EMPTY_SET));
    analyzer.setVersion(version);
}
Example #26
Source File: AbstractCompoundWordTokenFilterFactory.java From Elasticsearch with Apache License 2.0 | 5 votes |
public AbstractCompoundWordTokenFilterFactory(Index index, Settings indexSettings, Environment env,
                                              @Assisted String name, @Assisted Settings settings) {
    super(index, indexSettings, name, settings);

    minWordSize = settings.getAsInt("min_word_size", CompoundWordTokenFilterBase.DEFAULT_MIN_WORD_SIZE);
    minSubwordSize = settings.getAsInt("min_subword_size", CompoundWordTokenFilterBase.DEFAULT_MIN_SUBWORD_SIZE);
    maxSubwordSize = settings.getAsInt("max_subword_size", CompoundWordTokenFilterBase.DEFAULT_MAX_SUBWORD_SIZE);
    onlyLongestMatch = settings.getAsBoolean("only_longest_match", false);

    wordList = Analysis.getWordSet(env, settings, "word_list");
    if (wordList == null) {
        throw new IllegalArgumentException("word_list must be provided for [" + name + "], either as a path to a file, or directly");
    }
}
Example #27
Source File: ArmenianAnalyzerProvider.java From Elasticsearch with Apache License 2.0 | 5 votes |
@Inject
public ArmenianAnalyzerProvider(Index index, IndexSettingsService indexSettingsService, Environment env,
                                @Assisted String name, @Assisted Settings settings) {
    super(index, indexSettingsService.getSettings(), name, settings);
    analyzer = new ArmenianAnalyzer(Analysis.parseStopWords(env, settings, ArmenianAnalyzer.getDefaultStopSet()),
            Analysis.parseStemExclusion(settings, CharArraySet.EMPTY_SET));
    analyzer.setVersion(version);
}
Example #28
Source File: KeywordMarkerTokenFilterFactory.java From Elasticsearch with Apache License 2.0 | 5 votes |
@Inject
public KeywordMarkerTokenFilterFactory(Index index, IndexSettingsService indexSettingsService, Environment env,
                                       @Assisted String name, @Assisted Settings settings) {
    super(index, indexSettingsService.getSettings(), name, settings);

    boolean ignoreCase = settings.getAsBoolean("ignore_case", false);
    Set<?> rules = Analysis.getWordSet(env, settings, "keywords");
    if (rules == null) {
        throw new IllegalArgumentException("keyword filter requires either `keywords` or `keywords_path` to be configured");
    }
    keywordLookup = new CharArraySet(rules, ignoreCase);
}
Example #29
Source File: IrishAnalyzerProvider.java From Elasticsearch with Apache License 2.0 | 5 votes |
@Inject
public IrishAnalyzerProvider(Index index, IndexSettingsService indexSettingsService, Environment env,
                             @Assisted String name, @Assisted Settings settings) {
    super(index, indexSettingsService.getSettings(), name, settings);
    analyzer = new IrishAnalyzer(Analysis.parseStopWords(env, settings, IrishAnalyzer.getDefaultStopSet()),
            Analysis.parseStemExclusion(settings, CharArraySet.EMPTY_SET));
    analyzer.setVersion(version);
}
Example #30
Source File: PatternTokenizerFactory.java From Elasticsearch with Apache License 2.0 | 5 votes |
@Inject
public PatternTokenizerFactory(Index index, IndexSettingsService indexSettingsService,
                               @Assisted String name, @Assisted Settings settings) {
    super(index, indexSettingsService.getSettings(), name, settings);

    String sPattern = settings.get("pattern", "\\W+" /*PatternAnalyzer.NON_WORD_PATTERN*/);
    if (sPattern == null) {
        throw new IllegalArgumentException("pattern is missing for [" + name + "] tokenizer of type 'pattern'");
    }

    this.pattern = Regex.compile(sPattern, settings.get("flags"));
    this.group = settings.getAsInt("group", -1);
}