org.apache.lucene.analysis.core.KeywordTokenizerFactory Java Examples
The following examples show how to use
org.apache.lucene.analysis.core.KeywordTokenizerFactory.
You can vote up the examples you like or vote down the ones you don't like.
You can also go to the original project or source file by following the links above each example, and you may check out the related API usage in the sidebar.
Example #1
Source File: SolrUtil.java From ambari-logsearch with Apache License 2.0 | 6 votes |
/**
 * Rewrites a raw search string into a Solr-safe query term based on the field's type.
 * Numeric fields get number escaping (blank result maps to {@code null}); fields whose
 * tokenizer is {@code StandardTokenizerFactory} get standard-tokenizer escaping;
 * keyword-tokenized or {@code string} fields are wrapped without a trailing asterisk;
 * path-hierarchy fields pass through unchanged; everything else gets full query-char
 * escaping. A blank {@code fieldType} also passes the string through unchanged.
 *
 * @param str               the raw user-supplied search string
 * @param fieldType         the Solr field type name (may be blank)
 * @param fieldTypeMetaData serialized field-type metadata used to resolve tokenizer info
 * @return the transformed search string, or {@code null} when number escaping yields blank
 */
public static String putWildCardByType(String str, String fieldType, String fieldTypeMetaData) {
  Map<String, Object> typeInfo = getFieldTypeInfoMap(fieldTypeMetaData);
  if (StringUtils.isBlank(fieldType)) {
    // No type information available; leave the input untouched.
    return str;
  }
  if (isSolrFieldNumber(typeInfo)) {
    String escaped = putEscapeCharacterForNumber(str, typeInfo);
    // Blank escaping result is deliberately surfaced as null to the caller.
    return StringUtils.isNotBlank(escaped) ? escaped : null;
  }
  if (checkTokenizer(StandardTokenizerFactory.class, typeInfo)) {
    return escapeForStandardTokenizer(str);
  }
  if (checkTokenizer(KeywordTokenizerFactory.class, typeInfo) || "string".equalsIgnoreCase(fieldType)) {
    return makeSolrSearchStringWithoutAsterisk(str);
  }
  if (checkTokenizer(PathHierarchyTokenizerFactory.class, typeInfo)) {
    // Path-hierarchy tokenized fields take the raw value as-is.
    return str;
  }
  return escapeQueryChars(str);
}
Example #2
Source File: NestPathField.java From lucene-solr with Apache License 2.0 | 6 votes |
@Override public void setArgs(IndexSchema schema, Map<String, String> args) { args.putIfAbsent("stored", "false"); args.putIfAbsent("omitTermFreqAndPositions", "true"); args.putIfAbsent("omitNorms", "true"); args.putIfAbsent("maxCharsForDocValues", "-1"); super.setArgs(schema, args); // CustomAnalyzer is easy to use CustomAnalyzer customAnalyzer; try { customAnalyzer = CustomAnalyzer.builder(schema.getResourceLoader()) .withDefaultMatchVersion(schema.getDefaultLuceneMatchVersion()) .withTokenizer(KeywordTokenizerFactory.class) .addTokenFilter(PatternReplaceFilterFactory.class, "pattern", "#\\d*", "replace", "all") .build(); } catch (IOException e) { throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, e);//impossible? } // Solr HTTP Schema APIs don't know about CustomAnalyzer so use TokenizerChain instead setIndexAnalyzer(new TokenizerChain(customAnalyzer)); // leave queryAnalyzer as literal }
Example #3
Source File: CustomAnalyzerStrField.java From lucene-solr with Apache License 2.0 | 6 votes |
public CustomAnalyzerStrField() { Random r = LuceneTestCase.random(); // two arg constructor Analyzer a2 = new TokenizerChain (new KeywordTokenizerFactory(new HashMap<>()), r.nextBoolean() ? null : new TokenFilterFactory[0]); // three arg constructor Analyzer a3 = new TokenizerChain (r.nextBoolean() ? null : new CharFilterFactory[0], new KeywordTokenizerFactory(new HashMap<>()), r.nextBoolean() ? null : new TokenFilterFactory[0]); if (r.nextBoolean()) { indexAnalyzer = a2; queryAnalyzer = a3; } else { queryAnalyzer = a2; indexAnalyzer = a3; } }
Example #4
Source File: AnalyzerFactory.java From airsonic-advanced with GNU General Public License v3.0 | 4 votes |
/**
 * Creates a {@link CustomAnalyzer} builder preconfigured with a keyword
 * tokenizer, i.e. the whole input is emitted as a single token.
 *
 * @return a builder whose tokenizer is {@code KeywordTokenizerFactory}
 * @throws IOException if the underlying builder fails to load the tokenizer
 */
private Builder createKeywordAnalyzerBuilder() throws IOException {
  Builder keywordBuilder = CustomAnalyzer.builder();
  return keywordBuilder.withTokenizer(KeywordTokenizerFactory.class);
}
Example #5
Source File: AnalyzerFactory.java From airsonic with GNU General Public License v3.0 | 4 votes |
/**
 * Returns a {@link CustomAnalyzer} builder that tokenizes the entire input
 * as one keyword token (no splitting).
 *
 * @return a builder configured with {@code KeywordTokenizerFactory}
 * @throws IOException if the tokenizer factory cannot be initialized
 */
private Builder createKeywordAnalyzerBuilder() throws IOException {
  Builder builder = CustomAnalyzer.builder();
  builder = builder.withTokenizer(KeywordTokenizerFactory.class);
  return builder;
}