Java Code Examples for org.apache.lucene.queryparser.classic.QueryParser#parse()
The following examples show how to use org.apache.lucene.queryparser.classic.QueryParser#parse(). The examples are drawn from open-source projects; the source file and license are noted above each example.
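Before the project-specific examples, here is a minimal, self-contained sketch of the typical call pattern: build a QueryParser with a default field and the analyzer used at index time, call parse() on the query string, and search. The index path, field name, and query string below are placeholders, not taken from any of the projects listed.

import java.io.IOException;
import java.nio.file.Paths;

import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.queryparser.classic.ParseException;
import org.apache.lucene.queryparser.classic.QueryParser;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;

public class QueryParserSketch {
  public static void main(String[] args) throws IOException, ParseException {
    // Open an existing index (the path is a placeholder)
    Directory dir = FSDirectory.open(Paths.get("/tmp/index"));
    try (DirectoryReader reader = DirectoryReader.open(dir)) {
      IndexSearcher searcher = new IndexSearcher(reader);

      // QueryParser is cheap but stateful: create one per query,
      // giving it a default field and the analyzer used at index time
      QueryParser parser = new QueryParser("content", new StandardAnalyzer());

      // parse() turns a query-syntax string into a Query;
      // it throws ParseException on malformed syntax
      Query query = parser.parse("lucene AND \"query parser\"");

      ScoreDoc[] hits = searcher.search(query, 10).scoreDocs;
      for (ScoreDoc hit : hits) {
        Document doc = searcher.doc(hit.doc);
        System.out.println(hit.score + " " + doc);
      }
    }
  }
}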
Example 1
Source File: ProductIndex.java From arcusplatform with Apache License 2.0 | 6 votes |
public List<ProductCatalogEntry> search(String queryString) throws IOException, ParseException {
   List<ProductCatalogEntry> results = new ArrayList<ProductCatalogEntry>();
   IndexReader reader = DirectoryReader.open(dir);
   IndexSearcher searcher = new IndexSearcher(reader);
   Analyzer analyzer = new SimpleAnalyzer();
   QueryParser parser = new QueryParser(searchField, analyzer);
   Query query = parser.parse(queryString);
   TopDocs docs = searcher.search(query, 100);
   ScoreDoc[] hits = docs.scoreDocs;
   for (ScoreDoc sd : hits) {
      Document doc = searcher.doc(sd.doc);
      results.add(prodcat.getProductById(doc.get("id")));
   }
   reader.close();
   return results;
}
Example 2
Source File: LuceneTextIndexReader.java From incubator-pinot with Apache License 2.0 | 6 votes |
/**
 * Get docIds from the text inverted index for a given raw value
 * @param value value to look for in the inverted index
 * @return docIDs in bitmap
 */
@Override
public MutableRoaringBitmap getDocIds(Object value) {
  String searchQuery = (String) value;
  MutableRoaringBitmap docIds = new MutableRoaringBitmap();
  Collector docIDCollector = new LuceneDocIdCollector(docIds, _docIdTranslator);
  try {
    // Lucene Query Parser is JavaCC based. It is stateful and should
    // be instantiated per query. Analyzer on the other hand is stateless
    // and can be created upfront.
    QueryParser parser = new QueryParser(_column, _standardAnalyzer);
    Query query = parser.parse(searchQuery);
    _indexSearcher.search(query, docIDCollector);
    return docIds;
  } catch (Exception e) {
    String msg = "Caught exception while searching the text index for column:" + _column + " search query:" + searchQuery;
    throw new RuntimeException(msg, e);
  }
}
Example 3
Source File: UserInputQueryBuilder.java From lucene-solr with Apache License 2.0 | 6 votes |
@Override
public Query getQuery(Element e) throws ParserException {
  String text = DOMUtils.getText(e);
  try {
    Query q = null;
    if (unSafeParser != null) {
      // synchronize on unsafe parser
      synchronized (unSafeParser) {
        q = unSafeParser.parse(text);
      }
    } else {
      String fieldName = DOMUtils.getAttribute(e, "fieldName", defaultField);
      // Create new parser
      QueryParser parser = createQueryParser(fieldName, analyzer);
      q = parser.parse(text);
    }
    float boost = DOMUtils.getAttribute(e, "boost", 1.0f);
    return new BoostQuery(q, boost);
  } catch (ParseException e1) {
    throw new ParserException(e1.getMessage());
  }
}
Example 4
Source File: LuceneIndexSearch.java From sdudoc with MIT License | 6 votes |
/**
 * Search method
 * @throws IOException
 * @throws CorruptIndexException
 * @throws ParseException
 */
public List Search(String searchString, LuceneResultCollector luceneResultCollector) throws CorruptIndexException, IOException, ParseException {
  // Approach 1:
  System.out.println(this.indexSettings.getAnalyzer().getClass() + "---- analyzer in use");
  QueryParser q = new QueryParser(Version.LUCENE_44, "summary", this.indexSettings.getAnalyzer());
  String search = new String(searchString.getBytes("ISO-8859-1"), "UTF-8");
  System.out.println(search + "---------- search term");
  Query query = q.parse(search);
  // Approach 2:
  /*
  Term t = new Term("title", searchString);
  TermQuery query = new TermQuery(t);
  */
  System.out.println(query.toString() + "-------- query.toString");
  ScoreDoc[] docs = this.indexSearcher.search(query, 100).scoreDocs;
  System.out.println("Total of " + docs.length + " hits");
  List result = luceneResultCollector.collect(docs, this.indexSearcher);
  return result;
}
Example 5
Source File: QueryAttributes.java From openbd-core with GNU General Public License v3.0 | 6 votes |
public boolean setCriteria(String _critera, String type, boolean _allowLeadingWildcard) throws ParseException {
  if (_critera == null || _critera.length() == 0)
    return false;

  // If this is 'simple' then we OR up all parameters
  if ((type == null || type.equalsIgnoreCase("simple")) && _critera.indexOf(",") != -1) {
    String[] tokens = _critera.split(",");
    _critera = tokens[0];
    for (int x = 1; x < tokens.length; x++)
      _critera += " OR " + tokens[x];
  }

  if (_critera.equals("*"))
    // Special Case, this allows you to retrieve all results from the collection
    query = new MatchAllDocsQuery();
  else {
    QueryParser qp = new QueryParser(DocumentWrap.CONTENTS, AnalyzerFactory.get(collectionsList.get(0).getLanguage()));
    qp.setAllowLeadingWildcard(_allowLeadingWildcard);
    query = qp.parse(_critera);
  }

  return true;
}
Example 6
Source File: LuceneSearcher.java From jpress with GNU Lesser General Public License v3.0 | 6 votes |
private static Query buildQuery(String keyword) {
  try {
    Analyzer analyzer = createAnalyzer();

    // Search the "text" field so that HTML tags and tag attributes are not matched
    QueryParser queryParser1 = new QueryParser("text", analyzer);
    Query termQuery1 = queryParser1.parse(keyword);
    BooleanClause booleanClause1 = new BooleanClause(termQuery1, BooleanClause.Occur.SHOULD);

    QueryParser queryParser2 = new QueryParser("title", analyzer);
    Query termQuery2 = queryParser2.parse(keyword);
    BooleanClause booleanClause2 = new BooleanClause(termQuery2, BooleanClause.Occur.SHOULD);

    BooleanQuery.Builder builder = new BooleanQuery.Builder();
    builder.add(booleanClause1).add(booleanClause2);
    return builder.build();
  } catch (ParseException e) {
    LOG.error(e.toString(), e);
  }
  return null;
}
Example 7
Source File: PageDocumentSearcher.java From gravitee-management-rest-api with Apache License 2.0 | 6 votes |
@Override
public SearchResult search(io.gravitee.rest.api.service.search.query.Query query) throws TechnicalException {
  QueryParser parser = new MultiFieldQueryParser(new String[]{"name", "content"}, analyzer);
  parser.setFuzzyMinSim(0.6f);
  try {
    final Query parse = parser.parse(QueryParserBase.escape(query.getQuery()));

    BooleanQuery.Builder bq = new BooleanQuery.Builder();
    bq.add(parse, BooleanClause.Occur.MUST);
    bq.add(new TermQuery(new Term(FIELD_TYPE, FIELD_TYPE_VALUE)), BooleanClause.Occur.MUST);

    return search(bq.build());
  } catch (ParseException pe) {
    logger.error("Invalid query to search for page documents", pe);
    throw new TechnicalException("Invalid query to search for page documents", pe);
  }
}
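This example, like Example 12 and the final SearchQuery example, escapes the raw input before parsing. As a minimal standalone sketch (the field name and input string below are illustrative, not from any of the projects above), QueryParser.escape() backslash-escapes the classic query-syntax characters so arbitrary user input is analyzed as plain text instead of being interpreted as operators or causing a ParseException:

import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.queryparser.classic.ParseException;
import org.apache.lucene.queryparser.classic.QueryParser;
import org.apache.lucene.search.Query;

public class EscapeUserInputSketch {
  public static void main(String[] args) throws ParseException {
    // Hypothetical user input containing reserved query-syntax characters
    String userInput = "price: (too high?)";
    QueryParser parser = new QueryParser("content", new StandardAnalyzer());
    // escape() backslash-escapes characters such as : ( ) ? * ~ so they
    // are treated as literal text rather than parsed as query operators
    Query safeQuery = parser.parse(QueryParser.escape(userInput));
    System.out.println(safeQuery);
  }
}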
Example 8
Source File: Search.java From fnlp with GNU Lesser General Public License v3.0 | 5 votes |
/**
 * @param args
 * @throws IOException
 * @throws ParseException
 * @throws LoadModelException
 */
public static void main(String[] args) throws IOException, ParseException, LoadModelException {
  String indexPath = "../tmp/lucene";
  System.out.println("Index directory '" + indexPath);

  Date start = new Date();
  Directory dir = FSDirectory.open(new File(indexPath));

  // CNFactory must be initialized first
  CNFactory factory = CNFactory.getInstance("../models", Models.SEG_TAG);
  Analyzer analyzer = new FNLPAnalyzer(Version.LUCENE_47);

  // Now search the index:
  DirectoryReader ireader = DirectoryReader.open(dir);
  IndexSearcher isearcher = new IndexSearcher(ireader);

  // Parse a simple query that searches for "text":
  QueryParser parser = new QueryParser(Version.LUCENE_47, "content", analyzer);
  Query query = parser.parse("保修费用");
  ScoreDoc[] hits = isearcher.search(query, null, 1000).scoreDocs;
  System.out.println("Hello World");

  // Iterate through the results:
  for (int i = 0; i < hits.length; i++) {
    Document hitDoc = isearcher.doc(hits[i].doc);
    System.out.println(hitDoc.get("content"));
    System.out.println(hits[i].score);
  }
  ireader.close();
  dir.close();
}
Example 9
Source File: MonitorTestBase.java From lucene-solr with Apache License 2.0 | 5 votes |
public static Query parse(String query) {
  QueryParser parser = new QueryParser(FIELD, ANALYZER);
  try {
    return parser.parse(query);
  } catch (ParseException e) {
    throw new IllegalArgumentException(e);
  }
}
Example 10
Source File: HelpManagerImpl.java From sakai with Educational Community License v2.0 | 5 votes |
/**
 * Search Lucene
 *
 * @param queryStr
 * @param defaultField
 * @return
 * @throws ParseException
 */
protected Set<Resource> searchResources(String queryStr, String defaultField) throws ParseException {
  Analyzer analyzer = new StandardAnalyzer(Version.LUCENE_40);
  QueryParser parser = new QueryParser(Version.LUCENE_40, defaultField, analyzer);
  Query query = parser.parse(queryStr);
  return searchResources(query);
}
Example 11
Source File: UserProfileDataQueryMapper.java From adam with GNU Lesser General Public License v3.0 | 5 votes |
@Override
public Query convertToLuceneQuery(@Nonnull UserProfileDataQuery query) {
  final QueryParser parser = new MultiFieldQueryParser(LUCENE_VERSION, toFieldsArray(query), _luceneAnalyzerFactory.createAnalyzer());
  parser.setDefaultOperator(AND);
  final String searchTerm = query.getSearchTerm();
  try {
    return parser.parse(searchTerm != null ? searchTerm : "");
  } catch (final ParseException e) {
    throw new RuntimeException("Unable to parse query: " + searchTerm, e);
  }
}
Example 12
Source File: QueryFactory.java From incubator-atlas with Apache License 2.0 | 5 votes |
private QueryExpression create(Request request, ResourceDefinition resourceDefinition) throws InvalidQueryException {
  String queryString;
  if (request.getCardinality() == Request.Cardinality.INSTANCE) {
    String idPropertyName = resourceDefinition.getIdPropertyName();
    queryString = String.format("%s:%s", idPropertyName, request.<String>getProperty(idPropertyName));
  } else {
    queryString = request.getQueryString();
  }

  QueryExpression queryExpression;
  if (queryString != null && !queryString.isEmpty()) {
    QueryParser queryParser = new QueryParser(Version.LUCENE_48, "name", new KeywordAnalyzer());
    queryParser.setLowercaseExpandedTerms(false);
    queryParser.setAllowLeadingWildcard(true);
    Query query;
    try {
      query = queryParser.parse((String) escape(queryString));
    } catch (ParseException e) {
      throw new InvalidQueryException(e.getMessage());
    }
    LOG.info("LuceneQuery: {}", query);
    queryExpression = create(query, resourceDefinition);
  } else {
    queryExpression = new AlwaysQueryExpression();
  }
  // add query properties to request so that they are returned
  request.addAdditionalSelectProperties(queryExpression.getProperties());
  return queryExpression;
}
Example 13
Source File: NLPIRTokenizerTest.java From nlpir-analysis-cn-ictclas with Apache License 2.0 | 5 votes |
public static void main(String[] args) throws Exception {
  // NLPIR
  NLPIRTokenizerAnalyzer nta = new NLPIRTokenizerAnalyzer("", 1, "", "", false);

  // Index
  IndexWriterConfig inconf = new IndexWriterConfig(nta);
  inconf.setOpenMode(OpenMode.CREATE_OR_APPEND);
  IndexWriter index = new IndexWriter(FSDirectory.open(Paths.get("index/")), inconf);
  Document doc = new Document();
  doc.add(new TextField("contents",
      "特朗普表示,很高兴汉堡会晤后再次同习近平主席通话。我同习主席就重大问题保持沟通和协调、两国加强各层级和各领域交往十分重要。当前,美中关系发展态势良好,我相信可以发展得更好。我期待着对中国进行国事访问。",
      Field.Store.YES));
  index.addDocument(doc);
  index.flush();
  index.close();

  // Search
  String field = "contents";
  IndexReader reader = DirectoryReader.open(FSDirectory.open(Paths.get("index/")));
  IndexSearcher searcher = new IndexSearcher(reader);
  QueryParser parser = new QueryParser(field, nta);
  Query query = parser.parse("特朗普习近平");
  TopDocs top = searcher.search(query, 100);
  System.out.println("Total hits: " + top.totalHits);
  ScoreDoc[] hits = top.scoreDocs;
  for (int i = 0; i < hits.length; i++) {
    System.out.println("doc=" + hits[i].doc + " score=" + hits[i].score);
    Document d = searcher.doc(hits[i].doc);
    System.out.println(d.get("contents"));
  }
}
Example 14
Source File: SearchUtil.java From everywhere with Apache License 2.0 | 5 votes |
public static Query buildQuery(String searchText, String searchField) {
  Query query = null;
  try {
    if (searchField.equals(LuceneConstants.CONTENT)) {
      QueryParser qp = new QueryParser(searchField, analyzer);
      query = qp.parse(searchText);
    } else {
      Term term = new Term(LuceneConstants.PATH, searchText);
      query = new TermQuery(term);
    }
  } catch (Exception e) {
    e.printStackTrace();
  }
  return query;
}
Example 15
Source File: SearchBuilder.java From taoshop with Apache License 2.0 | 5 votes |
public static void doSearch(String indexDir, String queryStr) throws IOException, ParseException, InvalidTokenOffsetsException {
  Directory directory = FSDirectory.open(Paths.get(indexDir));
  DirectoryReader reader = DirectoryReader.open(directory);
  IndexSearcher searcher = new IndexSearcher(reader);
  Analyzer analyzer = new SmartChineseAnalyzer();
  QueryParser parser = new QueryParser("tcontent", analyzer);
  Query query = parser.parse(queryStr);

  long startTime = System.currentTimeMillis();
  TopDocs docs = searcher.search(query, 10);
  System.out.println("Search for " + queryStr + " took " + (System.currentTimeMillis() - startTime) + " ms");
  System.out.println("Found " + docs.totalHits + " records");

  // Set up highlighting
  SimpleHTMLFormatter simpleHTMLFormatter = new SimpleHTMLFormatter("<b><font color=red>", "</font></b>");
  QueryScorer scorer = new QueryScorer(query); // scores fragments against the query
  Fragmenter fragmenter = new SimpleSpanFragmenter(scorer); // picks a fragment based on the score
  Highlighter highlighter = new Highlighter(simpleHTMLFormatter, scorer);
  highlighter.setTextFragmenter(fragmenter); // set the fragment to display with highlighting

  // Iterate over the results
  for (ScoreDoc scoreDoc : docs.scoreDocs) {
    Document doc = searcher.doc(scoreDoc.doc);
    System.out.println(doc.get("title"));
    String tcontent = doc.get("tcontent");
    if (tcontent != null) {
      TokenStream tokenStream = analyzer.tokenStream("tcontent", new StringReader(tcontent));
      String summary = highlighter.getBestFragment(tokenStream, tcontent);
      System.out.println(summary);
    }
  }
  reader.close();
}
Example 16
Source File: SearchBuilder.java From taoshop with Apache License 2.0 | 5 votes |
public static void doSearch(String indexDir, String queryStr) throws IOException, ParseException, InvalidTokenOffsetsException {
  Directory directory = FSDirectory.open(Paths.get(indexDir));
  DirectoryReader reader = DirectoryReader.open(directory);
  IndexSearcher searcher = new IndexSearcher(reader);
  Analyzer analyzer = new SmartChineseAnalyzer();
  QueryParser parser = new QueryParser("tcontent", analyzer);
  Query query = parser.parse(queryStr);

  long startTime = System.currentTimeMillis();
  TopDocs docs = searcher.search(query, 10);
  System.out.println("Search for " + queryStr + " took " + (System.currentTimeMillis() - startTime) + " ms");
  System.out.println("Found " + docs.totalHits + " records");

  // Set up highlighting
  SimpleHTMLFormatter simpleHTMLFormatter = new SimpleHTMLFormatter("<b><font color=red>", "</font></b>");
  QueryScorer scorer = new QueryScorer(query); // scores fragments against the query
  Fragmenter fragmenter = new SimpleSpanFragmenter(scorer); // picks a fragment based on the score
  Highlighter highlighter = new Highlighter(simpleHTMLFormatter, scorer);
  highlighter.setTextFragmenter(fragmenter); // set the fragment to display with highlighting

  // Iterate over the results
  for (ScoreDoc scoreDoc : docs.scoreDocs) {
    Document doc = searcher.doc(scoreDoc.doc);
    System.out.println(doc.get("title"));
    System.out.println(doc.get("tcontent"));
    String tcontent = doc.get("tcontent");
    if (tcontent != null) {
      TokenStream tokenStream = analyzer.tokenStream("tcontent", new StringReader(tcontent));
      String summary = highlighter.getBestFragment(tokenStream, tcontent);
      System.out.println(summary);
    }
  }
  reader.close();
}
Example 17
Source File: DocumentSearcher.java From meghanada-server with GNU General Public License v3.0 | 4 votes |
private Query getQuery(final String field, final String query) throws ParseException {
  final QueryParser queryParser = new QueryParser(field, analyzer);
  queryParser.setAllowLeadingWildcard(true);
  queryParser.setDefaultOperator(QueryParser.Operator.OR);
  return queryParser.parse(query);
}
Example 18
Source File: SampleSearchDemo.java From elasticsearch-full with Apache License 2.0 | 4 votes |
public static void main(String[] args) throws IOException, ParseException {
  Analyzer analyzer = new StandardAnalyzer();
  Directory directory = FSDirectory.open(Paths.get("/Users/admin/lucene"));
  DirectoryReader ireader = DirectoryReader.open(directory);
  IndexSearcher indexSearcher = new IndexSearcher(ireader);

  QueryParser parser = new QueryParser("fieldname", analyzer);
  Query query = parser.parse("text");
  ScoreDoc[] hits = indexSearcher.search(query, 10, Sort.INDEXORDER).scoreDocs;
  for (int i = 0; i < hits.length; i++) {
    Document hitDoc = indexSearcher.doc(hits[i].doc);
    System.out.println(hitDoc.toString());
  }
  ireader.close();
  directory.close();
}
Example 19
Source File: SearchQuery.java From HongsCORE with MIT License | 4 votes |
@Override
public Query wdr(String k, Object v) {
  if (null == v) {
    throw new NullPointerException("Query for " + k + " must be string, but null");
  }
  if ("".equals(v)) {
    throw new NullPointerException("Query for " + k + " can not be empty string");
  }

  QueryParser qp = new QueryParser("$" + k, ana != null ? ana : new StandardAnalyzer());
  String s = v.toString();

  // Escape or not
  if (des == null || !des) {
    s = QueryParser.escape(s);
  }

  // Default operator between terms
  if (dor == null || !dor) {
    qp.setDefaultOperator(QueryParser.AND_OPERATOR);
  } else {
    qp.setDefaultOperator(QueryParser.OR_OPERATOR);
  }

  // Other settings
  if (phr != null) qp.setPhraseSlop(phr);
  if (fms != null) qp.setFuzzyMinSim(fms);
  if (fpl != null) qp.setFuzzyPrefixLength(fpl);
//if (art != null) qp.setAnalyzeRangeTerms(art);
  if (sow != null) qp.setSplitOnWhitespace(sow);
  if (alw != null) qp.setAllowLeadingWildcard(alw);
//if (let != null) qp.setLowercaseExpandedTerms(let);
  if (epi != null) qp.setEnablePositionIncrements(epi);
  if (agp != null) qp.setAutoGeneratePhraseQueries(agp);

  try {
    Query q2 = qp.parse(s);
    return q2;
  } catch (ParseException e) {
    throw new HongsExemption(e);
  }
}