org.apache.lucene.queryparser.classic.QueryParser Java Examples
The following examples show how to use
org.apache.lucene.queryparser.classic.QueryParser.
You can vote up the ones you like or vote down the ones you don't like,
and follow the links above each example to visit the original project or source file. You can also check out the related API usage in the sidebar.
Example #1
Source File: TestPersistentProvenanceRepository.java From localization_nifi with Apache License 2.0 | 7 votes |
/**
 * Runs a Lucene query against the given index directory and returns the matching documents.
 *
 * @param indexDirectory directory containing the Lucene index
 * @param storageDirs    provenance storage directories (unused by this method)
 * @param query          Lucene query string, parsed with "uuid" as the default field
 * @return up to 1000 matching documents
 * @throws IOException    if the index cannot be opened or read
 * @throws ParseException if the query string is not valid Lucene syntax
 */
private List<Document> runQuery(final File indexDirectory, final List<File> storageDirs, final String query) throws IOException, ParseException {
    try (final DirectoryReader directoryReader = DirectoryReader.open(FSDirectory.open(indexDirectory))) {
        final IndexSearcher searcher = new IndexSearcher(directoryReader);
        final Analyzer analyzer = new SimpleAnalyzer();
        final org.apache.lucene.search.Query luceneQuery = new QueryParser("uuid", analyzer).parse(query);
        // NOTE(review): removed dead code — the original constructed a provenance
        // Query ('new Query("")' with setMaxResults(1000)) that was never used.
        final TopDocs topDocs = searcher.search(luceneQuery, 1000);
        final List<Document> docs = new ArrayList<>();
        for (final ScoreDoc scoreDoc : topDocs.scoreDocs) {
            docs.add(directoryReader.document(scoreDoc.doc));
        }
        return docs;
    }
}
Example #2
Source File: FTConnLucene.java From openprodoc with GNU Affero General Public License v3.0 | 6 votes |
/** * * @param Type * @param sDocMetadata * @param sBody * @param sMetadata * @return * @throws PDException */ @Override protected ArrayList<String> Search(String Type, String sDocMetadata, String sBody, String sMetadata) throws PDException { ArrayList<String> Res=new ArrayList(); IndexSearcher isearcher=null; try { isearcher=SM.acquire(); sBody=sBody.toLowerCase(); Query query = new QueryParser(F_FULLTEXT,analyzer).parse(sBody); ScoreDoc[] hits = isearcher.search(query, MAXRESULTS).scoreDocs; for (ScoreDoc hit : hits) Res.add(isearcher.doc(hit.doc).get(F_ID)); SM.release(isearcher); //ireader.close(); //directory.close(); } catch (Exception ex) { try { SM.release(isearcher); } catch (Exception e) {} PDException.GenPDException("Error_Searching_doc_FT:", ex.getLocalizedMessage()); } return(Res); }
Example #3
Source File: LuceneIndexSearch.java From sdudoc with MIT License | 6 votes |
/** * 查询方法 * @throws IOException * @throws CorruptIndexException * @throws ParseException */ public List Search(String searchString,LuceneResultCollector luceneResultCollector) throws CorruptIndexException, IOException, ParseException{ //方法一: System.out.println(this.indexSettings.getAnalyzer().getClass()+"----分词选择"); QueryParser q = new QueryParser(Version.LUCENE_44, "summary", this.indexSettings.getAnalyzer()); String search = new String(searchString.getBytes("ISO-8859-1"),"UTF-8"); System.out.println(search+"----------搜索的词语dd"); Query query = q.parse(search); //方法二: /* Term t = new Term("title", searchString); TermQuery query = new TermQuery(t); */ System.out.println(query.toString()+"--------query.tostring"); ScoreDoc[] docs = this.indexSearcher.search(query,100).scoreDocs; System.out.println("一共有:"+docs.length+"条记录"); List result = luceneResultCollector.collect(docs, this.indexSearcher); return result; }
Example #4
Source File: LuceneInMemorySentenceRetrievalExecutor.java From bioasq with Apache License 2.0 | 6 votes |
@Override public void initialize(UimaContext context) throws ResourceInitializationException { super.initialize(context); // initialize sentence chunker TokenizerFactory tokenizerFactory = UimaContextHelper.createObjectFromConfigParameter(context, "tokenizer-factory", "tokenizer-factory-params", IndoEuropeanTokenizerFactory.class, TokenizerFactory.class); SentenceModel sentenceModel = UimaContextHelper.createObjectFromConfigParameter(context, "sentence-model", "sentence-model-params", IndoEuropeanSentenceModel.class, SentenceModel.class); chunker = new SentenceChunker(tokenizerFactory, sentenceModel); // initialize hits hits = UimaContextHelper.getConfigParameterIntValue(context, "hits", 200); // initialize query analyzer, index writer config, and query parser analyzer = UimaContextHelper.createObjectFromConfigParameter(context, "query-analyzer", "query-analyzer-params", StandardAnalyzer.class, Analyzer.class); parser = new QueryParser("text", analyzer); // initialize query string constructor queryStringConstructor = UimaContextHelper.createObjectFromConfigParameter(context, "query-string-constructor", "query-string-constructor-params", BooleanBagOfPhraseQueryStringConstructor.class, QueryStringConstructor.class); }
Example #5
Source File: ImprovedLuceneInMemorySentenceRetrievalExecutor.java From bioasq with Apache License 2.0 | 6 votes |
@Override public void initialize(UimaContext context) throws ResourceInitializationException { super.initialize(context); TokenizerFactory tokenizerFactory = UimaContextHelper.createObjectFromConfigParameter(context, "tokenizer-factory", "tokenizer-factory-params", IndoEuropeanTokenizerFactory.class, TokenizerFactory.class); SentenceModel sentenceModel = UimaContextHelper.createObjectFromConfigParameter(context, "sentence-model", "sentence-model-params", IndoEuropeanSentenceModel.class, SentenceModel.class); chunker = new SentenceChunker(tokenizerFactory, sentenceModel); // initialize hits hits = UimaContextHelper.getConfigParameterIntValue(context, "hits", 200); // initialize query analyzer, index writer config, and query parser analyzer = UimaContextHelper.createObjectFromConfigParameter(context, "query-analyzer", "query-analyzer-params", StandardAnalyzer.class, Analyzer.class); parser = new QueryParser("text", analyzer); // initialize query string constructor queryStringConstructor = UimaContextHelper.createObjectFromConfigParameter(context, "query-string-constructor", "query-string-constructor-params", BagOfPhraseQueryStringConstructor.class, QueryStringConstructor.class); String parserProviderName = UimaContextHelper .getConfigParameterStringValue(context, "parser-provider"); parserProvider = ProviderCache.getProvider(parserProviderName, ParserProvider.class); lemma = new StanfordLemmatizer(); }
Example #6
Source File: LuceneFileSearch.java From tutorials with MIT License | 6 votes |
/**
 * Searches the index for documents matching {@code queryString} in {@code inField}.
 *
 * @param inField     field to search in
 * @param queryString Lucene query string
 * @return up to 10 matching documents, or {@code null} if parsing/IO fails
 */
public List<Document> searchFiles(String inField, String queryString) {
    try {
        Query query = new QueryParser(inField, analyzer).parse(queryString);
        // FIX: close the reader — the original leaked an open IndexReader per call.
        try (IndexReader indexReader = DirectoryReader.open(indexDirectory)) {
            IndexSearcher searcher = new IndexSearcher(indexReader);
            TopDocs topDocs = searcher.search(query, 10);
            List<Document> documents = new ArrayList<>();
            for (ScoreDoc scoreDoc : topDocs.scoreDocs) {
                documents.add(searcher.doc(scoreDoc.doc));
            }
            return documents;
        }
    } catch (IOException | ParseException e) {
        e.printStackTrace();
    }
    return null;
}
Example #7
Source File: LogRegDocumentReranker.java From bioasq with Apache License 2.0 | 6 votes |
@Override public void initialize(UimaContext context) throws ResourceInitializationException { super.initialize(context); hits = UimaContextHelper.getConfigParameterIntValue(context, "hits", 100); analyzer = UimaContextHelper.createObjectFromConfigParameter(context, "query-analyzer", "query-analyzer-params", StandardAnalyzer.class, Analyzer.class); queryStringConstructor = UimaContextHelper.createObjectFromConfigParameter(context, "query-string-constructor", "query-string-constructor-params", LuceneQueryStringConstructor.class, QueryStringConstructor.class); parser = new QueryParser("text", analyzer); // load parameters String param = UimaContextHelper.getConfigParameterStringValue(context, "doc-logreg-params"); try { docFeatWeights = Resources.readLines(getClass().getResource(param), UTF_8).stream().limit(1) .map(line -> line.split("\t")).flatMap(Arrays::stream) .mapToDouble(Double::parseDouble).toArray(); } catch (IOException e) { throw new ResourceInitializationException(e); } }
Example #8
Source File: CodePatternSearcher.java From SnowGraph with Apache License 2.0 | 6 votes |
/**
 * Indexes the given contents in an in-memory index and returns the top-n
 * entries matching {@code query} on the "content" field.
 *
 * @param contents texts to index, one document each
 * @param query    Lucene query string (EnglishAnalyzer)
 * @param n        maximum number of results
 * @return matching "content" values, best first
 * @throws IOException    on index failure
 * @throws ParseException if the query is invalid
 */
private static List<String> search(List<String> contents, String query, int n) throws IOException, ParseException {
    List<String> r = new ArrayList<>();
    Directory dir = new RAMDirectory();
    // FIX: close the writer even when indexing throws (original leaked it on error).
    try (IndexWriter indexWriter = new IndexWriter(dir, new IndexWriterConfig(new EnglishAnalyzer()))) {
        for (String method : contents) {
            Document document = new Document();
            document.add(new TextField("content", method, Field.Store.YES));
            indexWriter.addDocument(document);
        }
    }
    QueryParser qp = new QueryParser("content", new EnglishAnalyzer());
    // FIX: close the reader and directory — the original never released either.
    try (DirectoryReader reader = DirectoryReader.open(dir)) {
        IndexSearcher indexSearcher = new IndexSearcher(reader);
        TopDocs topDocs = indexSearcher.search(qp.parse(query), n);
        for (ScoreDoc scoreDoc : topDocs.scoreDocs) {
            r.add(indexSearcher.doc(scoreDoc.doc).get("content"));
        }
    }
    dir.close();
    return r;
}
Example #9
Source File: LuceneTextIndexReader.java From incubator-pinot with Apache License 2.0 | 6 votes |
/** * Get docIds from the text inverted index for a given raw value * @param value value to look for in the inverted index * @return docIDs in bitmap */ @Override public MutableRoaringBitmap getDocIds(Object value) { String searchQuery = (String) value; MutableRoaringBitmap docIds = new MutableRoaringBitmap(); Collector docIDCollector = new LuceneDocIdCollector(docIds, _docIdTranslator); try { // Lucene Query Parser is JavaCC based. It is stateful and should // be instantiated per query. Analyzer on the other hand is stateless // and can be created upfront. QueryParser parser = new QueryParser(_column, _standardAnalyzer); Query query = parser.parse(searchQuery); _indexSearcher.search(query, docIDCollector); return docIds; } catch (Exception e) { String msg = "Caught excepttion while searching the text index for column:" + _column + " search query:" + searchQuery; throw new RuntimeException(msg, e); } }
Example #10
Source File: LuceneSearcher.java From jpress with GNU Lesser General Public License v3.0 | 6 votes |
private static Query buildQuery(String keyword) { try { Analyzer analyzer = createAnalyzer(); //这里使用text,防止搜索出html的tag或者tag中属性 QueryParser queryParser1 = new QueryParser("text", analyzer); Query termQuery1 = queryParser1.parse(keyword); BooleanClause booleanClause1 = new BooleanClause(termQuery1, BooleanClause.Occur.SHOULD); QueryParser queryParser2 = new QueryParser("title", analyzer); Query termQuery2 = queryParser2.parse(keyword); BooleanClause booleanClause2 = new BooleanClause(termQuery2, BooleanClause.Occur.SHOULD); BooleanQuery.Builder builder = new BooleanQuery.Builder(); builder.add(booleanClause1).add(booleanClause2); return builder.build(); } catch (ParseException e) { LOG.error(e.toString(), e); } return null; }
Example #11
Source File: DefaultLuceneQueryBuilder.java From javaee-lab with Apache License 2.0 | 6 votes |
@Override public Query build(FullTextEntityManager fullTextEntityManager, SearchParameters searchParameters, List<SingularAttribute<?, ?>> availableProperties) { List<String> clauses = getAllClauses(searchParameters, searchParameters.getTerms(), availableProperties); StringBuilder query = new StringBuilder(); query.append("+("); for (String clause : clauses) { if (query.length() > 2) { query.append(" AND "); } query.append(clause); } query.append(")"); if (query.length() == 3) { return null; } //log.debug("Lucene query: {}", query); try { return new QueryParser(availableProperties.get(0).getName(), fullTextEntityManager.getSearchFactory().getAnalyzer("custom")) .parse(query.toString()); } catch (Exception e) { throw propagate(e); } }
Example #12
Source File: UserInputQueryBuilder.java From lucene-solr with Apache License 2.0 | 6 votes |
@Override public Query getQuery(Element e) throws ParserException { String text = DOMUtils.getText(e); try { Query q = null; if (unSafeParser != null) { //synchronize on unsafe parser synchronized (unSafeParser) { q = unSafeParser.parse(text); } } else { String fieldName = DOMUtils.getAttribute(e, "fieldName", defaultField); //Create new parser QueryParser parser = createQueryParser(fieldName, analyzer); q = parser.parse(text); } float boost = DOMUtils.getAttribute(e, "boost", 1.0f); return new BoostQuery(q, boost); } catch (ParseException e1) { throw new ParserException(e1.getMessage()); } }
Example #13
Source File: LuceneCondition.java From stratio-cassandra with Apache License 2.0 | 6 votes |
/**
 * {@inheritDoc}
 *
 * Parses the raw Lucene-syntax query string against the schema's analyzer,
 * allowing leading wildcards and preserving term case, then applies the boost.
 *
 * @throws IllegalArgumentException if no query statement was supplied
 * @throws RuntimeException         if the query string fails to parse
 */
@Override
public Query query(Schema schema) {
    if (query == null) {
        throw new IllegalArgumentException("Query statement required");
    }
    try {
        Analyzer analyzer = schema.getAnalyzer();
        QueryParser queryParser = new QueryParser(defaultField, analyzer);
        queryParser.setAllowLeadingWildcard(true);
        queryParser.setLowercaseExpandedTerms(false);
        Query parsed = queryParser.parse(query);
        parsed.setBoost(boost);
        return parsed;
    } catch (ParseException e) {
        throw new RuntimeException("Error while parsing lucene syntax query", e);
    }
}
Example #14
Source File: PageDocumentSearcher.java From gravitee-management-rest-api with Apache License 2.0 | 6 votes |
/**
 * Searches page documents by name and content (fuzzy, multi-field), restricted
 * to the page document type.
 *
 * @throws TechnicalException when the user query cannot be parsed
 */
@Override
public SearchResult search(io.gravitee.rest.api.service.search.query.Query query) throws TechnicalException {
    QueryParser parser = new MultiFieldQueryParser(new String[]{"name", "content"}, analyzer);
    parser.setFuzzyMinSim(0.6f);
    try {
        Query userQuery = parser.parse(QueryParserBase.escape(query.getQuery()));
        BooleanQuery.Builder builder = new BooleanQuery.Builder()
                .add(userQuery, BooleanClause.Occur.MUST)
                .add(new TermQuery(new Term(FIELD_TYPE, FIELD_TYPE_VALUE)), BooleanClause.Occur.MUST);
        return search(builder.build());
    } catch (ParseException pe) {
        logger.error("Invalid query to search for page documents", pe);
        throw new TechnicalException("Invalid query to search for page documents", pe);
    }
}
Example #15
Source File: LuceneFulltextImplementation.java From ontopia with Apache License 2.0 | 6 votes |
/**
 * Runs a full-text query against the Lucene index and wraps the hits.
 * Reader access is serialized on READER_LOCK; parse failures are logged and
 * rethrown as IOException.
 *
 * @throws IOException on index access failure or invalid query syntax
 */
@Override
public SearchResultIF search(String query) throws IOException {
    synchronized (READER_LOCK) {
        openReader();
        IndexSearcher searcher = new IndexSearcher(reader);
        try {
            logger.debug("Searching for: '" + query + "'");
            Query parsed = new QueryParser(defaultField, ANALYZER).parse(query);
            return new LuceneSearchResult(searcher, searcher.search(parsed, Integer.MAX_VALUE));
        } catch (org.apache.lucene.queryparser.classic.ParseException e) {
            logger.error("Error parsing query: '" + e.getMessage() + "'");
            throw new IOException(e.getMessage(), e);
        }
    }
}
Example #16
Source File: GenericFTSLuceneImpl.java From yes-cart with Apache License 2.0 | 6 votes |
/**
 * {@inheritDoc}
 *
 * Parses the raw Lucene query string with an as-is analyzer and delegates to
 * {@code fullTextSearch}. Invalid queries are logged and rethrown as
 * IllegalArgumentException.
 */
@Override
public List<Long> fullTextSearchRaw(final String query) {
    final QueryParser queryParser = new QueryParser("", new AsIsAnalyzer(false));
    final org.apache.lucene.search.Query parsed;
    try {
        parsed = queryParser.parse(query);
    } catch (Exception e) {
        final String msg = "Cant parse query : " + query + " Error : " + e.getMessage();
        LOG.warn(msg);
        throw new IllegalArgumentException(msg, e);
    }
    if (parsed == null) {
        // Defensive: nothing parsable means nothing to search for.
        return Collections.emptyList();
    }
    return fullTextSearch(parsed);
}
Example #17
Source File: RetrievalApp.java From lucene4ir with Apache License 2.0 | 6 votes |
/**
 * Creates a retrieval application configured from the given parameter file:
 * opens the index, sets the similarity function, and builds the query parser
 * over the combined "all" field.
 *
 * @param retrievalParamFile path to the retrieval parameter file
 */
public RetrievalApp(String retrievalParamFile) {
    System.out.println("Retrieval App");
    System.out.println("Param File: " + retrievalParamFile);
    readParamsFromFile(retrievalParamFile);
    try {
        reader = DirectoryReader.open(FSDirectory.open(new File(p.indexName).toPath()));
        searcher = new IndexSearcher(reader);
        // create similarity function and parameter
        selectSimilarityFunction(sim);
        searcher.setSimilarity(simfn);
        parser = new QueryParser(Lucene4IRConstants.FIELD_ALL, analyzer);
    } catch (Exception e) {
        System.out.println(" caught a " + e.getClass() + "\n with message: " + e.getMessage());
    }
}
Example #18
Source File: RetrievalApp.java From lucene4ir with Apache License 2.0 | 6 votes |
/**
 * Runs one topic against the index and returns its scored hits.
 * The query terms are escaped before parsing; parse or IO failures abort the
 * whole run via System.exit(1) (batch-experiment behavior).
 *
 * @param qno        topic/query number, used only for logging
 * @param queryTerms raw query terms
 * @return the scored documents, or {@code null} if the search produced none
 */
public ScoreDoc[] runQuery(String qno, String queryTerms) {
    ScoreDoc[] hits = null;
    System.out.println("Query No.: " + qno + " " + queryTerms);
    try {
        Query query = parser.parse(QueryParser.escape(queryTerms));
        try {
            hits = searcher.search(query, p.maxResults).scoreDocs;
        } catch (IOException ioe) {
            ioe.printStackTrace();
            System.exit(1);
        }
    } catch (ParseException pe) {
        pe.printStackTrace();
        System.exit(1);
    }
    return hits;
}
Example #19
Source File: InMemoryLuceneIndex.java From tutorials with MIT License | 6 votes |
/**
 * Searches the in-memory index for documents matching {@code queryString}
 * in {@code inField}.
 *
 * @param inField     field to search in
 * @param queryString Lucene query string
 * @return up to 10 matching documents, or {@code null} if parsing/IO fails
 */
public List<Document> searchIndex(String inField, String queryString) {
    try {
        Query query = new QueryParser(inField, analyzer).parse(queryString);
        // FIX: close the reader — the original leaked an open IndexReader per call.
        try (IndexReader indexReader = DirectoryReader.open(memoryIndex)) {
            IndexSearcher searcher = new IndexSearcher(indexReader);
            TopDocs topDocs = searcher.search(query, 10);
            List<Document> documents = new ArrayList<>();
            for (ScoreDoc scoreDoc : topDocs.scoreDocs) {
                documents.add(searcher.doc(scoreDoc.doc));
            }
            return documents;
        }
    } catch (IOException | ParseException e) {
        e.printStackTrace();
    }
    return null;
}
Example #20
Source File: LuceneExample.java From yuzhouwan with Apache License 2.0 | 6 votes |
public static void main(String[] args) throws Exception { // index try (Directory index = new NIOFSDirectory(Paths.get("/tmp/index"))) { // add try (IndexWriter writer = new IndexWriter(index, new IndexWriterConfig(new StandardAnalyzer()))) { Document doc = new Document(); doc.add(new TextField("blog", "yuzhouwan.com", Field.Store.YES)); doc.add(new StringField("github", "asdf2014", Field.Store.YES)); writer.addDocument(doc); writer.commit(); } // search try (DirectoryReader reader = DirectoryReader.open(index)) { IndexSearcher searcher = new IndexSearcher(reader); QueryParser parser = new QueryParser("blog", new StandardAnalyzer()); Query query = parser.parse("yuzhouwan.com"); ScoreDoc[] hits = searcher.search(query, 1000).scoreDocs; for (ScoreDoc hit : hits) { Document hitDoc = searcher.doc(hit.doc); System.out.println(hitDoc.get("blog")); } } } }
Example #21
Source File: MonitorTestBase.java From lucene-solr with Apache License 2.0 | 5 votes |
/**
 * Parses {@code query} against the shared test field/analyzer, converting
 * syntax errors into IllegalArgumentException.
 */
public static Query parse(String query) {
    try {
        return new QueryParser(FIELD, ANALYZER).parse(query);
    } catch (ParseException e) {
        throw new IllegalArgumentException(e);
    }
}
Example #22
Source File: NLPIRTokenizerTest.java From nlpir-analysis-cn-ictclas with Apache License 2.0 | 5 votes |
public static void main(String[] args) throws Exception { // NLPIR NLPIRTokenizerAnalyzer nta = new NLPIRTokenizerAnalyzer("", 1, "", "", false); // Index IndexWriterConfig inconf = new IndexWriterConfig(nta); inconf.setOpenMode(OpenMode.CREATE_OR_APPEND); IndexWriter index = new IndexWriter(FSDirectory.open(Paths.get("index/")), inconf); Document doc = new Document(); doc.add(new TextField("contents", "特朗普表示,很高兴汉堡会晤后再次同习近平主席通话。我同习主席就重大问题保持沟通和协调、两国加强各层级和各领域交往十分重要。当前,美中关系发展态势良好,我相信可以发展得更好。我期待着对中国进行国事访问。", Field.Store.YES)); index.addDocument(doc); index.flush(); index.close(); // Search String field = "contents"; IndexReader reader = DirectoryReader.open(FSDirectory.open(Paths.get("index/"))); IndexSearcher searcher = new IndexSearcher(reader); QueryParser parser = new QueryParser(field, nta); Query query = parser.parse("特朗普习近平"); TopDocs top = searcher.search(query, 100); System.out.println("总条数:" + top.totalHits); ScoreDoc[] hits = top.scoreDocs; for (int i = 0; i < hits.length; i++) { System.out.println("doc=" + hits[i].doc + " score=" + hits[i].score); Document d = searcher.doc(hits[i].doc); System.out.println(d.get("contents")); } }
Example #23
Source File: VocabularyNeo4jImpl.java From SciGraph with Apache License 2.0 | 5 votes |
/**
 * Searches the Neo4j auto-index for concepts matching the vocabulary query.
 * When synonym/abbreviation/acronym matching is enabled, builds an OR sub-query
 * over those fields (with the plain input boosted 10x) nested under a MUST
 * clause; otherwise the input is parsed directly as the single MUST clause.
 * Parse failures are logged and leave that clause out rather than failing.
 */
@Override
public List<Concept> searchConcepts(Query query) {
    QueryParser parser = getQueryParser();
    // BooleanQuery finalQuery = new BooleanQuery();
    Builder finalQueryBuilder = new BooleanQuery.Builder();
    try {
        if (query.isIncludeSynonyms() || query.isIncludeAbbreviations() || query.isIncludeAcronyms()) {
            // BooleanQuery subQuery = new BooleanQuery();
            Builder subQueryBuilder = new BooleanQuery.Builder();
            // Exact input match is boosted so direct hits outrank synonym hits.
            subQueryBuilder.add(LuceneUtils.getBoostedQuery(parser, query.getInput(), 10.0f), Occur.SHOULD);
            String escapedQuery = QueryParser.escape(query.getInput());
            if (query.isIncludeSynonyms()) {
                subQueryBuilder.add(parser.parse(Concept.SYNONYM + ":" + escapedQuery), Occur.SHOULD);
            }
            if (query.isIncludeAbbreviations()) {
                subQueryBuilder.add(parser.parse(Concept.ABREVIATION + ":" + escapedQuery), Occur.SHOULD);
            }
            if (query.isIncludeAcronyms()) {
                subQueryBuilder.add(parser.parse(Concept.ACRONYM + ":" + escapedQuery), Occur.SHOULD);
            }
            finalQueryBuilder.add(subQueryBuilder.build(), Occur.MUST);
        } else {
            finalQueryBuilder.add(parser.parse(query.getInput()), Occur.MUST);
        }
    } catch (ParseException e) {
        // Swallowed deliberately: an unparsable clause degrades the query rather than aborting.
        logger.log(Level.WARNING, "Failed to parse query", e);
    }
    addCommonConstraints(finalQueryBuilder, query);
    // NOTE(review): IndexHits is not closed here — presumably limitHits consumes/
    // closes it; confirm, otherwise this leaks the hit iterator.
    IndexHits<Node> hits = null;
    BooleanQuery finalQuery = finalQueryBuilder.build();
    try (Transaction tx = graph.beginTx()) {
        hits = graph.index().getNodeAutoIndexer().getAutoIndex().query(finalQuery);
        tx.success();
        return limitHits(hits, query);
    }
}
Example #24
Source File: Search.java From dacapobench with Apache License 2.0 | 5 votes |
/**
 * Reads query lines from the input reader until EOF or a blank line, runs each
 * against the shared searcher (top 10 plus a paging search), then closes the
 * streams and reports batch completion to the parent under its monitor.
 */
public void run() throws java.io.IOException {
    Analyzer analyzer = new StandardAnalyzer();
    QueryParser parser = new QueryParser(field, analyzer);
    while (true) {
        String line = in.readLine();
        // NOTE(review): line.length() == -1 can never be true (length() is non-negative);
        // only the null check terminates on EOF.
        if (line == null || line.length() == -1) break;
        line = line.trim();
        // A blank line also ends the batch.
        if (line.length() == 0) break;
        Query query = null;
        try {
            query = parser.parse(line);
        } catch (Exception e) {
            // NOTE(review): on parse failure query stays null and the searches below
            // will NPE — presumably benchmark inputs are always parsable; confirm.
            e.printStackTrace();
        }
        searcher.search(query, 10);
        doPagingSearch(query);
    }
    reader.close();
    out.flush();
    out.close();
    // Report completion and wake the coordinating parent thread.
    synchronized (parent) {
        parent.completed++;
        if (parent.completed % 4 == 0) {
            System.out.println(parent.completed + " query batches completed");
        }
        parent.notify();
    }
}
Example #25
Source File: VocabularyNeo4jImpl.java From SciGraph with Apache License 2.0 | 5 votes |
/**
 * Formats a Lucene query template, Lucene-escaping every String argument and
 * backslash-escaping its spaces so multi-word values stay a single term.
 * Non-String arguments are passed through unchanged.
 */
static String formatQuery(String format, Object... args) {
    Object[] escaped = new Object[args.length];
    for (int i = 0; i < args.length; i++) {
        Object arg = args[i];
        if (arg instanceof String) {
            escaped[i] = QueryParser.escape((String) arg).replaceAll(" ", "\\\\ ");
        } else {
            escaped[i] = arg;
        }
    }
    return format(format, escaped);
}
Example #26
Source File: SearchImplTest.java From lucene-solr with Apache License 2.0 | 5 votes |
/** Verifies that paging past the first 10 hits returns the second page of 20 total. */
@Test
public void testNextPage() throws Exception {
    SearchImpl search = new SearchImpl(reader);
    Query q = new QueryParser("f1", new StandardAnalyzer()).parse("pie");
    search.search(q, new SimilarityConfig.Builder().build(), null, 10, true);

    Optional<SearchResults> nextPage = search.nextPage();
    assertTrue(nextPage.isPresent());

    SearchResults page = nextPage.get();
    assertEquals(20, page.getTotalHits().value);
    assertEquals(10, page.size());
    assertEquals(10, page.getOffset());
}
Example #27
Source File: SearchImplTest.java From lucene-solr with Apache License 2.0 | 5 votes |
/** Verifies a basic search returns the first page of 10 hits at offset 0. */
@Test
public void testSearch() throws Exception {
    SearchImpl search = new SearchImpl(reader);
    Query q = new QueryParser("f1", new StandardAnalyzer()).parse("apple");

    SearchResults page = search.search(q, new SimilarityConfig.Builder().build(), null, 10, true);

    assertEquals(10, page.getTotalHits().value);
    assertEquals(10, page.size());
    assertEquals(0, page.getOffset());
}
Example #28
Source File: SearchImplTest.java From lucene-solr with Apache License 2.0 | 5 votes |
/** Verifies searching with an explicit sort still returns the first page of 10 hits. */
@Test
public void testSearchWithSort() throws Exception {
    SearchImpl search = new SearchImpl(reader);
    Query q = new QueryParser("f1", new StandardAnalyzer()).parse("apple");
    // Descending string sort on f2.
    Sort sort = new Sort(new SortField("f2", SortField.Type.STRING, true));

    SearchResults page = search.search(q, new SimilarityConfig.Builder().build(), sort, null, 10, true);

    assertEquals(10, page.getTotalHits().value);
    assertEquals(10, page.size());
    assertEquals(0, page.getOffset());
}
Example #29
Source File: TestExtendableQueryParser.java From lucene-solr with Apache License 2.0 | 5 votes |
/**
 * Builds an ExtendableQueryParser for the default test field, falling back to
 * a simple MockAnalyzer when none is supplied, with OR as the default operator
 * and the configured whitespace-splitting behavior.
 */
public QueryParser getParser(Analyzer a, Extensions extensions) throws Exception {
    Analyzer analyzer = a;
    if (analyzer == null) {
        analyzer = new MockAnalyzer(random(), MockTokenizer.SIMPLE, true);
    }
    QueryParser qp;
    if (extensions == null) {
        qp = new ExtendableQueryParser(getDefaultField(), analyzer);
    } else {
        qp = new ExtendableQueryParser(getDefaultField(), analyzer, extensions);
    }
    qp.setDefaultOperator(QueryParserBase.OR_OPERATOR);
    qp.setSplitOnWhitespace(splitOnWhitespace);
    return qp;
}
Example #30
Source File: SearchImplTest.java From lucene-solr with Apache License 2.0 | 5 votes |
/** Verifies that paging beyond the last page yields an empty Optional. */
@Test
public void testNextPageNoMoreResults() throws Exception {
    SearchImpl search = new SearchImpl(reader);
    Query q = new QueryParser("f1", new StandardAnalyzer()).parse("pie");
    search.search(q, new SimilarityConfig.Builder().build(), null, 10, true);

    // First call consumes the remaining page; the second must be empty.
    search.nextPage();
    assertFalse(search.nextPage().isPresent());
}