Java Code Examples for org.apache.solr.common.params.SolrParams#getInt()
The following examples show how to use org.apache.solr.common.params.SolrParams#getInt(). Note that getInt() has two overloads: getInt(String param) returns a boxed Integer that is null when the parameter is absent, while getInt(String param, int def) returns a primitive int, falling back to the supplied default. Each example names the original project and source file it was taken from.
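As a quick orientation before the project examples, here is a minimal, self-contained sketch of both overloads; the parameter names ("rows", "start", "timeout") and values are illustrative only, not taken from any of the projects below:

import org.apache.solr.common.params.ModifiableSolrParams;

public class GetIntDemo {
  public static void main(String[] args) {
    // ModifiableSolrParams is a concrete SolrParams implementation, handy for demos and tests.
    ModifiableSolrParams params = new ModifiableSolrParams();
    params.set("rows", 20);

    // Two-arg overload: primitive int, default used when the parameter is missing.
    int rows = params.getInt("rows", 10);   // 20 (parameter present)
    int start = params.getInt("start", 0);  // 0  (parameter absent, default applies)

    // One-arg overload: boxed Integer, null when the parameter is absent.
    Integer timeout = params.getInt("timeout");
    System.out.println("rows=" + rows + ", start=" + start + ", timeout=" + timeout);
  }
}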
Example 1
Source File: ChildNodeFacetTreeBuilder.java From BioSolr with Apache License 2.0
@Override
public void initialiseParameters(SolrParams localParams) throws SyntaxError {
  super.initialiseParameters(localParams);

  // Initialise the child field - REQUIRED
  childField = localParams.get(FacetTreeParameters.CHILD_FIELD_PARAM);
  if (StringUtils.isBlank(childField)) {
    throw new SyntaxError("Missing child field definition in " + localParams);
  }

  // Initialise the optional fields
  maxLevels = localParams.getInt(FacetTreeParameters.LEVELS_PARAM, 0);

  docFields.addAll(Arrays.asList(getNodeField(), childField));
  if (hasLabelField()) {
    docFields.add(getLabelField());
  }
}
Example 2
Source File: PrunerFactory.java From BioSolr with Apache License 2.0
public Pruner constructPruner(SolrParams params) throws SyntaxError {
  Pruner pruner = null;

  String prunerParam = params.get(PRUNE_PARAM, componentParameters.getDefault(PRUNE_PARAM));

  if (StringUtils.isNotBlank(prunerParam)) {
    if (SIMPLE_PRUNER_VALUE.equals(prunerParam)) {
      pruner = new SimplePruner(params.getInt(SimplePruner.CHILD_COUNT_PARAM, SimplePruner.MIN_CHILD_COUNT));
    } else if (DATAPOINTS_PRUNER_VALUE.equals(prunerParam)) {
      int dp = params.getInt(DATAPOINTS_PARAM, componentParameters.getIntDefault(DATAPOINTS_PARAM));
      if (dp <= 0) {
        throw new SyntaxError("Datapoints parameter invalid");
      }
      pruner = new DatapointPruner(dp,
          componentParameters.getDefault(FacetTreeParameters.DATAPOINTS_MORELABEL_PARAM, DatapointPruner.DEFAULT_MORE_LABEL));
    }
  }

  return pruner;
}
Example 3
Source File: SpellcheckComponent.java From customized-symspell with MIT License
@Override
public void prepare(ResponseBuilder rb) throws IOException {
  SolrParams params = rb.req.getParams();
  threshold = params.getInt(Constants.SPELLCHECK_THRESHOLD, 15);
  if (!params.getBool(COMPONENT_NAME, false)) {
    return;
  }
  try {
    if (params.getBool(Constants.SPELLCHECK_BUILD, false)) {
      customSpellCheckListner.reload(rb.req.getSearcher(), spellChecker);
      rb.rsp.add("command", "build");
    }
  } catch (SpellCheckException ex) {
    log.error("Unable to build spellcheck indexes");
    throw new IOException(ex);
  }
}
Example 4
Source File: GraphQueryParser.java From lucene-solr with Apache License 2.0
@Override
public Query parse() throws SyntaxError {
  // grab query params and defaults
  SolrParams localParams = getLocalParams();

  Query rootNodeQuery = subQuery(localParams.get(QueryParsing.V), null).getQuery();
  String traversalFilterS = localParams.get("traversalFilter");
  Query traversalFilter = traversalFilterS == null ? null : subQuery(traversalFilterS, null).getQuery();

  // NOTE: the from/to are reversed from {!join}
  String fromField = localParams.get("from", "node_id");
  String toField = localParams.get("to", "edge_ids");

  validateFields(fromField);
  validateFields(toField);

  // only documents that do not have values in the edge id fields.
  boolean onlyLeafNodes = localParams.getBool("returnOnlyLeaf", false);
  // choose if you want to return documents that match the initial query or not.
  boolean returnRootNodes = localParams.getBool("returnRoot", true);
  // enable or disable the use of an automaton term for the frontier traversal.
  int maxDepth = localParams.getInt("maxDepth", -1);
  // if true, an automaton will be compiled to issue the next graph hop
  // this avoids having a large number of boolean clauses. (and it's faster too!)
  boolean useAutn = localParams.getBool("useAutn", false);

  // Construct a graph query object based on parameters passed in.
  GraphQuery gq = new GraphQuery(rootNodeQuery, fromField, toField, traversalFilter);
  // set additional parameters that are not in the constructor.
  gq.setMaxDepth(maxDepth);
  gq.setOnlyLeafNodes(onlyLeafNodes);
  gq.setReturnRoot(returnRootNodes);
  gq.setUseAutn(useAutn);
  // return the parsed graph query.
  return gq;
}
Example 5
Source File: JavabinLoader.java From lucene-solr with Apache License 2.0
private void delete(SolrQueryRequest req, UpdateRequest update, UpdateRequestProcessor processor) throws IOException {
  SolrParams params = update.getParams();
  DeleteUpdateCommand delcmd = new DeleteUpdateCommand(req);
  if (params != null) {
    delcmd.commitWithin = params.getInt(UpdateParams.COMMIT_WITHIN, -1);
  }

  if (update.getDeleteByIdMap() != null) {
    Set<Entry<String,Map<String,Object>>> entries = update.getDeleteByIdMap().entrySet();
    for (Entry<String,Map<String,Object>> e : entries) {
      delcmd.id = e.getKey();
      Map<String,Object> map = e.getValue();
      if (map != null) {
        Long version = (Long) map.get("ver");
        if (version != null) {
          delcmd.setVersion(version);
        }
      }
      if (map != null) {
        String route = (String) map.get(ShardParams._ROUTE_);
        if (route != null) {
          delcmd.setRoute(route);
        }
      }
      processor.processDelete(delcmd);
      delcmd.clear();
    }
  }

  if (update.getDeleteQuery() != null) {
    for (String s : update.getDeleteQuery()) {
      delcmd.query = s;
      processor.processDelete(delcmd);
    }
  }
}
Example 6
Source File: XMLLoader.java From lucene-solr with Apache License 2.0
@Override
public XMLLoader init(SolrParams args) {
  // Init StAX parser:
  inputFactory = XMLInputFactory.newInstance();
  EmptyEntityResolver.configureXMLInputFactory(inputFactory);
  inputFactory.setXMLReporter(xmllog);
  try {
    // The java 1.6 bundled stax parser (sjsxp) does not currently have a thread-safe
    // XMLInputFactory, as that implementation tries to cache and reuse the
    // XMLStreamReader. Setting the parser-specific "reuse-instance" property to false
    // prevents this.
    // All other known open-source stax parsers (and the bea ref impl)
    // have thread-safe factories.
    inputFactory.setProperty("reuse-instance", Boolean.FALSE);
  } catch (IllegalArgumentException ex) {
    // Other implementations will likely throw this exception since "reuse-instance"
    // is implementation specific.
    log.debug("Unable to set the 'reuse-instance' property for the input chain: {}", inputFactory);
  }

  // Init SAX parser (for XSL):
  saxFactory = SAXParserFactory.newInstance();
  saxFactory.setNamespaceAware(true); // XSL needs this!
  EmptyEntityResolver.configureSAXParserFactory(saxFactory);

  xsltCacheLifetimeSeconds = XSLT_CACHE_DEFAULT;
  if (args != null) {
    xsltCacheLifetimeSeconds = args.getInt(XSLT_CACHE_PARAM, XSLT_CACHE_DEFAULT);
    log.debug("xsltCacheLifetimeSeconds={}", xsltCacheLifetimeSeconds);
  }
  return this;
}
Example 7
Source File: PointType.java From lucene-solr with Apache License 2.0
@Override
protected void init(IndexSchema schema, Map<String, String> args) {
  SolrParams p = new MapSolrParams(args);
  dimension = p.getInt(DIMENSION, DEFAULT_DIMENSION);
  if (dimension < 1) {
    throw new SolrException(SolrException.ErrorCode.SERVER_ERROR,
        "The dimension must be > 0: " + dimension);
  }
  args.remove(DIMENSION);
  super.init(schema, args);

  // cache suffixes
  createSuffixCache(dimension);
}
Example 8
Source File: DisMaxQParser.java From lucene-solr with Apache License 2.0
protected SolrPluginUtils.DisjunctionMaxQueryParser getParser(Map<String, Float> fields, String paramName,
                                                              SolrParams solrParams, float tiebreaker) {
  int slop = solrParams.getInt(paramName, 0);
  SolrPluginUtils.DisjunctionMaxQueryParser parser =
      new SolrPluginUtils.DisjunctionMaxQueryParser(this, IMPOSSIBLE_FIELD_NAME);
  parser.addAlias(IMPOSSIBLE_FIELD_NAME, tiebreaker, fields);
  parser.setPhraseSlop(slop);
  parser.setSplitOnWhitespace(true);
  return parser;
}
Example 9
Source File: DismaxSearchEngineRequestAdapter.java From querqy with Apache License 2.0
public DismaxSearchEngineRequestAdapter(final QParser qParser,
                                        final SolrQueryRequest request,
                                        final String queryString,
                                        final SolrParams solrParams,
                                        final QuerqyParser querqyParser,
                                        final RewriteChain rewriteChain,
                                        final InfoLogging infoLogging,
                                        final TermQueryCache termQueryCache) {
  this.qParser = qParser;
  this.userQueryString = queryString;
  this.solrParams = solrParams;
  this.termQueryCache = termQueryCache;
  this.infoLoggingContext = solrParams.getBool(INFO_LOGGING, false) && infoLogging != null
      ? new InfoLoggingContext(infoLogging, this)
      : null;
  this.querqyParser = querqyParser;
  this.request = request;
  this.rewriteChain = rewriteChain;
  this.context = new HashMap<>();

  final int ps0 = solrParams.getInt(PS, 0);
  final int ps2 = solrParams.getInt(PS2, ps0);
  final int ps3 = solrParams.getInt(PS3, ps0);

  final List<FieldParams> phraseFields = SolrPluginUtils
      .parseFieldBoostsAndSlop(solrParams.getParams(PF), 0, ps0);
  final List<FieldParams> phraseFields2 = SolrPluginUtils
      .parseFieldBoostsAndSlop(solrParams.getParams(PF2), 2, ps2);
  final List<FieldParams> phraseFields3 = SolrPluginUtils
      .parseFieldBoostsAndSlop(solrParams.getParams(PF3), 3, ps3);

  allPhraseFields = new ArrayList<>(phraseFields.size() + phraseFields2.size() + phraseFields3.size());
  allPhraseFields.addAll(phraseFields);
  allPhraseFields.addAll(phraseFields2);
  allPhraseFields.addAll(phraseFields3);

  minShouldMatch = DisMaxQParser.parseMinShouldMatch(request.getSchema(), solrParams);
}
Example 10
Source File: AsyncBuildSuggestComponent.java From SearchServices with GNU Lesser General Public License v3.0
/**
 * Used in Distributed Search, merges the suggestion results from every shard
 */
@Override
public void finishStage(ResponseBuilder rb) {
  SolrParams params = rb.req.getParams();
  LOG.debug("SuggestComponent finishStage with : " + params);
  if (!params.getBool(COMPONENT_NAME, false) || rb.stage != ResponseBuilder.STAGE_GET_FIELDS)
    return;
  int count = params.getInt(SUGGEST_COUNT, 1);

  List<SuggesterResult> suggesterResults = new ArrayList<>();

  // Collect Shard responses
  for (ShardRequest sreq : rb.finished) {
    for (ShardResponse srsp : sreq.responses) {
      NamedList<Object> resp;
      if ((resp = srsp.getSolrResponse().getResponse()) != null) {
        @SuppressWarnings("unchecked")
        Map<String, SimpleOrderedMap<NamedList<Object>>> namedList =
            (Map<String, SimpleOrderedMap<NamedList<Object>>>) resp.get(SuggesterResultLabels.SUGGEST);
        LOG.debug(srsp.getShard() + " : " + namedList);
        suggesterResults.add(toSuggesterResult(namedList));
      }
    }
  }

  // Merge Shard responses
  SuggesterResult suggesterResult = merge(suggesterResults, count);
  Map<String, SimpleOrderedMap<NamedList<Object>>> namedListResults = new HashMap<>();
  toNamedList(suggesterResult, namedListResults);

  rb.rsp.add(SuggesterResultLabels.SUGGEST, namedListResults);
}
Example 11
Source File: TextProfileSignature.java From lucene-solr with Apache License 2.0
@Override
public void init(SolrParams params) {
  quantRate = params.getFloat("quantRate", 0.01f);
  minTokenLen = params.getInt("minTokenLen", 2);
}
Example 12
Source File: MoreLikeThisHandler.java From lucene-solr with Apache License 2.0
public MoreLikeThisHelper(SolrParams params, SolrIndexSearcher searcher) {
  this.searcher = searcher;
  this.reader = searcher.getIndexReader();
  this.uniqueKeyField = searcher.getSchema().getUniqueKeyField();
  this.needDocSet = params.getBool(FacetParams.FACET, false);

  SolrParams required = params.required();
  String[] fl = required.getParams(MoreLikeThisParams.SIMILARITY_FIELDS);
  List<String> list = new ArrayList<>();
  for (String f : fl) {
    if (!StringUtils.isEmpty(f)) {
      String[] strings = splitList.split(f);
      for (String string : strings) {
        if (!StringUtils.isEmpty(string)) {
          list.add(string);
        }
      }
    }
  }
  String[] fields = list.toArray(new String[list.size()]);
  if (fields.length < 1) {
    throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
        "MoreLikeThis requires at least one similarity field: " + MoreLikeThisParams.SIMILARITY_FIELDS);
  }

  this.mlt = new MoreLikeThis(reader); // TODO -- after LUCENE-896, we can use , searcher.getSimilarity() );
  mlt.setFieldNames(fields);
  mlt.setAnalyzer(searcher.getSchema().getIndexAnalyzer());

  // configurable params
  mlt.setMinTermFreq(params.getInt(MoreLikeThisParams.MIN_TERM_FREQ, MoreLikeThis.DEFAULT_MIN_TERM_FREQ));
  mlt.setMinDocFreq(params.getInt(MoreLikeThisParams.MIN_DOC_FREQ, MoreLikeThis.DEFAULT_MIN_DOC_FREQ));
  mlt.setMaxDocFreq(params.getInt(MoreLikeThisParams.MAX_DOC_FREQ, MoreLikeThis.DEFAULT_MAX_DOC_FREQ));
  mlt.setMinWordLen(params.getInt(MoreLikeThisParams.MIN_WORD_LEN, MoreLikeThis.DEFAULT_MIN_WORD_LENGTH));
  mlt.setMaxWordLen(params.getInt(MoreLikeThisParams.MAX_WORD_LEN, MoreLikeThis.DEFAULT_MAX_WORD_LENGTH));
  mlt.setMaxQueryTerms(params.getInt(MoreLikeThisParams.MAX_QUERY_TERMS, MoreLikeThis.DEFAULT_MAX_QUERY_TERMS));
  mlt.setMaxNumTokensParsed(params.getInt(MoreLikeThisParams.MAX_NUM_TOKENS_PARSED, MoreLikeThis.DEFAULT_MAX_NUM_TOKENS_PARSED));
  mlt.setBoost(params.getBool(MoreLikeThisParams.BOOST, false));

  // There is no default for maxDocFreqPct. Also, it's a bit oddly expressed as an integer value
  // (percentage of the collection's documents count). We keep Lucene's convention here.
  if (params.getInt(MoreLikeThisParams.MAX_DOC_FREQ_PCT) != null) {
    mlt.setMaxDocFreqPct(params.getInt(MoreLikeThisParams.MAX_DOC_FREQ_PCT));
  }

  boostFields = SolrPluginUtils.parseFieldBoosts(params.getParams(MoreLikeThisParams.QF));
}
Example 13
Source File: SuggestComponent.java From lucene-solr with Apache License 2.0
/**
 * Responsible for using the specified suggester to get the suggestions
 * for the query and write the results
 */
@Override
public void process(ResponseBuilder rb) throws IOException {
  SolrParams params = rb.req.getParams();
  log.info("SuggestComponent process with : {}", params);
  if (!params.getBool(COMPONENT_NAME, false) || suggesters.isEmpty()) {
    return;
  }

  boolean buildAll = params.getBool(SUGGEST_BUILD_ALL, false);
  boolean reloadAll = params.getBool(SUGGEST_RELOAD_ALL, false);
  Set<SolrSuggester> querySuggesters;
  try {
    querySuggesters = getSuggesters(params);
  } catch (SolrException ex) {
    if (!buildAll && !reloadAll) {
      throw ex;
    } else {
      querySuggesters = new HashSet<>();
    }
  }

  String query = params.get(SUGGEST_Q);
  if (query == null) {
    query = rb.getQueryString();
    if (query == null) {
      query = params.get(CommonParams.Q);
    }
  }

  if (query != null) {
    int count = params.getInt(SUGGEST_COUNT, 1);
    boolean highlight = params.getBool(SUGGEST_HIGHLIGHT, false);
    boolean allTermsRequired = params.getBool(SUGGEST_ALL_TERMS_REQUIRED, true);
    String contextFilter = params.get(SUGGEST_CONTEXT_FILTER_QUERY);
    if (contextFilter != null) {
      contextFilter = contextFilter.trim();
      if (contextFilter.length() == 0) {
        contextFilter = null;
      }
    }

    SuggesterOptions options = new SuggesterOptions(new CharsRef(query), count, contextFilter, allTermsRequired, highlight);
    Map<String, SimpleOrderedMap<NamedList<Object>>> namedListResults = new HashMap<>();
    for (SolrSuggester suggester : querySuggesters) {
      SuggesterResult suggesterResult = suggester.getSuggestions(options);
      toNamedList(suggesterResult, namedListResults);
    }
    rb.rsp.add(SuggesterResultLabels.SUGGEST, namedListResults);
  }
}
Example 14
Source File: StatsField.java From lucene-solr with Apache License 2.0
/**
 * Creates an HllOptions based on the (local) params specified (if appropriate).
 *
 * @param localParams the LocalParams for this {@link StatsField}
 * @param field the field corresponding to this {@link StatsField}, may be null if these stats are over a value source
 * @return the {@link HllOptions} to use based on the params, or null if no {@link HLL} should be computed
 * @throws SolrException if there are invalid options
 */
public static HllOptions parseHllOptions(SolrParams localParams, SchemaField field) throws SolrException {

  String cardinalityOpt = localParams.get(Stat.cardinality.name());
  if (StringUtils.isBlank(cardinalityOpt)) {
    return null;
  }

  final NumberType hashableNumType = getHashableNumericType(field);

  // some sane defaults
  int log2m = 13;   // roughly equivalent to "cardinality='0.33'"
  int regwidth = 6; // with decent hash, this is plenty for all valid long hashes

  if (NumberType.FLOAT.equals(hashableNumType) || NumberType.INTEGER.equals(hashableNumType)) {
    // for 32bit values, we can adjust our default regwidth down a bit
    regwidth--;

    // NOTE: EnumField uses LegacyNumericType.INT, and in theory we could be super conservative
    // with it, but there's no point - just let the EXPLICIT HLL handle it
  }

  // TODO: we could attempt additional reductions in the default regwidth based on index
  // statistics -- but that doesn't seem worth the effort.  for tiny indexes, the
  // EXPLICIT and SPARSE HLL representations have us nicely covered, and in general we don't
  // want to be too aggressive about lowering regwidth or we could get really poor results if
  // log2m is also low and there is heavy hashkey collision

  try {
    // NFE will short out here if it's not a number
    final double accuracyOpt = Double.parseDouble(cardinalityOpt);

    // if a float between 0 and 1 is specified, treat it as a preference of accuracy
    // - 0 means accuracy is not a concern, save RAM
    // - 1 means be as accurate as possible, using as much RAM as needed.

    if (accuracyOpt < 0D || 1.0D < accuracyOpt) {
      throw new SolrException(ErrorCode.BAD_REQUEST, ERR);
    }

    // use accuracyOpt as a scaling factor between min & max legal log2m values
    log2m = HLL.MINIMUM_LOG2M_PARAM
        + (int) Math.round(accuracyOpt * (HLL.MAXIMUM_LOG2M_PARAM - HLL.MINIMUM_LOG2M_PARAM));

    // use accuracyOpt as a scaling factor for regwidth as well, BUT...
    // be more conservative -- HLL.MIN_REGWIDTH_PARAM is too absurdly low to be useful
    // use previously computed (hashableNumType) default regwidth -1 as lower bound for scaling
    final int MIN_HUERISTIC_REGWIDTH = regwidth - 1;
    regwidth = MIN_HUERISTIC_REGWIDTH
        + (int) Math.round(accuracyOpt * (HLL.MAXIMUM_REGWIDTH_PARAM - MIN_HUERISTIC_REGWIDTH));

  } catch (NumberFormatException nfe) {
    // param value isn't a number -- let's check for simple true/false
    if (!localParams.getBool(Stat.cardinality.name(), false)) {
      return null;
    }
  }

  // let explicit params override both the default and/or any accuracy specification
  log2m = localParams.getInt("hllLog2m", log2m);
  regwidth = localParams.getInt("hllRegwidth", regwidth);

  // validate legal values
  if (log2m < HLL.MINIMUM_LOG2M_PARAM || HLL.MAXIMUM_LOG2M_PARAM < log2m) {
    throw new SolrException(ErrorCode.BAD_REQUEST, "hllLog2m must be at least " +
        HLL.MINIMUM_LOG2M_PARAM + " and at most " + HLL.MAXIMUM_LOG2M_PARAM + " (" + log2m + ")");
  }
  if (regwidth < HLL.MINIMUM_REGWIDTH_PARAM || HLL.MAXIMUM_REGWIDTH_PARAM < regwidth) {
    throw new SolrException(ErrorCode.BAD_REQUEST, "hllRegwidth must be at least " +
        HLL.MINIMUM_REGWIDTH_PARAM + " and at most " + HLL.MAXIMUM_REGWIDTH_PARAM);
  }

  HashFunction hasher = localParams.getBool("hllPreHashed", false) ? null : Hashing.murmur3_128();

  if (null == hasher) {
    // if this is a function, or a non Long field, pre-hashed is invalid
    // NOTE: we ignore hashableNumType - it's LONG for non numerics like Strings
    if (null == field || !(NumberType.LONG.equals(field.getType().getNumberType())
        || NumberType.DATE.equals(field.getType().getNumberType()))) {
      throw new SolrException(ErrorCode.BAD_REQUEST, "hllPreHashed is only supported with Long based fields");
    }
  }

  // if we're still here, then we need an HLL...
  return new HllOptions(log2m, regwidth, hasher);
}
Example 15
Source File: ZookeeperInfoHandler.java From lucene-solr with Apache License 2.0
@Override @SuppressWarnings({"unchecked"}) public void handleRequestBody(SolrQueryRequest req, SolrQueryResponse rsp) throws Exception { final SolrParams params = req.getParams(); Map<String, String> map = new HashMap<>(1); map.put(WT, "raw"); map.put(OMIT_HEADER, "true"); req.setParams(SolrParams.wrapDefaults(new MapSolrParams(map), params)); synchronized (this) { if (pagingSupport == null) { pagingSupport = new PagedCollectionSupport(); ZkController zkController = cores.getZkController(); if (zkController != null) { // get notified when the ZK session expires (so we can clear the cached collections and rebuild) zkController.addOnReconnectListener(pagingSupport); } } } String path = params.get(PATH); if (params.get("addr") != null) { throw new SolrException(ErrorCode.BAD_REQUEST, "Illegal parameter \"addr\""); } String detailS = params.get("detail"); boolean detail = detailS != null && detailS.equals("true"); String dumpS = params.get("dump"); boolean dump = dumpS != null && dumpS.equals("true"); int start = params.getInt("start", 0); // Note start ignored if rows not specified int rows = params.getInt("rows", -1); String filterType = params.get("filterType"); if (filterType != null) { filterType = filterType.trim().toLowerCase(Locale.ROOT); if (filterType.length() == 0) filterType = null; } FilterType type = (filterType != null) ? FilterType.valueOf(filterType) : FilterType.none; String filter = (type != FilterType.none) ? params.get("filter") : null; if (filter != null) { filter = filter.trim(); if (filter.length() == 0) filter = null; } ZKPrinter printer = new ZKPrinter(cores.getZkController()); printer.detail = detail; printer.dump = dump; boolean isGraphView = "graph".equals(params.get("view")); // There is no znode /clusterstate.json (removed in Solr 9), but we do as if there's one and return collection listing // Need to change services.js if cleaning up here, collection list is used from Admin UI Cloud - Graph boolean paginateCollections = (isGraphView && "/clusterstate.json".equals(path)); printer.page = paginateCollections ? new PageOfCollections(start, rows, type, filter) : null; printer.pagingSupport = pagingSupport; try { if (paginateCollections) { // List collections and allow pagination, but no specific znode info like when looking at a normal ZK path printer.printPaginatedCollections(); } else { printer.print(path); } } finally { printer.close(); } rsp.getValues().add(RawResponseWriter.CONTENT,printer); }
Example 16
Source File: AsyncBuildSuggestComponent.java From SearchServices with GNU Lesser General Public License v3.0
/**
 * Responsible for using the specified suggester to get the suggestions
 * for the query and write the results
 */
@Override
public void process(ResponseBuilder rb) throws IOException {
  SolrParams params = rb.req.getParams();
  LOG.debug("SuggestComponent process with : " + params);
  if (!params.getBool(COMPONENT_NAME, false) || suggesters.isEmpty()) {
    return;
  }

  boolean buildAll = params.getBool(SUGGEST_BUILD_ALL, false);
  boolean reloadAll = params.getBool(SUGGEST_RELOAD_ALL, false);
  Set<SolrSuggester> querySuggesters;
  try {
    querySuggesters = getSuggesters(params);
  } catch (IllegalArgumentException ex) {
    if (!buildAll && !reloadAll) {
      throw ex;
    } else {
      querySuggesters = new HashSet<>();
    }
  }

  String query = params.get(SUGGEST_Q);
  if (query == null) {
    query = rb.getQueryString();
    if (query == null) {
      query = params.get(CommonParams.Q);
    }
  }

  if (query != null) {
    int count = params.getInt(SUGGEST_COUNT, 1);
    boolean highlight = params.getBool(SUGGEST_HIGHLIGHT, false);
    boolean allTermsRequired = params.getBool(SUGGEST_ALL_TERMS_REQUIRED, true);
    String contextFilter = params.get(SUGGEST_CONTEXT_FILTER_QUERY);
    if (contextFilter != null) {
      contextFilter = contextFilter.trim();
      if (contextFilter.length() == 0) {
        contextFilter = null;
      }
    }

    SuggesterOptions options = new SuggesterOptions(new CharsRef(query), count, contextFilter, allTermsRequired, highlight);
    Map<String, SimpleOrderedMap<NamedList<Object>>> namedListResults = new HashMap<>();
    for (SolrSuggester suggester : querySuggesters) {
      SuggesterResult suggesterResult = suggester.getSuggestions(options);
      toNamedList(suggesterResult, namedListResults);
    }
    rb.rsp.add(SuggesterResultLabels.SUGGEST, namedListResults);
  }
}
Example 17
Source File: DirectSolrSpellChecker.java From lucene-solr with Apache License 2.0
@Override @SuppressWarnings({"unchecked"}) public String init(@SuppressWarnings({"rawtypes"})NamedList config, SolrCore core) { SolrParams params = config.toSolrParams(); log.info("init: {}", config); String name = super.init(config, core); Comparator<SuggestWord> comp = SuggestWordQueue.DEFAULT_COMPARATOR; String compClass = (String) config.get(COMPARATOR_CLASS); if (compClass != null) { if (compClass.equalsIgnoreCase(SCORE_COMP)) comp = SuggestWordQueue.DEFAULT_COMPARATOR; else if (compClass.equalsIgnoreCase(FREQ_COMP)) comp = new SuggestWordFrequencyComparator(); else //must be a FQCN comp = (Comparator<SuggestWord>) core.getResourceLoader().newInstance(compClass, Comparator.class); } StringDistance sd = DirectSpellChecker.INTERNAL_LEVENSHTEIN; String distClass = (String) config.get(STRING_DISTANCE); if (distClass != null && !distClass.equalsIgnoreCase(INTERNAL_DISTANCE)) sd = core.getResourceLoader().newInstance(distClass, StringDistance.class); float minAccuracy = DEFAULT_ACCURACY; Float accuracy = params.getFloat(ACCURACY); if (accuracy != null) minAccuracy = accuracy; int maxEdits = DEFAULT_MAXEDITS; Integer edits = params.getInt(MAXEDITS); if (edits != null) maxEdits = edits; int minPrefix = DEFAULT_MINPREFIX; Integer prefix = params.getInt(MINPREFIX); if (prefix != null) minPrefix = prefix; int maxInspections = DEFAULT_MAXINSPECTIONS; Integer inspections = params.getInt(MAXINSPECTIONS); if (inspections != null) maxInspections = inspections; float minThreshold = DEFAULT_THRESHOLD_TOKEN_FREQUENCY; Float threshold = params.getFloat(THRESHOLD_TOKEN_FREQUENCY); if (threshold != null) minThreshold = threshold; int minQueryLength = DEFAULT_MINQUERYLENGTH; Integer queryLength = params.getInt(MINQUERYLENGTH); if (queryLength != null) minQueryLength = queryLength; int maxQueryLength = DEFAULT_MAXQUERYLENGTH; Integer overriddenMaxQueryLength = params.getInt(MAXQUERYLENGTH); if (overriddenMaxQueryLength != null) maxQueryLength = overriddenMaxQueryLength; float maxQueryFrequency = DEFAULT_MAXQUERYFREQUENCY; Float queryFreq = params.getFloat(MAXQUERYFREQUENCY); if (queryFreq != null) maxQueryFrequency = queryFreq; checker.setComparator(comp); checker.setDistance(sd); checker.setMaxEdits(maxEdits); checker.setMinPrefix(minPrefix); checker.setAccuracy(minAccuracy); checker.setThresholdFrequency(minThreshold); checker.setMaxInspections(maxInspections); checker.setMinQueryLength(minQueryLength); checker.setMaxQueryLength(maxQueryLength); checker.setMaxQueryFrequency(maxQueryFrequency); checker.setLowerCaseTerms(false); return name; }
Example 18
Source File: DistribCursorPagingTest.java From lucene-solr with Apache License 2.0
/**
 * <p>
 * Given a set of params, executes a cursor query using {@link CursorMarkParams#CURSOR_MARK_START}
 * and then continuously walks the results using {@link CursorMarkParams#CURSOR_MARK_NEXT} as long
 * as a non-0 number of docs are returned. This method records the set of all id's
 * (must be positive ints) encountered and throws an assertion failure if any id is
 * encountered more than once, or if the set grows above maxSize.
 * </p>
 *
 * <p>
 * Note that this method explicitly uses the "cloudClient" for executing the queries,
 * instead of relying on the test infrastructure to execute the queries redundantly
 * against both the cloud client as well as a control client. This is because term stat
 * differences in a sharded setup can result in different scores for documents compared
 * to the control index -- which can affect the sorting in some cases and cause false
 * negatives in the response comparisons (even if we don't include "score" in the "fl")
 * </p>
 */
public SentinelIntSet assertFullWalkNoDups(int maxSize, SolrParams params) throws Exception {
  SentinelIntSet ids = new SentinelIntSet(maxSize, -1);
  String cursorMark = CURSOR_MARK_START;
  int docsOnThisPage = Integer.MAX_VALUE;
  while (0 < docsOnThisPage) {
    final SolrParams p = p(params, CURSOR_MARK_PARAM, cursorMark);
    QueryResponse rsp = cloudClient.query(p);
    String nextCursorMark = assertHashNextCursorMark(rsp);
    SolrDocumentList docs = extractDocList(rsp);
    docsOnThisPage = docs.size();
    if (null != params.getInt(CommonParams.ROWS)) {
      int rows = params.getInt(CommonParams.ROWS);
      assertTrue("Too many docs on this page: " + rows + " < " + docsOnThisPage,
                 docsOnThisPage <= rows);
    }
    if (0 == docsOnThisPage) {
      assertEquals("no more docs, but " + CURSOR_MARK_NEXT + " isn't same",
                   cursorMark, nextCursorMark);
    }
    for (SolrDocument doc : docs) {
      int id = Integer.parseInt(doc.getFieldValue("id").toString());
      if (ids.exists(id)) {
        String msg = "(" + p + ") walk already seen: " + id;
        try {
          queryAndCompareShards(params("distrib", "false", "q", "id:" + id));
        } catch (AssertionError ae) {
          throw new AssertionError(msg + ", found shard inconsistency that would explain it...", ae);
        }
        rsp = cloudClient.query(params("q", "id:" + id));
        throw new AssertionError(msg + ", don't know why; q=id:" + id + " gives: " + rsp.toString());
      }
      ids.put(id);
      assertFalse("id set bigger then max allowed (" + maxSize + "): " + ids.size(),
                  maxSize < ids.size());
    }
    cursorMark = nextCursorMark;
  }
  return ids;
}
Example 19
Source File: SpellCheckComponent.java From lucene-solr with Apache License 2.0
@Override @SuppressWarnings("unchecked") public void process(ResponseBuilder rb) throws IOException { SolrParams params = rb.req.getParams(); if (!params.getBool(COMPONENT_NAME, false) || spellCheckers.isEmpty()) { return; } boolean shardRequest = "true".equals(params.get(ShardParams.IS_SHARD)); String q = params.get(SPELLCHECK_Q); SolrSpellChecker spellChecker = getSpellChecker(params); Collection<Token> tokens = null; if (q != null) { //we have a spell check param, tokenize it with the query analyzer applicable for this spellchecker tokens = getTokens(q, spellChecker.getQueryAnalyzer()); } else { q = rb.getQueryString(); if (q == null) { q = params.get(CommonParams.Q); } tokens = queryConverter.convert(q); } if (tokens != null && tokens.isEmpty() == false) { if (spellChecker != null) { int count = params.getInt(SPELLCHECK_COUNT, 1); boolean onlyMorePopular = params.getBool(SPELLCHECK_ONLY_MORE_POPULAR, DEFAULT_ONLY_MORE_POPULAR); boolean extendedResults = params.getBool(SPELLCHECK_EXTENDED_RESULTS, false); boolean collate = params.getBool(SPELLCHECK_COLLATE, false); float accuracy = params.getFloat(SPELLCHECK_ACCURACY, Float.MIN_VALUE); int alternativeTermCount = params.getInt(SpellingParams.SPELLCHECK_ALTERNATIVE_TERM_COUNT, 0); //If specified, this can be a discrete # of results, or a percentage of fq results. Integer maxResultsForSuggest = maxResultsForSuggest(rb); ModifiableSolrParams customParams = new ModifiableSolrParams(); for (String checkerName : getDictionaryNames(params)) { customParams.add(getCustomParams(checkerName, params)); } Number hitsLong = (Number) rb.rsp.getToLog().get("hits"); long hits = 0; if (hitsLong == null) { hits = rb.getNumberDocumentsFound(); } else { hits = hitsLong.longValue(); } SpellingResult spellingResult = null; if (maxResultsForSuggest == null || hits <= maxResultsForSuggest) { SuggestMode suggestMode = SuggestMode.SUGGEST_WHEN_NOT_IN_INDEX; if (onlyMorePopular) { suggestMode = SuggestMode.SUGGEST_MORE_POPULAR; } else if (alternativeTermCount > 0) { suggestMode = SuggestMode.SUGGEST_ALWAYS; } IndexReader reader = rb.req.getSearcher().getIndexReader(); SpellingOptions options = new SpellingOptions(tokens, reader, count, alternativeTermCount, suggestMode, extendedResults, accuracy, customParams); spellingResult = spellChecker.getSuggestions(options); } else { spellingResult = new SpellingResult(); } boolean isCorrectlySpelled = hits > (maxResultsForSuggest==null ? 0 : maxResultsForSuggest); @SuppressWarnings({"rawtypes"}) NamedList response = new SimpleOrderedMap(); @SuppressWarnings({"rawtypes"}) NamedList suggestions = toNamedList(shardRequest, spellingResult, q, extendedResults); response.add("suggestions", suggestions); if (extendedResults) { response.add("correctlySpelled", isCorrectlySpelled); } if (collate) { addCollationsToResponse(params, spellingResult, rb, q, response, spellChecker.isSuggestionsMayOverlap()); } if (shardRequest) { addOriginalTermsToResponse(response, tokens); } rb.rsp.add("spellcheck", response); } else { throw new SolrException(SolrException.ErrorCode.NOT_FOUND, "Specified dictionaries do not exist: " + getDictionaryNameAsSingleString(getDictionaryNames(params))); } } }
Example 20
Source File: PagerComponent.java From dubbox with Apache License 2.0
@Override @SuppressWarnings("unchecked") public void process(ResponseBuilder rb) throws IOException { /* get request params */ SolrParams par = rb.req.getParams(); int rows = par.getInt(CommonParams.ROWS, 0); int start = par.getInt(CommonParams.START, 0); int pages = par.getInt(PARAM_PAGER, 0); int pages_pre = par.getInt(PARAM_PAGER_PRE, 2); /* neet to work ? */ if (pages == 0 || rows == 0) return; /* pager list */ NamedList lst = new SimpleOrderedMap<Object>(); NamedList lst2 = new SimpleOrderedMap<Object>(); /* paging pages */ int doc_count = rb.getResults().docSet.size(); int page_count = doc_count / rows; int page_actual = start / rows; int page_pre = pages_pre; int page_post = pages - page_pre - 1; /* page range */ if (page_actual - page_pre < 0) { page_post += -(page_actual - page_pre); page_pre -= -(page_actual - page_pre); } else if (page_actual + page_post > page_count) { page_post = pages - page_pre; page_pre = page_actual + pages - page_count; } /* sanity */ if (page_pre < 0) page_pre = 0; if (page_post < 0) page_post = 0; /* next pages list */ int i = (page_actual - page_pre); for (i = (i <= 0 ? 0 : i); i <= page_count && i <= (page_actual + page_post); i++) lst2.add(Integer.toString(i + 1), i * rows); lst.add("pages", lst2); /* navi */ if (page_actual > 0) lst.add("prev", (page_actual - 1) * rows); if (page_actual - page_pre > 0) lst.add("first", 0); if (page_actual < page_count) lst.add("next", (page_actual + 1) * rows); if (page_actual + page_post < page_count) lst.add("last", page_count * rows); lst.add("actual", page_actual + 1); lst.add("count", page_count); /* finish */ rb.rsp.add("pager", lst); }