Java Code Examples for org.apache.solr.search.SolrIndexSearcher#getSchema()
The following examples show how to use
org.apache.solr.search.SolrIndexSearcher#getSchema() .
You can vote up the ones you like or vote down the ones you don't like,
and you can go to the original project or source file by following the links above each example. You may also check out the related API usage in the sidebar.
Example 1
Source File: ResponseLogComponent.java From lucene-solr with Apache License 2.0 | 6 votes |
/**
 * Logs ids (and scores, when present) for the documents in the current response.
 * Does nothing unless the component is enabled via request params and the schema
 * declares a unique key field.
 */
@Override
public void process(ResponseBuilder rb) throws IOException {
  final SolrParams requestParams = rb.req.getParams();
  if (!requestParams.getBool(COMPONENT_NAME, false)) {
    return;
  }
  final SolrIndexSearcher indexSearcher = rb.req.getSearcher();
  final IndexSchema indexSchema = indexSearcher.getSchema();
  if (indexSchema.getUniqueKeyField() == null) {
    return; // without a unique key there is nothing meaningful to log
  }
  final ResultContext resultContext = (ResultContext) rb.rsp.getResponse();
  final DocList docList = resultContext.getDocList();
  if (docList.hasScores()) {
    processScores(rb, docList, indexSchema, indexSearcher);
  } else {
    processIds(rb, docList, indexSchema, indexSearcher);
  }
}
Example 2
Source File: UnifiedSolrHighlighter.java From lucene-solr with Apache License 2.0 | 6 votes |
/**
 * Retrieves the printable unique key for each of the given internal doc ids,
 * used to key the highlighting results. Returns an array of nulls of the same
 * length when the schema declares no unique key field.
 */
protected String[] getUniqueKeys(SolrIndexSearcher searcher, int[] docIDs) throws IOException {
  IndexSchema schema = searcher.getSchema();
  SchemaField uniqueKey = schema.getUniqueKeyField();
  String[] keys = new String[docIDs.length];
  if (uniqueKey == null) {
    return keys; // no unique key configured: every entry stays null
  }
  // Fetch only the unique key field for each doc.
  SolrReturnFields keyOnly = new SolrReturnFields(uniqueKey.getName(), null);
  for (int i = 0; i < docIDs.length; i++) {
    SolrDocument doc = searcher.getDocFetcher().solrDoc(docIDs[i], keyOnly);
    keys[i] = schema.printableUniqueKey(doc);
  }
  return keys;
}
Example 3
Source File: QueryAutoFilteringComponent.java From query-autofiltering-component with Apache License 2.0 | 6 votes |
/**
 * Collects the names of all stored string ({@code StrField}) fields visible to the
 * searcher, skipping any field listed in {@code excludeFields}.
 *
 * <p>Uses {@link IndexSchema#getFieldOrNull} rather than {@code getField}: names
 * returned by {@code searcher.getFieldNames()} come from the index and may not
 * resolve in the schema, in which case {@code getField} throws a SolrException and
 * would abort the whole scan (the sibling variant of this method works around the
 * same problem with a try/catch).</p>
 */
private ArrayList<String> getStringFields( SolrIndexSearcher searcher ) {
  IndexSchema schema = searcher.getSchema();
  ArrayList<String> strFields = new ArrayList<String>( );

  for (String fieldName : searcher.getFieldNames()) {
    if (excludeFields != null && excludeFields.contains( fieldName )) {
      continue;
    }
    SchemaField field = schema.getFieldOrNull( fieldName );
    if (field != null && field.stored() && field.getType() instanceof StrField) {
      strFields.add( fieldName );
    }
  }
  return strFields;
}
Example 4
Source File: QueryAutoFilteringComponent.java From query-autofiltering-component with Apache License 2.0 | 6 votes |
/**
 * Collects the names of all stored string ({@code StrField}) fields visible to the
 * searcher, skipping any field listed in {@code excludeFields}.
 *
 * <p>The original wrapped {@code schema.getField(fieldName)} in an empty
 * {@code catch (Throwable)} to skip index field names that do not resolve in the
 * schema. That silently swallowed every error, including JVM Errors. Using
 * {@link IndexSchema#getFieldOrNull} keeps the "skip unresolved names" behavior
 * without swallowing anything.</p>
 */
private ArrayList<String> getStringFields( SolrIndexSearcher searcher ) {
  IndexSchema schema = searcher.getSchema();
  ArrayList<String> strFields = new ArrayList<String>( );

  Collection<String> fieldNames = searcher.getFieldNames();
  Iterator<String> fnIt = fieldNames.iterator();
  while ( fnIt.hasNext() ) {
    String fieldName = fnIt.next( );
    if (excludeFields == null || !excludeFields.contains( fieldName )) {
      // null means the name does not resolve in the schema — skip it.
      SchemaField field = schema.getFieldOrNull( fieldName );
      if (field != null && field.stored() && field.getType() instanceof StrField) {
        strFields.add( fieldName );
      }
    }
  }
  return strFields;
}
Example 5
Source File: SolrPluginUtils.java From lucene-solr with Apache License 2.0 | 5 votes |
/**
 * Adds standard "explain" debug information to {@code dbg} when {@code dbgResults}
 * is set: per-document explanations for {@code results}, plus explanations for an
 * optional secondary query supplied via the {@code explainOther} request parameter.
 * Output is structured or stringified depending on {@code debug.explain.structured}.
 */
@SuppressWarnings({"unchecked"})
public static void doStandardResultsDebug(
    SolrQueryRequest req,
    Query query,
    DocList results,
    boolean dbgResults,
    @SuppressWarnings({"rawtypes"}) NamedList dbg) throws IOException {
  if (!dbgResults) {
    return;
  }

  SolrIndexSearcher searcher = req.getSearcher();
  IndexSchema schema = searcher.getSchema();
  boolean asStruct = req.getParams().getBool(CommonParams.EXPLAIN_STRUCT, false);

  if (results != null) {
    NamedList<Explanation> explanations = getExplanations(query, results, searcher, schema);
    dbg.add("explain",
        asStruct ? explanationsToNamedLists(explanations) : explanationsToStrings(explanations));
  }

  String otherQuery = req.getParams().get(CommonParams.EXPLAIN_OTHER);
  if (otherQuery != null && otherQuery.length() > 0) {
    // Run the secondary query (top 10 docs) and explain those against the main query.
    DocList otherDocs = doSimpleQuery(otherQuery, req, 0, 10);
    dbg.add("otherQuery", otherQuery);
    NamedList<Explanation> otherExplanations = getExplanations(query, otherDocs, searcher, schema);
    dbg.add("explainOther",
        asStruct ? explanationsToNamedLists(otherExplanations)
                 : explanationsToStrings(otherExplanations));
  }
}
Example 6
Source File: DeepPagingIterator.java From SolRDF with Apache License 2.0 | 5 votes |
/**
 * Builds a new iterator with the given data.
 *
 * @param searcher the Solr index searcher.
 * @param queryCommand the query command that will be submitted.
 * @param sort the sort specs.
 * @param consumer the Graph event consumer that will be notified on relevant events.
 */
DeepPagingIterator(
    final SolrIndexSearcher searcher,
    final SolrIndexSearcher.QueryCommand queryCommand,
    final SortSpec sort,
    final GraphEventConsumer consumer) {
  this.searcher = searcher;
  this.queryCommand = queryCommand;
  // The cursor mark is built from the schema and sort spec and attached to the
  // query command so each page of results resumes where the previous one ended.
  this.sentCursorMark = new CursorMark(searcher.getSchema(), sort);
  this.queryCommand.setCursorMark(sentCursorMark);
  this.consumer = consumer;
}
Example 7
Source File: AbstractFacetTreeBuilder.java From BioSolr with Apache License 2.0 | 5 votes |
/**
 * Verifies that every name in {@code fields} exists in the searcher's schema.
 *
 * @param searcher the searcher whose schema is consulted.
 * @param fields field names to validate.
 * @throws SolrException BAD_REQUEST naming the first field that is not in the schema.
 */
protected void checkFieldsInSchema(SolrIndexSearcher searcher, Collection<String> fields) throws SolrException {
  IndexSchema schema = searcher.getSchema();
  for (String field : fields) {
    // Bug fix: IndexSchema.getField() never returns null — it throws a
    // SolrException for unknown fields — so the original null check was dead
    // code and the intended BAD_REQUEST message below could never be raised.
    // getFieldOrNull() returns null instead of throwing.
    if (schema.getFieldOrNull(field) == null) {
      throw new SolrException(ErrorCode.BAD_REQUEST,
          "\"" + field + "\" is not in schema " + schema.getSchemaName());
    }
  }
}
Example 8
Source File: AlfrescoLukeRequestHandler.java From SearchServices with GNU Lesser General Public License v3.0 | 4 votes |
private static SimpleOrderedMap<Object> getIndexedFieldsInfo( SolrQueryRequest req) throws Exception { SolrIndexSearcher searcher = req.getSearcher(); SolrParams params = req.getParams(); Set<String> fields = null; String fl = params.get(CommonParams.FL); if (fl != null) { fields = new TreeSet<>(Arrays.asList(fl.split("[,\\s]+"))); } LeafReader reader = searcher.getSlowAtomicReader(); IndexSchema schema = searcher.getSchema(); // Don't be tempted to put this in the loop below, the whole point here // is to alphabetize the fields! Set<String> fieldNames = new TreeSet<>(); for (FieldInfo fieldInfo : reader.getFieldInfos()) { fieldNames.add(fieldInfo.name); } // Walk the term enum and keep a priority queue for each map in our set SimpleOrderedMap<Object> vInfo = new SimpleOrderedMap<>(); SimpleOrderedMap<Object> aInfo = new SimpleOrderedMap<>(); for (String fieldName : fieldNames) { if (fields != null && !fields.contains(fieldName) && !fields.contains("*")) { continue; // we're not interested in this field Still an issue // here } SimpleOrderedMap<Object> fieldMap = new SimpleOrderedMap<>(); SchemaField sfield = schema.getFieldOrNull(fieldName); FieldType ftype = (sfield == null) ? null : sfield.getType(); fieldMap.add("type", (ftype == null) ? 
null : ftype.getTypeName()); fieldMap.add("schema", getFieldFlags(sfield)); if (sfield != null && schema.isDynamicField(sfield.getName()) && schema.getDynamicPattern(sfield.getName()) != null) { fieldMap.add("dynamicBase", schema.getDynamicPattern(sfield.getName())); } Terms terms = reader.fields().terms(fieldName); if (terms == null) { // Not indexed, so we need to report what we // can (it made it through the fl param if // specified) vInfo.add(AlfrescoSolrDataModel.getInstance() .getAlfrescoPropertyFromSchemaField(fieldName), fieldMap); aInfo.add(fieldName, fieldMap); continue; } if (sfield != null && sfield.indexed()) { if (params.getBool(INCLUDE_INDEX_FIELD_FLAGS, true)) { Document doc = getFirstLiveDoc(terms, reader); if (doc != null) { // Found a document with this field try { IndexableField fld = doc.getField(fieldName); if (fld != null) { fieldMap.add("index", getFieldFlags(fld)); } else { // it is a non-stored field... fieldMap.add("index", "(unstored field)"); } } catch (Exception ex) { log.warn("error reading field: " + fieldName); } } } fieldMap.add("docs", terms.getDocCount()); } if (fields != null && (fields.contains(fieldName) || fields.contains("*"))) { getDetailedFieldInfo(req, fieldName, fieldMap); } // Add the field vInfo.add(fieldName, fieldMap); aInfo.add(AlfrescoSolrDataModel.getInstance() .getAlfrescoPropertyFromSchemaField(fieldName), fieldMap); } SimpleOrderedMap<Object> finfo = new SimpleOrderedMap<>(); finfo.addAll(vInfo); // finfo.add("mimetype()", finfo.get("cm:content.mimetype")); // finfo.add("contentSize()", finfo.get("cm:content.size")); finfo.addAll(aInfo); return finfo; }
Example 9
Source File: LukeRequestHandler.java From lucene-solr with Apache License 2.0 | 4 votes |
/**
 * Builds an alphabetized map of per-field information (type, schema flags, index
 * flags, doc counts) for every field present in the index, optionally restricted
 * by the request's "fl" parameter ("*" matches all fields).
 */
private static SimpleOrderedMap<Object> getIndexedFieldsInfo(SolrQueryRequest req) throws Exception {

  SolrIndexSearcher searcher = req.getSearcher();
  SolrParams params = req.getParams();

  // Optional field list from "fl"; split on commas/whitespace.
  Set<String> fields = null;
  String fl = params.get(CommonParams.FL);
  if (fl != null) {
    fields = new TreeSet<>(Arrays.asList(fl.split( "[,\\s]+" )));
  }

  LeafReader reader = searcher.getSlowAtomicReader();
  IndexSchema schema = searcher.getSchema();

  // Don't be tempted to put this in the loop below, the whole point here is to alphabetize the fields!
  Set<String> fieldNames = new TreeSet<>();
  for(FieldInfo fieldInfo : reader.getFieldInfos()) {
    fieldNames.add(fieldInfo.name);
  }

  // Walk the term enum and keep a priority queue for each map in our set
  SimpleOrderedMap<Object> finfo = new SimpleOrderedMap<>();

  for (String fieldName : fieldNames) {
    if (fields != null && ! fields.contains(fieldName) && ! fields.contains("*")) {
      continue; //we're not interested in this field Still an issue here
    }

    SimpleOrderedMap<Object> fieldMap = new SimpleOrderedMap<>();

    // May be null for index fields that do not resolve in the schema.
    SchemaField sfield = schema.getFieldOrNull( fieldName );
    FieldType ftype = (sfield==null)?null:sfield.getType();

    fieldMap.add( "type", (ftype==null)?null:ftype.getTypeName() );
    fieldMap.add("schema", getFieldFlags(sfield));
    if (sfield != null && schema.isDynamicField(sfield.getName()) && schema.getDynamicPattern(sfield.getName()) != null) {
      fieldMap.add("dynamicBase", schema.getDynamicPattern(sfield.getName()));
    }
    Terms terms = reader.terms(fieldName);
    if (terms == null) { // Not indexed, so we need to report what we can (it made it through the fl param if specified)
      finfo.add( fieldName, fieldMap );
      continue;
    }

    if(sfield != null && sfield.indexed() ) {
      if (params.getBool(INCLUDE_INDEX_FIELD_FLAGS,true)) {
        // Sample one live document so we can report per-document index flags.
        Document doc = getFirstLiveDoc(terms, reader);

        if (doc != null) {
          // Found a document with this field
          try {
            IndexableField fld = doc.getField(fieldName);
            if (fld != null) {
              fieldMap.add("index", getFieldFlags(fld));
            } else {
              // it is a non-stored field...
              fieldMap.add("index", "(unstored field)");
            }
          } catch (Exception ex) {
            log.warn("error reading field: {}", fieldName);
          }
        }
      }
      fieldMap.add("docs", terms.getDocCount());
    }
    if (fields != null && (fields.contains(fieldName) || fields.contains("*"))) {
      getDetailedFieldInfo(req, fieldName, fieldMap);
    }
    // Add the field
    finfo.add( fieldName, fieldMap );
  }
  return finfo;
}
Example 10
Source File: ExportWriter.java From lucene-solr with Apache License 2.0 | 4 votes |
/**
 * Creates one {@link FieldWriter} per requested export field, validating that each
 * field exists, has docValues, and (for SortableTextField) has
 * {@code useDocValuesAsStored='true'}.
 *
 * @param fields   the field names to export.
 * @param searcher supplies the schema used to resolve the fields.
 * @return a writer per field, in the same order as {@code fields}.
 * @throws IOException if a field is unknown, lacks docValues, or has an
 *         unsupported type.
 */
protected FieldWriter[] getFieldWriters(String[] fields, SolrIndexSearcher searcher) throws IOException {
  IndexSchema schema = searcher.getSchema();
  FieldWriter[] writers = new FieldWriter[fields.length];
  for (int i = 0; i < fields.length; i++) {
    String field = fields[i];
    SchemaField schemaField = null;

    try {
      schemaField = schema.getField(field);
    } catch (Exception e) {
      // Unknown field: surface the schema error as an IOException for callers.
      throw new IOException(e);
    }

    if (!schemaField.hasDocValues()) {
      throw new IOException(schemaField + " must have DocValues to use this feature.");
    }

    FieldType fieldType = schemaField.getType();
    if (fieldType instanceof SortableTextField && schemaField.useDocValuesAsStored() == false) {
      throw new IOException(schemaField + " Must have useDocValuesAsStored='true' to be used with export writer");
    }

    writers[i] = createFieldWriter(field, fieldType, schemaField);
  }
  return writers;
}

/**
 * Picks the concrete FieldWriter implementation for one field by its type.
 * The boolean passed to MultiFieldWriter matches the original per-type values.
 */
private FieldWriter createFieldWriter(String field, FieldType fieldType, SchemaField schemaField) throws IOException {
  boolean multiValued = schemaField.multiValued();
  if (fieldType instanceof IntValueFieldType) {
    return multiValued ? new MultiFieldWriter(field, fieldType, schemaField, true)
                       : new IntFieldWriter(field);
  }
  if (fieldType instanceof LongValueFieldType) {
    return multiValued ? new MultiFieldWriter(field, fieldType, schemaField, true)
                       : new LongFieldWriter(field);
  }
  if (fieldType instanceof FloatValueFieldType) {
    return multiValued ? new MultiFieldWriter(field, fieldType, schemaField, true)
                       : new FloatFieldWriter(field);
  }
  if (fieldType instanceof DoubleValueFieldType) {
    return multiValued ? new MultiFieldWriter(field, fieldType, schemaField, true)
                       : new DoubleFieldWriter(field);
  }
  if (fieldType instanceof StrField || fieldType instanceof SortableTextField) {
    return multiValued ? new MultiFieldWriter(field, fieldType, schemaField, false)
                       : new StringFieldWriter(field, fieldType);
  }
  if (fieldType instanceof DateValueFieldType) {
    return multiValued ? new MultiFieldWriter(field, fieldType, schemaField, false)
                       : new DateFieldWriter(field);
  }
  if (fieldType instanceof BoolField) {
    return multiValued ? new MultiFieldWriter(field, fieldType, schemaField, true)
                       : new BoolFieldWriter(field, fieldType);
  }
  throw new IOException("Export fields must be one of the following types: int,float,long,double,string,date,boolean,SortableText");
}
Example 11
Source File: FileBasedSpellChecker.java From lucene-solr with Apache License 2.0 | 4 votes |
private void loadExternalFileDictionary(SolrCore core, SolrIndexSearcher searcher) { try { IndexSchema schema = null == searcher ? core.getLatestSchema() : searcher.getSchema(); // Get the field's analyzer if (fieldTypeName != null && schema.getFieldTypeNoEx(fieldTypeName) != null) { FieldType fieldType = schema.getFieldTypes().get(fieldTypeName); // Do index-time analysis using the given fieldType's analyzer Directory ramDir = new ByteBuffersDirectory(); LogMergePolicy mp = new LogByteSizeMergePolicy(); mp.setMergeFactor(300); IndexWriter writer = new IndexWriter( ramDir, new IndexWriterConfig(fieldType.getIndexAnalyzer()). setMaxBufferedDocs(150). setMergePolicy(mp). setOpenMode(IndexWriterConfig.OpenMode.CREATE) // TODO: if we enable this, codec gets angry since field won't exist in the schema // .setCodec(core.getCodec()) ); List<String> lines = core.getResourceLoader().getLines(sourceLocation, characterEncoding); for (String s : lines) { Document d = new Document(); d.add(new TextField(WORD_FIELD_NAME, s, Field.Store.NO)); writer.addDocument(d); } writer.forceMerge(1); writer.close(); dictionary = new HighFrequencyDictionary(DirectoryReader.open(ramDir), WORD_FIELD_NAME, 0.0f); } else { // check if character encoding is defined if (characterEncoding == null) { dictionary = new PlainTextDictionary(core.getResourceLoader().openResource(sourceLocation)); } else { dictionary = new PlainTextDictionary(new InputStreamReader(core.getResourceLoader().openResource(sourceLocation), characterEncoding)); } } } catch (IOException e) { log.error( "Unable to load spellings", e); } }
Example 12
Source File: QueryAutoFilteringComponent.java From query-autofiltering-component with Apache License 2.0 | 4 votes |
/**
 * Builds a Solr filter-query string for one field and its matched values.
 *
 * <p>Delegates to the multi-field overload when {@code fieldName} contains the
 * field delimiter. For a single multi-word value it may OR in an alternate
 * single-term interpretation (e.g. "white linen perfume" vs the brand
 * "White Linen"). For multiple values it joins them with OR — or AND for
 * multi-valued fields when so configured, unless an explicit 'or' token appears
 * inside the matched term range.</p>
 *
 * @param rb           current response builder (supplies the searcher/schema).
 * @param fieldName    target field; may be a delimited multi-field spec.
 * @param valList      matched field values.
 * @param termPosRange inclusive start / end token positions the values span.
 * @param queryTokens  lower-cased query tokens, indexed by position.
 * @param suffix       boost or other text appended verbatim to the query.
 * @return the filter query string.
 */
private String getFilterQuery( ResponseBuilder rb, String fieldName, ArrayList<String> valList,
                               int[] termPosRange, ArrayList<char[]> queryTokens, String suffix) {
  // Multi-field spec: split and recurse into the array overload.
  if (fieldName.indexOf( fieldDelim ) > 0) {
    return getFilterQuery( rb, fieldName.split( fieldSplitExpr ), valList, termPosRange, queryTokens, suffix );
  }
  if (valList.size() == 1) {
    // check if valList[0] is multi-term - if so, check if there is a single term equivalent
    // if this returns non-null, create an OR query with single term version
    // example "white linen perfume" vs "white linen shirt" where "White Linen" is a brand
    String term = valList.get( 0 );
    if (term.indexOf( " " ) > 0) {
      String singleTermQuery = getSingleTermQuery( term );
      if (singleTermQuery != null) {
        StringBuilder strb = new StringBuilder( );
        // EH: possible meta-escaping problem if value includes {!field f=<fieldName>}value
        strb.append( "(" ).append( fieldName ).append( ":" )
            .append( term ).append( " OR (" ).append( singleTermQuery ).append( "))" ).append( suffix );
        Log.debug( "returning composite query: " + strb.toString( ) );
        return strb.toString( );
      }
    }
    String query = fieldName + ":" + term + suffix;
    Log.debug( "returning single query: " + query );
    return query;
  } else {
    // Multiple values: decide AND vs OR based on field multi-valuedness config.
    SolrIndexSearcher searcher = rb.req.getSearcher();
    IndexSchema schema = searcher.getSchema();
    SchemaField field = schema.getField(fieldName);
    boolean useAnd = field.multiValued() && useAndForMultiValuedFields;
    // if query has 'or' in it and or is at a position 'within' the values for this field ...
    if (useAnd) {
      for (int i = termPosRange[0] + 1; i < termPosRange[1]; i++ ) {
        char[] qToken = queryTokens.get( i );
        // is the token 'or'?
        if (qToken.length == 2 && qToken[0] == 'o' && qToken[1] == 'r' ) {
          useAnd = false;
          break;
        }
      }
    }
    StringBuilder orQ = new StringBuilder( );
    for (String val : valList ) {
      if (orQ.length() > 0) orQ.append( (useAnd ? " AND " : " OR ") );
      orQ.append( val );
    }
    return fieldName + ":(" + orQ.toString() + ")" + suffix;
  }
}
Example 13
Source File: QueryAutoFilteringComponent.java From query-autofiltering-component with Apache License 2.0 | 4 votes |
/**
 * Builds a Solr filter-query string for one field and its matched values.
 *
 * <p>Delegates to the multi-field overload when {@code fieldName} contains the
 * field delimiter. For a single multi-word value it may OR in an alternate
 * single-term interpretation (e.g. "white linen perfume" vs the brand
 * "White Linen"). For multiple values it joins them with OR — or AND for
 * multi-valued fields when so configured, unless an explicit 'or' token appears
 * inside the matched term range.</p>
 *
 * @param rb           current response builder (supplies the searcher/schema).
 * @param fieldName    target field; may be a delimited multi-field spec.
 * @param valList      matched field values.
 * @param termPosRange inclusive start / end token positions the values span.
 * @param queryTokens  lower-cased query tokens, indexed by position.
 * @param suffix       boost or other text appended verbatim to the query.
 * @return the filter query string.
 */
private String getFilterQuery( ResponseBuilder rb, String fieldName, ArrayList<String> valList,
                               int[] termPosRange, ArrayList<char[]> queryTokens, String suffix) {
  // Multi-field spec: split and recurse into the array overload.
  if (fieldName.indexOf( fieldDelim ) > 0) {
    return getFilterQuery( rb, fieldName.split( fieldSplitExpr ), valList, termPosRange, queryTokens, suffix );
  }
  if (valList.size() == 1) {
    // check if valList[0] is multi-term - if so, check if there is a single term equivalent
    // if this returns non-null, create an OR query with single term version
    // example "white linen perfume" vs "white linen shirt" where "White Linen" is a brand
    String term = valList.get( 0 );
    if (term.indexOf( " " ) > 0) {
      String singleTermQuery = getSingleTermQuery( term );
      if (singleTermQuery != null) {
        StringBuilder strb = new StringBuilder( );
        strb.append( "(" ).append( fieldName ).append( ":" )
            .append( term ).append( " OR (" ).append( singleTermQuery ).append( "))" ).append( suffix );
        Log.debug( "returning composite query: " + strb.toString( ) );
        return strb.toString( );
      }
    }
    String query = fieldName + ":" + term + suffix;
    Log.debug( "returning single query: " + query );
    return query;
  } else {
    // Check if it is a MultiValued Field - if so, use AND internally
    SolrIndexSearcher searcher = rb.req.getSearcher();
    IndexSchema schema = searcher.getSchema();
    SchemaField field = schema.getField(fieldName);
    boolean useAnd = field.multiValued() && useAndForMultiValuedFields;
    // if query has 'or' in it and or is at a position 'within' the values for this field ...
    if (useAnd) {
      for (int i = termPosRange[0] + 1; i < termPosRange[1]; i++ ) {
        char[] qToken = queryTokens.get( i );
        // is the token 'or'?
        if (qToken.length == 2 && qToken[0] == 'o' && qToken[1] == 'r' ) {
          useAnd = false;
          break;
        }
      }
    }
    StringBuilder orQ = new StringBuilder( );
    for (String val : valList ) {
      if (orQ.length() > 0) orQ.append( (useAnd ? " AND " : " OR ") );
      orQ.append( val );
    }
    String fq = fieldName + ":(" + orQ.toString() + ")" + suffix;
    Log.debug( "fq = " + fq );
    return fq;
  }
}