Java Code Examples for org.apache.solr.schema.SchemaField#hasDocValues()
The following examples show how to use org.apache.solr.schema.SchemaField#hasDocValues().
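Before the examples, here is a minimal sketch of the pattern most of them share: look the field up in the schema and check hasDocValues() (often alongside isPointField(), stored(), or multiValued()) before committing to a docValues-based code path. The class name, helper method, and error messages below are illustrative assumptions, not code from any of the projects listed.

import org.apache.solr.common.SolrException;
import org.apache.solr.schema.IndexSchema;
import org.apache.solr.schema.SchemaField;

// Hypothetical helper: fail fast when a feature needs docValues but the field lacks them.
public class DocValuesCheckExample {
  static void requireDocValues(IndexSchema schema, String fieldName) {
    SchemaField field = schema.getFieldOrNull(fieldName);
    if (field == null) {
      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
          "field '" + fieldName + "' does not exist");
    }
    // Point-based numeric fields generally need docValues for sorting, faceting, and joins,
    // which is why many of the examples below reject them when hasDocValues() is false.
    if (field.getType().isPointField() && !field.hasDocValues()) {
      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
          "field '" + fieldName + "' must have docValues for this feature");
    }
  }
}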
Example 1
Source File: JoinQuery.java From lucene-solr with Apache License 2.0 | 6 votes |
public DocSet getDocSet() throws IOException {
  SchemaField fromSchemaField = fromSearcher.getSchema().getField(fromField);
  SchemaField toSchemaField = toSearcher.getSchema().getField(toField);

  boolean usePoints = false;
  if (toSchemaField.getType().isPointField()) {
    if (!fromSchemaField.hasDocValues()) {
      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
          "join from field " + fromSchemaField + " should have docValues to join with points field " + toSchemaField);
    }
    usePoints = true;
  }

  if (!usePoints) {
    return getDocSetEnumerate();
  }

  // point fields
  GraphPointsCollector collector = new GraphPointsCollector(fromSchemaField, null, null);
  fromSearcher.search(q, collector);
  Query resultQ = collector.getResultQuery(toSchemaField, false);
  // don't cache the resulting docSet... the query may be very large.
  // Better to cache the results of the join query itself.
  DocSet result = resultQ == null ? DocSet.empty() : toSearcher.getDocSetNC(resultQ, null);
  return result;
}
Example 2
Source File: UniqueAgg.java From lucene-solr with Apache License 2.0 | 6 votes |
@Override
public SlotAcc createSlotAcc(FacetContext fcontext, long numDocs, int numSlots) throws IOException {
  SchemaField sf = fcontext.qcontext.searcher().getSchema().getField(getArg());
  if (sf.multiValued() || sf.getType().multiValuedFieldCache()) {
    if (sf.getType().isPointField()) {
      return new SortedNumericAcc(fcontext, getArg(), numSlots);
    } else if (sf.hasDocValues()) {
      return new UniqueMultiDvSlotAcc(fcontext, sf, numSlots, null);
    } else {
      return new UniqueMultivaluedSlotAcc(fcontext, sf, numSlots, null);
    }
  } else {
    if (sf.getType().getNumberType() != null) {
      return new NumericAcc(fcontext, getArg(), numSlots);
    } else {
      return new UniqueSinglevaluedSlotAcc(fcontext, sf, numSlots, null);
    }
  }
}
Example 3
Source File: MtasSolrComponentFacet.java From mtas with Apache License 2.0 | 6 votes |
/**
 * Gets the field type.
 *
 * @param schema the schema
 * @param field the field
 * @return the field type
 * @throws IOException Signals that an I/O exception has occurred.
 */
private String getFieldType(IndexSchema schema, String field) throws IOException {
  SchemaField sf = schema.getField(field);
  FieldType ft = sf.getType();
  if (ft != null) {
    if (ft.isPointField() && !sf.hasDocValues()) {
      return ComponentFacet.TYPE_POINTFIELD_WITHOUT_DOCVALUES;
    }
    NumberType nt = ft.getNumberType();
    if (nt != null) {
      return nt.name();
    } else {
      return ComponentFacet.TYPE_STRING;
    }
  } else {
    // best guess
    return ComponentFacet.TYPE_STRING;
  }
}
Example 4
Source File: RealTimeGetComponent.java From lucene-solr with Apache License 2.0 | 6 votes |
private static SolrInputDocument toSolrInputDocument(Document doc, IndexSchema schema) {
  SolrInputDocument out = new SolrInputDocument();
  for (IndexableField f : doc.getFields()) {
    String fname = f.name();
    SchemaField sf = schema.getFieldOrNull(f.name());
    Object val = null;
    if (sf != null) {
      if ((!sf.hasDocValues() && !sf.stored()) || schema.isCopyFieldTarget(sf)) continue;
      val = sf.getType().toObject(f);   // object or external string?
    } else {
      val = f.stringValue();
      if (val == null) val = f.numericValue();
      if (val == null) val = f.binaryValue();
      if (val == null) val = f;
    }

    // todo: how to handle targets of copy fields (including polyfield sub-fields)?
    out.addField(fname, val);
  }
  return out;
}
Example 5
Source File: TestRetrieveFieldsOptimizer.java From lucene-solr with Apache License 2.0 | 6 votes |
IndexSchema addFields(IndexSchema schema) {
  List<SchemaField> fieldsToAdd = new ArrayList<>();

  for (RetrieveField field : fields.values()) {
    allFields.add(field);
    SchemaField schemaField = field.schemaField;
    fieldsToAdd.add(schemaField);
    if (schemaField.multiValued()) {
      multiValuedFields.add(field);
    }
    if (schemaField.hasDocValues() && schemaField.stored() == false) {
      dvNotStoredFields.add(field);
    }
    if (schemaField.hasDocValues() == false && schemaField.stored()) {
      storedNotDvFields.add(field);
    }
    if (schemaField.hasDocValues() && schemaField.stored()) {
      storedAndDvFields.add(field);
    }
    if (schemaField.stored() && schemaField.multiValued()) {
      storedMvFields.add(field);
    }
  }

  return schema.addFields(fieldsToAdd, Collections.emptyMap(), false);
}
Example 6
Source File: CountValsAgg.java From lucene-solr with Apache License 2.0 | 6 votes |
@Override
public SlotAcc createSlotAcc(FacetContext fcontext, long numDocs, int numSlots) throws IOException {
  ValueSource vs = getArg();
  if (vs instanceof FieldNameValueSource) {
    String field = ((FieldNameValueSource) vs).getFieldName();
    SchemaField sf = fcontext.qcontext.searcher().getSchema().getField(field);
    if (sf.multiValued() || sf.getType().multiValuedFieldCache()) {
      if (sf.hasDocValues()) {
        if (sf.getType().isPointField()) {
          return new CountSortedNumericDVAcc(fcontext, sf, numSlots);
        }
        return new CountSortedSetDVAcc(fcontext, sf, numSlots);
      }
      if (sf.getType().isPointField()) {
        throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
            "'countvals' aggregation not supported for PointField without docValues");
      }
      return new CountMultiValuedAcc(fcontext, sf, numSlots);
    } else {
      vs = sf.getType().getValueSource(sf, null);
    }
  }
  return new CountValSlotAcc(vs, fcontext, numSlots);
}
Example 7
Source File: TopLevelJoinQuery.java From lucene-solr with Apache License 2.0 | 6 votes |
private SortedSetDocValues validateAndFetchDocValues(SolrIndexSearcher solrSearcher, String fieldName, String querySide) throws IOException {
  final IndexSchema schema = solrSearcher.getSchema();
  final SchemaField field = schema.getFieldOrNull(fieldName);
  if (field == null) {
    throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
        querySide + " field '" + fieldName + "' does not exist");
  }

  if (!field.hasDocValues()) {
    throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
        "'top-level' join queries require both 'from' and 'to' fields to have docValues, but " + querySide +
            " field [" + fieldName + "] does not.");
  }

  final LeafReader leafReader = solrSearcher.getSlowAtomicReader();
  if (field.multiValued()) {
    return DocValues.getSortedSet(leafReader, fieldName);
  }
  return DocValues.singleton(DocValues.getSorted(leafReader, fieldName));
}
Example 8
Source File: SimpleFacets.java From lucene-solr with Apache License 2.0 | 6 votes |
private Collector getInsanityWrapper(final String field, Collector collector) {
  SchemaField sf = searcher.getSchema().getFieldOrNull(field);
  if (sf != null && !sf.hasDocValues() && !sf.multiValued() && sf.getType().getNumberType() != null) {
    // it's a single-valued numeric field: we must currently create insanity :(
    // there isn't a GroupedFacetCollector that works on numerics right now...
    return new FilterCollector(collector) {
      @Override
      public LeafCollector getLeafCollector(LeafReaderContext context) throws IOException {
        LeafReader insane = Insanity.wrapInsanity(context.reader(), field);
        return in.getLeafCollector(insane.getContext());
      }
    };
  } else {
    return collector;
  }
}
Example 9
Source File: SolrDocumentFetcher.java From lucene-solr with Apache License 2.0 | 5 votes |
private boolean canSubstituteDvForStored(FieldInfo fieldInfo, SchemaField schemaField) {
  if (!schemaField.hasDocValues() || !schemaField.stored()) return false;
  if (schemaField.multiValued()) return false;
  DocValuesType docValuesType = fieldInfo.getDocValuesType();
  NumberType numberType = schemaField.getType().getNumberType();
  // can not decode a numeric without knowing its numberType
  if (numberType == null && (docValuesType == DocValuesType.SORTED_NUMERIC || docValuesType == DocValuesType.NUMERIC)) {
    return false;
  }
  return true;
}
Example 10
Source File: VersionInfo.java From lucene-solr with Apache License 2.0 | 5 votes |
/**
 * Gets and returns the {@link org.apache.solr.common.params.CommonParams#VERSION_FIELD} from the specified
 * schema, after verifying that it is indexed, stored, and single-valued.
 * If any of these pre-conditions are not met, it throws a SolrException
 * with a user-suitable message indicating the problem.
 */
public static SchemaField getAndCheckVersionField(IndexSchema schema) throws SolrException {
  final String errPrefix = VERSION_FIELD + " field must exist in schema and be searchable (indexed or docValues) and retrievable (stored or docValues) and not multiValued";
  SchemaField sf = schema.getFieldOrNull(VERSION_FIELD);

  if (null == sf) {
    throw new SolrException(SolrException.ErrorCode.SERVER_ERROR,
        errPrefix + " (" + VERSION_FIELD + " does not exist)");
  }
  if (!sf.indexed() && !sf.hasDocValues()) {
    throw new SolrException(SolrException.ErrorCode.SERVER_ERROR,
        errPrefix + " (" + VERSION_FIELD + " not searchable)");
  }
  if (!sf.stored() && !sf.hasDocValues()) {
    throw new SolrException(SolrException.ErrorCode.SERVER_ERROR,
        errPrefix + " (" + VERSION_FIELD + " not retrievable)");
  }
  if (sf.multiValued()) {
    throw new SolrException(SolrException.ErrorCode.SERVER_ERROR,
        errPrefix + " (" + VERSION_FIELD + " is multiValued)");
  }

  return sf;
}
Example 11
Source File: SumAgg.java From lucene-solr with Apache License 2.0 | 5 votes |
@Override
public SlotAcc createSlotAcc(FacetContext fcontext, long numDocs, int numSlots) throws IOException {
  ValueSource vs = getArg();
  if (vs instanceof FieldNameValueSource) {
    String field = ((FieldNameValueSource) vs).getFieldName();
    SchemaField sf = fcontext.qcontext.searcher().getSchema().getField(field);
    if (sf.getType().getNumberType() == null) {
      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
          name() + " aggregation not supported for " + sf.getType().getTypeName());
    }
    if (sf.multiValued() || sf.getType().multiValuedFieldCache()) {
      if (sf.hasDocValues()) {
        if (sf.getType().isPointField()) {
          return new SumSortedNumericAcc(fcontext, sf, numSlots);
        }
        return new SumSortedSetAcc(fcontext, sf, numSlots);
      }
      if (sf.getType().isPointField()) {
        throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
            name() + " aggregation not supported for PointField w/o docValues");
      }
      return new SumUnInvertedFieldAcc(fcontext, sf, numSlots);
    }
    vs = sf.getType().getValueSource(sf, null);
  }
  return new SlotAcc.SumSlotAcc(vs, fcontext, numSlots);
}
Example 12
Source File: TabularResponseWriter.java From lucene-solr with Apache License 2.0 | 5 votes |
/**
 * Returns true if the field should be skipped, false otherwise.
 * @param field name of the field
 * @return boolean value
 */
public boolean shouldSkipField(String field) {
  Set<String> explicitReqFields = returnFields.getExplicitlyRequestedFieldNames();
  SchemaField sf = schema.getFieldOrNull(field);
  // Return stored fields or useDocValuesAsStored=true fields,
  // unless an explicit field list is specified
  return (returnStoredOrDocValStored && !(explicitReqFields != null && explicitReqFields.contains(field))
      && sf != null && !sf.stored() && !(sf.hasDocValues() && sf.useDocValuesAsStored()));
}
Example 13
Source File: SimpleFacets.java From lucene-solr with Apache License 2.0 | 5 votes |
/**
 * Returns a <code>NamedList</code> with each entry having the "key" of the interval as name and the count of docs
 * in that interval as value. All intervals added in the request are included in the returned
 * <code>NamedList</code> (including those with 0 count), and it's required that the order of the intervals
 * is deterministic and equal in all shards of a distributed request, otherwise the collation of results
 * will fail.
 */
public NamedList<Object> getFacetIntervalCounts() throws IOException, SyntaxError {
  NamedList<Object> res = new SimpleOrderedMap<Object>();

  String[] fields = global.getParams(FacetParams.FACET_INTERVAL);
  if (fields == null || fields.length == 0) return res;

  for (String field : fields) {
    final ParsedParams parsed = parseParams(FacetParams.FACET_INTERVAL, field);
    String[] intervalStrs = parsed.required.getFieldParams(parsed.facetValue, FacetParams.FACET_INTERVAL_SET);
    SchemaField schemaField = searcher.getCore().getLatestSchema().getField(parsed.facetValue);
    if (parsed.params.getBool(GroupParams.GROUP_FACET, false)) {
      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
          "Interval Faceting can't be used with " + GroupParams.GROUP_FACET);
    }
    if (schemaField.getType().isPointField() && !schemaField.hasDocValues()) {
      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
          "Can't use interval faceting on a PointField without docValues");
    }

    SimpleOrderedMap<Integer> fieldResults = new SimpleOrderedMap<Integer>();
    res.add(parsed.key, fieldResults);
    IntervalFacets intervalFacets = new IntervalFacets(schemaField, searcher, parsed.docs, intervalStrs, parsed.params);
    for (FacetInterval interval : intervalFacets) {
      fieldResults.add(interval.getKey(), interval.getCount());
    }
  }

  return res;
}
Example 14
Source File: TestSolrQueryParser.java From lucene-solr with Apache License 2.0 | 5 votes |
@Test
public void testSyntax() throws Exception {
  // a bare * should be treated as *:*
  assertJQ(req("q", "*", "df", "doesnotexist_s")
      , "/response/docs/[0]=="   // make sure we get something...
  );
  assertJQ(req("q", "doesnotexist_s:*")
      , "/response/numFound==0"   // nothing should be found
  );
  assertJQ(req("q", "doesnotexist_s:( * * * )")
      , "/response/numFound==0"   // nothing should be found
  );

  // length of date math caused issues...
  {
    SchemaField foo_dt = h.getCore().getLatestSchema().getField("foo_dt");
    String expected = "foo_dt:2013-09-11T00:00:00Z";
    if (foo_dt.getType().isPointField()) {
      expected = "(foo_dt:[1378857600000 TO 1378857600000])";
      if (foo_dt.hasDocValues() && foo_dt.indexed()) {
        expected = "IndexOrDocValuesQuery" + expected;
      }
    }
    assertJQ(req("q", "foo_dt:\"2013-03-08T00:46:15Z/DAY+000MILLISECONDS+00SECONDS+00MINUTES+00HOURS+0000000000YEARS+6MONTHS+3DAYS\"", "debug", "query")
        , "/debug/parsedquery=='" + expected + "'");
  }
}
Example 15
Source File: PercentileAgg.java From lucene-solr with Apache License 2.0 | 5 votes |
@Override
public SlotAcc createSlotAcc(FacetContext fcontext, long numDocs, int numSlots) throws IOException {
  ValueSource vs = getArg();
  if (vs instanceof FieldNameValueSource) {
    String field = ((FieldNameValueSource) vs).getFieldName();
    SchemaField sf = fcontext.qcontext.searcher().getSchema().getField(field);
    if (sf.getType().getNumberType() == null) {
      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
          name() + " aggregation not supported for " + sf.getType().getTypeName());
    }
    if (sf.multiValued() || sf.getType().multiValuedFieldCache()) {
      if (sf.hasDocValues()) {
        if (sf.getType().isPointField()) {
          return new PercentileSortedNumericAcc(fcontext, sf, numSlots);
        }
        return new PercentileSortedSetAcc(fcontext, sf, numSlots);
      }
      if (sf.getType().isPointField()) {
        throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
            name() + " aggregation not supported for PointField w/o docValues");
      }
      return new PercentileUnInvertedFieldAcc(fcontext, sf, numSlots);
    }
    vs = sf.getType().getValueSource(sf, null);
  }
  return new Acc(vs, fcontext, numSlots);
}
Example 16
Source File: SolrDocumentFetcher.java From lucene-solr with Apache License 2.0 | 4 votes |
@SuppressWarnings({"unchecked"})
SolrDocumentFetcher(SolrIndexSearcher searcher, SolrConfig solrConfig, boolean cachingEnabled) {
  this.searcher = searcher;
  this.enableLazyFieldLoading = solrConfig.enableLazyFieldLoading;
  if (cachingEnabled) {
    documentCache = solrConfig.documentCacheConfig == null ? null : solrConfig.documentCacheConfig.newInstance();
  } else {
    documentCache = null;
  }

  final Set<String> nonStoredDVsUsedAsStored = new HashSet<>();
  final Set<String> allNonStoredDVs = new HashSet<>();
  final Set<String> nonStoredDVsWithoutCopyTargets = new HashSet<>();
  final Set<String> storedLargeFields = new HashSet<>();
  final Set<String> dvsCanSubstituteStored = new HashSet<>();
  final Set<String> allStoreds = new HashSet<>();

  for (FieldInfo fieldInfo : searcher.getFieldInfos()) { // can find materialized dynamic fields, unlike using the Solr IndexSchema.
    final SchemaField schemaField = searcher.getSchema().getFieldOrNull(fieldInfo.name);
    if (schemaField == null) {
      continue;
    }
    if (canSubstituteDvForStored(fieldInfo, schemaField)) {
      dvsCanSubstituteStored.add(fieldInfo.name);
    }
    if (schemaField.stored()) {
      allStoreds.add(fieldInfo.name);
    }
    if (!schemaField.stored() && schemaField.hasDocValues()) {
      if (schemaField.useDocValuesAsStored()) {
        nonStoredDVsUsedAsStored.add(fieldInfo.name);
      }
      allNonStoredDVs.add(fieldInfo.name);
      if (!searcher.getSchema().isCopyFieldTarget(schemaField)) {
        nonStoredDVsWithoutCopyTargets.add(fieldInfo.name);
      }
    }
    if (schemaField.stored() && schemaField.isLarge()) {
      storedLargeFields.add(schemaField.getName());
    }
  }

  this.nonStoredDVsUsedAsStored = Collections.unmodifiableSet(nonStoredDVsUsedAsStored);
  this.allNonStoredDVs = Collections.unmodifiableSet(allNonStoredDVs);
  this.nonStoredDVsWithoutCopyTargets = Collections.unmodifiableSet(nonStoredDVsWithoutCopyTargets);
  this.largeFields = Collections.unmodifiableSet(storedLargeFields);
  this.dvsCanSubstituteStored = Collections.unmodifiableSet(dvsCanSubstituteStored);
  this.allStored = Collections.unmodifiableSet(allStoreds);
}
Example 17
Source File: ReverseOrdFieldSource.java From lucene-solr with Apache License 2.0 | 4 votes |
@Override
@SuppressWarnings({"rawtypes"})
public FunctionValues getValues(Map context, LeafReaderContext readerContext) throws IOException {
  final int off = readerContext.docBase;
  final LeafReader r;
  Object o = context.get("searcher");
  if (o instanceof SolrIndexSearcher) {
    @SuppressWarnings("resource") final SolrIndexSearcher is = (SolrIndexSearcher) o;
    SchemaField sf = is.getSchema().getFieldOrNull(field);
    if (sf != null && sf.getType().isPointField()) {
      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
          "rord() is not supported over Points based field " + field);
    }
    if (sf != null && sf.hasDocValues() == false && sf.multiValued() == false && sf.getType().getNumberType() != null) {
      // it's a single-valued numeric field: we must currently create insanity :(
      List<LeafReaderContext> leaves = is.getIndexReader().leaves();
      LeafReader[] insaneLeaves = new LeafReader[leaves.size()];
      int upto = 0;
      for (LeafReaderContext raw : leaves) {
        insaneLeaves[upto++] = Insanity.wrapInsanity(raw.reader(), field);
      }
      r = SlowCompositeReaderWrapper.wrap(new MultiReader(insaneLeaves));
    } else {
      // reuse ordinalmap
      r = ((SolrIndexSearcher) o).getSlowAtomicReader();
    }
  } else {
    IndexReader topReader = ReaderUtil.getTopLevelContext(readerContext).reader();
    r = SlowCompositeReaderWrapper.wrap(topReader);
  }
  // if it's e.g. tokenized/multivalued, emulate old behavior of single-valued fc
  final SortedDocValues sindex = SortedSetSelector.wrap(DocValues.getSortedSet(r, field), SortedSetSelector.Type.MIN);
  final int end = sindex.getValueCount();

  return new IntDocValues(this) {
    @Override
    public int intVal(int doc) throws IOException {
      if (doc + off > sindex.docID()) {
        sindex.advance(doc + off);
      }
      if (doc + off == sindex.docID()) {
        return (end - sindex.ordValue() - 1);
      } else {
        return end;
      }
    }
  };
}
Example 18
Source File: FacetField.java From lucene-solr with Apache License 2.0 | 4 votes |
@Override
@SuppressWarnings("rawtypes")
public FacetProcessor createFacetProcessor(FacetContext fcontext) {
  SchemaField sf = fcontext.searcher.getSchema().getField(field);
  FieldType ft = sf.getType();
  boolean multiToken = sf.multiValued() || ft.multiValuedFieldCache();

  if (fcontext.facetInfo != null) {
    // refinement... we will end up either skipping the entire facet, or calculating only specific facet buckets
    if (multiToken && !sf.hasDocValues() && method != FacetMethod.DV && sf.isUninvertible()) {
      // Match the access method from the first phase.
      // It won't always matter, but does currently for an all-values bucket
      return new FacetFieldProcessorByArrayUIF(fcontext, this, sf);
    }
    return new FacetFieldProcessorByArrayDV(fcontext, this, sf);
  }

  NumberType ntype = ft.getNumberType();
  // ensure we can support the requested options for numeric faceting:
  if (ntype != null) {
    if (prefix != null) {
      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
          "Doesn't make sense to set facet prefix on a numeric field");
    }
    if (mincount == 0) {
      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
          "Numeric fields do not support facet mincount=0; try indexing as terms");
      // TODO if indexed=true then we could add support
    }
  }

  // TODO auto-pick ENUM/STREAM SOLR-9351 when index asc and DocSet cardinality is *not* much smaller than term cardinality
  if (method == FacetMethod.ENUM) { // at the moment these two are the same
    method = FacetMethod.STREAM;
  }
  if (method == FacetMethod.STREAM && sf.indexed() && !ft.isPointField() &&
      // whether we can use stream processing depends on whether this is a shard request, whether
      // re-sorting has been requested, and if the effective sort during collection is "index asc"
      (fcontext.isShard()
          // for a shard request, the effective per-shard sort must be index asc
          ? FacetSort.INDEX_ASC.equals(null == prelim_sort ? sort : prelim_sort)
          // for a non-shard request, we can only use streaming if there is no pre-sorting
          : (null == prelim_sort && FacetSort.INDEX_ASC.equals(sort)))) {
    return new FacetFieldProcessorByEnumTermsStream(fcontext, this, sf);
  }

  // TODO if method=UIF and not single-valued numerics then simply choose that now? TODO add FieldType.getDocValuesType()

  if (!multiToken) {
    if (mincount > 0 && prefix == null && (ntype != null || method == FacetMethod.DVHASH)) {
      // TODO can we auto-pick for strings when term cardinality is much greater than DocSet cardinality?
      // or if we don't know cardinality but DocSet size is very small
      return new FacetFieldProcessorByHashDV(fcontext, this, sf);
    } else if (ntype == null) {
      // single valued string...
      return new FacetFieldProcessorByArrayDV(fcontext, this, sf);
    } else {
      throw new SolrException(SolrException.ErrorCode.SERVER_ERROR,
          "Couldn't pick facet algorithm for field " + sf);
    }
  }

  if (sf.hasDocValues() && sf.getType().isPointField()) {
    return new FacetFieldProcessorByHashDV(fcontext, this, sf);
  }

  // multi-valued after this point

  if (sf.hasDocValues() || method == FacetMethod.DV || !sf.isUninvertible()) {
    // single and multi-valued string docValues
    return new FacetFieldProcessorByArrayDV(fcontext, this, sf);
  }

  // Top-level multi-valued field cache (UIF)
  return new FacetFieldProcessorByArrayUIF(fcontext, this, sf);
}
Example 19
Source File: TestSolrQueryParser.java From lucene-solr with Apache License 2.0 | 4 votes |
@Test
public void testFieldExistsQueries() throws SyntaxError {
  SolrQueryRequest req = req();
  String[] fieldSuffix = new String[] {
      "ti", "tf", "td", "tl", "tdt",
      "pi", "pf", "pd", "pl", "pdt",
      "i", "f", "d", "l", "dt", "s", "b",
      "is", "fs", "ds", "ls", "dts", "ss", "bs",
      "i_dv", "f_dv", "d_dv", "l_dv", "dt_dv", "s_dv", "b_dv",
      "is_dv", "fs_dv", "ds_dv", "ls_dv", "dts_dv", "ss_dv", "bs_dv",
      "i_dvo", "f_dvo", "d_dvo", "l_dvo", "dt_dvo",
      "t", "t_on", "b_norms", "s_norms", "dt_norms", "i_norms", "l_norms", "f_norms", "d_norms"
  };
  String[] existenceQueries = new String[] { "*", "[* TO *]" };

  for (String existenceQuery : existenceQueries) {
    for (String suffix : fieldSuffix) {
      IndexSchema indexSchema = h.getCore().getLatestSchema();
      String field = "foo_" + suffix;
      String query = field + ":" + existenceQuery;
      QParser qParser = QParser.getParser(query, req);
      Query createdQuery = qParser.getQuery();
      SchemaField schemaField = indexSchema.getField(field);

      // Test float & double realNumber queries differently
      if ("[* TO *]".equals(existenceQuery) && (schemaField.getType().getNumberType() == NumberType.DOUBLE || schemaField.getType().getNumberType() == NumberType.FLOAT)) {
        assertFalse("For float and double fields \"" + query + "\" is not an existence query, so the query returned should not be a DocValuesFieldExistsQuery.", createdQuery instanceof DocValuesFieldExistsQuery);
        assertFalse("For float and double fields \"" + query + "\" is not an existence query, so the query returned should not be a NormsFieldExistsQuery.", createdQuery instanceof NormsFieldExistsQuery);
        assertFalse("For float and double fields \"" + query + "\" is not an existence query, so NaN should not be matched via a ConstantScoreQuery.", createdQuery instanceof ConstantScoreQuery);
        assertFalse("For float and double fields \"" + query + "\" is not an existence query, so NaN should not be matched via a BooleanQuery (NaN and [* TO *]).", createdQuery instanceof BooleanQuery);
      } else {
        if (schemaField.hasDocValues()) {
          assertTrue("Field has docValues, so existence query \"" + query + "\" should return DocValuesFieldExistsQuery", createdQuery instanceof DocValuesFieldExistsQuery);
        } else if (!schemaField.omitNorms() && !schemaField.getType().isPointField()) { //TODO: Remove !isPointField() for SOLR-14199
          assertTrue("Field has norms and no docValues, so existence query \"" + query + "\" should return NormsFieldExistsQuery", createdQuery instanceof NormsFieldExistsQuery);
        } else if (schemaField.getType().getNumberType() == NumberType.DOUBLE || schemaField.getType().getNumberType() == NumberType.FLOAT) {
          assertTrue("PointField with NaN values must include \"exists or NaN\" if the field doesn't have norms or docValues: \"" + query + "\".", createdQuery instanceof ConstantScoreQuery);
          assertTrue("PointField with NaN values must include \"exists or NaN\" if the field doesn't have norms or docValues: \"" + query + "\".", ((ConstantScoreQuery) createdQuery).getQuery() instanceof BooleanQuery);
          assertEquals("PointField with NaN values must include \"exists or NaN\" if the field doesn't have norms or docValues: \"" + query + "\". This boolean query must be an OR.", 1, ((BooleanQuery) ((ConstantScoreQuery) createdQuery).getQuery()).getMinimumNumberShouldMatch());
          assertEquals("PointField with NaN values must include \"exists or NaN\" if the field doesn't have norms or docValues: \"" + query + "\". This boolean query must have 2 clauses.", 2, ((BooleanQuery) ((ConstantScoreQuery) createdQuery).getQuery()).clauses().size());
        } else {
          assertFalse("Field doesn't have docValues, so existence query \"" + query + "\" should not return DocValuesFieldExistsQuery", createdQuery instanceof DocValuesFieldExistsQuery);
          assertFalse("Field doesn't have norms, so existence query \"" + query + "\" should not return NormsFieldExistsQuery", createdQuery instanceof NormsFieldExistsQuery);
        }
      }
    }
  }
}
Example 20
Source File: ExportWriter.java From lucene-solr with Apache License 2.0 | 4 votes |
protected FieldWriter[] getFieldWriters(String[] fields, SolrIndexSearcher searcher) throws IOException {
  IndexSchema schema = searcher.getSchema();
  FieldWriter[] writers = new FieldWriter[fields.length];
  for (int i = 0; i < fields.length; i++) {
    String field = fields[i];
    SchemaField schemaField = null;

    try {
      schemaField = schema.getField(field);
    } catch (Exception e) {
      throw new IOException(e);
    }

    if (!schemaField.hasDocValues()) {
      throw new IOException(schemaField + " must have DocValues to use this feature.");
    }

    boolean multiValued = schemaField.multiValued();
    FieldType fieldType = schemaField.getType();

    if (fieldType instanceof SortableTextField && schemaField.useDocValuesAsStored() == false) {
      throw new IOException(schemaField + " Must have useDocValuesAsStored='true' to be used with export writer");
    }

    if (fieldType instanceof IntValueFieldType) {
      if (multiValued) {
        writers[i] = new MultiFieldWriter(field, fieldType, schemaField, true);
      } else {
        writers[i] = new IntFieldWriter(field);
      }
    } else if (fieldType instanceof LongValueFieldType) {
      if (multiValued) {
        writers[i] = new MultiFieldWriter(field, fieldType, schemaField, true);
      } else {
        writers[i] = new LongFieldWriter(field);
      }
    } else if (fieldType instanceof FloatValueFieldType) {
      if (multiValued) {
        writers[i] = new MultiFieldWriter(field, fieldType, schemaField, true);
      } else {
        writers[i] = new FloatFieldWriter(field);
      }
    } else if (fieldType instanceof DoubleValueFieldType) {
      if (multiValued) {
        writers[i] = new MultiFieldWriter(field, fieldType, schemaField, true);
      } else {
        writers[i] = new DoubleFieldWriter(field);
      }
    } else if (fieldType instanceof StrField || fieldType instanceof SortableTextField) {
      if (multiValued) {
        writers[i] = new MultiFieldWriter(field, fieldType, schemaField, false);
      } else {
        writers[i] = new StringFieldWriter(field, fieldType);
      }
    } else if (fieldType instanceof DateValueFieldType) {
      if (multiValued) {
        writers[i] = new MultiFieldWriter(field, fieldType, schemaField, false);
      } else {
        writers[i] = new DateFieldWriter(field);
      }
    } else if (fieldType instanceof BoolField) {
      if (multiValued) {
        writers[i] = new MultiFieldWriter(field, fieldType, schemaField, true);
      } else {
        writers[i] = new BoolFieldWriter(field, fieldType);
      }
    } else {
      throw new IOException("Export fields must be one of the following types: int,float,long,double,string,date,boolean,SortableText");
    }
  }
  return writers;
}