Java Code Examples for org.elasticsearch.search.aggregations.support.ValuesSource#Bytes
The following examples show how to use org.elasticsearch.search.aggregations.support.ValuesSource#Bytes.
The source project and file for each example are noted above the code.
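Examples 1 and 3 follow the same dispatch pattern: doCreateInternal receives a generic ValuesSource and uses instanceof checks to decide whether the field resolved to bytes (strings), bytes with global ordinals, or numerics. The sketch below restates that classification on its own; the helper class is hypothetical and not part of Elasticsearch, but the ValuesSource types are the real ones used throughout the examples.

import org.elasticsearch.search.aggregations.support.ValuesSource;

// Illustrative helper only (not in Elasticsearch): classifies a ValuesSource the same way
// the aggregator factories below do, checking the most specific type first.
final class ValuesSourceKind {
    static String describe(ValuesSource valuesSource) {
        if (valuesSource instanceof ValuesSource.Numeric) {
            return "numeric field";
        }
        if (valuesSource instanceof ValuesSource.Bytes.WithOrdinals) {
            return "string/bytes field with global ordinals available";
        }
        if (valuesSource instanceof ValuesSource.Bytes) {
            return "string/bytes field without ordinals (map-based execution)";
        }
        return "unsupported field type";
    }
}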
Example 1
Source File: SamplerAggregator.java From Elasticsearch with Apache License 2.0
@Override
protected Aggregator doCreateInternal(ValuesSource valuesSource, AggregationContext context, Aggregator parent,
        boolean collectsFromSingleBucket, List<PipelineAggregator> pipelineAggregators,
        Map<String, Object> metaData) throws IOException {
    if (valuesSource instanceof ValuesSource.Numeric) {
        return new DiversifiedNumericSamplerAggregator(name, shardSize, factories, context, parent,
                pipelineAggregators, metaData, (Numeric) valuesSource, maxDocsPerValue);
    }
    if (valuesSource instanceof ValuesSource.Bytes) {
        ExecutionMode execution = null;
        if (executionHint != null) {
            execution = ExecutionMode.fromString(executionHint, context.searchContext().parseFieldMatcher());
        }
        // In some cases using ordinals is just not supported: override it
        if (execution == null) {
            execution = ExecutionMode.GLOBAL_ORDINALS;
        }
        if ((execution.needsGlobalOrdinals()) && (!(valuesSource instanceof ValuesSource.Bytes.WithOrdinals))) {
            execution = ExecutionMode.MAP;
        }
        return execution.create(name, factories, shardSize, maxDocsPerValue, valuesSource, context, parent,
                pipelineAggregators, metaData);
    }
    throw new AggregationExecutionException("Sampler aggregation cannot be applied to field ["
            + config.fieldContext().field() + "]. It can only be applied to numeric or string fields.");
}
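The part of this example specific to ValuesSource.Bytes is the execution-mode resolution: an explicit executionHint wins, otherwise GLOBAL_ORDINALS is assumed, and if the chosen mode needs global ordinals that the ValuesSource cannot provide, collection falls back to MAP. A minimal restatement of that fallback, assuming it sits inside SamplerAggregator next to the method above (the helper itself is hypothetical, not part of the Elasticsearch source):

// Hypothetical helper: restates the fallback logic of the method above.
private static ExecutionMode resolveExecution(ExecutionMode hinted, ValuesSource valuesSource) {
    ExecutionMode execution = (hinted != null) ? hinted : ExecutionMode.GLOBAL_ORDINALS;
    if (execution.needsGlobalOrdinals() && !(valuesSource instanceof ValuesSource.Bytes.WithOrdinals)) {
        // Global ordinals are not available for this source, so fall back to map-based collection.
        execution = ExecutionMode.MAP;
    }
    return execution;
}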
Example 2
Source File: TopKAggregator.java From elasticsearch-topk-plugin with Apache License 2.0
public TopKAggregator(String name, Number size, Number capacity, AggregatorFactories factories,
        long estimatedBucketsCount, ValuesSource.Bytes valuesSource, AggregationContext aggregationContext,
        Aggregator parent) {
    super(name, factories, aggregationContext, parent);
    this.size = size;
    this.capacity = capacity;
    this.valuesSource = valuesSource;
    if (valuesSource != null) {
        final long initialSize = estimatedBucketsCount < 2 ? 1 : estimatedBucketsCount;
        this.summaries = bigArrays.newObjectArray(initialSize);
        this.bucketOrds = bigArrays.newObjectArray(initialSize);
        this.termToBucket = bigArrays.newObjectArray(initialSize);
    }
}
Example 3
Source File: TermsAggregatorFactory.java From Elasticsearch with Apache License 2.0
@Override
protected Aggregator doCreateInternal(ValuesSource valuesSource, AggregationContext aggregationContext,
        Aggregator parent, boolean collectsFromSingleBucket, List<PipelineAggregator> pipelineAggregators,
        Map<String, Object> metaData) throws IOException {
    if (collectsFromSingleBucket == false) {
        return asMultiBucketAggregator(this, aggregationContext, parent);
    }
    if (valuesSource instanceof ValuesSource.Bytes) {
        ExecutionMode execution = null;
        if (executionHint != null) {
            execution = ExecutionMode.fromString(executionHint, aggregationContext.searchContext().parseFieldMatcher());
        }
        // In some cases, using ordinals is just not supported: override it
        if (!(valuesSource instanceof ValuesSource.Bytes.WithOrdinals)) {
            execution = ExecutionMode.MAP;
        }
        final long maxOrd;
        final double ratio;
        if (execution == null || execution.needsGlobalOrdinals()) {
            ValuesSource.Bytes.WithOrdinals valueSourceWithOrdinals = (ValuesSource.Bytes.WithOrdinals) valuesSource;
            IndexSearcher indexSearcher = aggregationContext.searchContext().searcher();
            maxOrd = valueSourceWithOrdinals.globalMaxOrd(indexSearcher);
            ratio = maxOrd / ((double) indexSearcher.getIndexReader().numDocs());
        } else {
            maxOrd = -1;
            ratio = -1;
        }
        // Let's try to use a good default
        if (execution == null) {
            // If there is a parent bucket aggregator, the number of instances of this aggregator is going
            // to be unbounded and most instances may only aggregate few documents, so use hash-based
            // global ordinals to keep the bucket ords dense.
            if (Aggregator.descendsFromBucketAggregator(parent)) {
                execution = ExecutionMode.GLOBAL_ORDINALS_HASH;
            } else {
                if (factories == AggregatorFactories.EMPTY) {
                    if (ratio <= 0.5 && maxOrd <= 2048) {
                        // 0.5: we need to reduce the number of global-ordinal look-ups by at least half.
                        // 2048: GLOBAL_ORDINALS_LOW_CARDINALITY has additional memory usage, which is directly
                        // linked to maxOrd, so we need to limit it.
                        execution = ExecutionMode.GLOBAL_ORDINALS_LOW_CARDINALITY;
                    } else {
                        execution = ExecutionMode.GLOBAL_ORDINALS;
                    }
                } else {
                    execution = ExecutionMode.GLOBAL_ORDINALS;
                }
            }
        }
        return execution.create(name, factories, valuesSource, order, bucketCountThresholds, includeExclude,
                aggregationContext, parent, collectMode, showTermDocCountError, pipelineAggregators, metaData);
    }
    if ((includeExclude != null) && (includeExclude.isRegexBased())) {
        throw new AggregationExecutionException("Aggregation [" + name + "] cannot support regular expression style include/exclude "
                + "settings as they can only be applied to string fields. "
                + "Use an array of numeric values for include/exclude clauses used to filter numeric fields");
    }
    if (valuesSource instanceof ValuesSource.Numeric) {
        IncludeExclude.LongFilter longFilter = null;
        if (((ValuesSource.Numeric) valuesSource).isFloatingPoint()) {
            if (includeExclude != null) {
                longFilter = includeExclude.convertToDoubleFilter();
            }
            return new DoubleTermsAggregator(name, factories, (ValuesSource.Numeric) valuesSource, config.format(),
                    order, bucketCountThresholds, aggregationContext, parent, collectMode, showTermDocCountError,
                    longFilter, pipelineAggregators, metaData);
        }
        if (includeExclude != null) {
            longFilter = includeExclude.convertToLongFilter();
        }
        return new LongTermsAggregator(name, factories, (ValuesSource.Numeric) valuesSource, config.format(),
                order, bucketCountThresholds, aggregationContext, parent, collectMode, showTermDocCountError,
                longFilter, pipelineAggregators, metaData);
    }
    throw new AggregationExecutionException("terms aggregation cannot be applied to field ["
            + config.fieldContext().field() + "]. It can only be applied to numeric or string fields.");
}
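Most of this method is spent choosing a sensible default ExecutionMode when no hint was supplied. The decision tree below is a standalone restatement of that heuristic, assuming it lives inside TermsAggregatorFactory where the ExecutionMode enum is visible; the helper itself is hypothetical.

// Hypothetical helper: restates the default-selection heuristic of the method above.
private static ExecutionMode defaultExecution(boolean underBucketAggregator, boolean hasSubAggregations,
        double ratio, long maxOrd) {
    if (underBucketAggregator) {
        // Potentially unbounded number of aggregator instances, each seeing few documents:
        // hashed global ordinals keep the bucket ordinals dense.
        return ExecutionMode.GLOBAL_ORDINALS_HASH;
    }
    if (!hasSubAggregations && ratio <= 0.5 && maxOrd <= 2048) {
        // ratio = maxOrd / numDocs: a low ratio means global-ordinal look-ups drop by at least half,
        // and the 2048 cap bounds the extra memory, which grows with maxOrd.
        return ExecutionMode.GLOBAL_ORDINALS_LOW_CARDINALITY;
    }
    return ExecutionMode.GLOBAL_ORDINALS;
}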
Example 4
Source File: TopKParser.java From elasticsearch-topk-plugin with Apache License 2.0
@Override
public AggregatorFactory parse(String aggregationName, XContentParser parser, SearchContext context) throws IOException {
    ValuesSourceConfig<ValuesSource.Bytes> config = new ValuesSourceConfig<>(ValuesSource.Bytes.class);
    String field = null;
    Number size = null;
    Number capacity = 1000;
    XContentParser.Token token;
    String currentFieldName = null;
    while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
        if (token == XContentParser.Token.FIELD_NAME) {
            currentFieldName = parser.currentName();
        } else if (token == XContentParser.Token.VALUE_STRING) {
            if ("field".equals(currentFieldName)) {
                field = parser.text();
            } else {
                throw new SearchParseException(context, "Unknown key for a " + token + " in [" + aggregationName + "]: [" + currentFieldName + "].");
            }
        } else if (token == XContentParser.Token.VALUE_NUMBER) {
            if ("size".equals(currentFieldName)) {
                size = parser.numberValue();
            } else if ("capacity".equals(currentFieldName)) {
                capacity = parser.numberValue();
            } else {
                throw new SearchParseException(context, "Unknown key for a " + token + " in [" + aggregationName + "]: [" + currentFieldName + "].");
            }
        } else {
            throw new SearchParseException(context, "Unexpected token " + token + " in [" + aggregationName + "].");
        }
    }
    if (field == null) {
        throw new SearchParseException(context, "Key 'field' cannot be null.");
    }
    if (size == null) {
        throw new SearchParseException(context, "Key 'size' cannot be null.");
    }
    FieldMapper<?> mapper = context.smartNameFieldMapper(field);
    if (mapper == null) {
        config.unmapped(true);
        return new TopKAggregator.Factory(aggregationName, config, size, capacity);
    }
    config.fieldContext(new FieldContext(field, context.fieldData().getForField(mapper), mapper));
    return new TopKAggregator.Factory(aggregationName, config, size, capacity);
}
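The parser accepts exactly three keys: a required string field, a required numeric size, and an optional capacity that defaults to 1000. A request body along the following lines should satisfy it; the outer type name and the aggregation/field names are assumptions, since the name the plugin registers this parser under is not shown in this excerpt.

// Example request body for the parser above. "top-k" stands in for whatever type name the
// plugin registers; "products" and "category" are made-up aggregation and field names.
String requestBody =
        "{ \"aggs\": { \"products\": { \"top-k\": {"
      + "    \"field\": \"category\","   // required: the string field to aggregate on
      + "    \"size\": 10,"              // required: how many top terms to return
      + "    \"capacity\": 1000"         // optional: defaults to 1000 in the parser
      + "} } } }";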
Example 5
Source File: TopKAggregator.java From elasticsearch-topk-plugin with Apache License 2.0
public Factory(String name, ValuesSourceConfig<ValuesSource.Bytes> valueSourceConfig, Number size, Number capacity) {
    super(name, InternalTopK.TYPE.name(), valueSourceConfig);
    this.size = size;
    this.capacity = capacity;
}
Example 6
Source File: TopKAggregator.java From elasticsearch-topk-plugin with Apache License 2.0
@Override
public Aggregator create(ValuesSource.Bytes valuesSource, long expectedBucketsCount,
        AggregationContext aggregationContext, Aggregator parent) {
    return new TopKAggregator(name, size, capacity, factories, expectedBucketsCount, valuesSource,
            aggregationContext, parent);
}
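Taken together, Examples 4 through 6 show the plugin's wiring: TopKParser builds a ValuesSourceConfig<ValuesSource.Bytes> and hands it to the Factory (Example 5), and once the framework has resolved that config to a concrete ValuesSource.Bytes, the Factory's create method above passes it to the TopKAggregator constructor shown in Example 2.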