Java Code Examples for org.elasticsearch.common.xcontent.XContentParser#longValue()
The following examples show how to use org.elasticsearch.common.xcontent.XContentParser#longValue().
Each example is taken from an open-source project; the original source file and license are noted above each snippet.
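Before diving into the project code below, here is a minimal sketch of the pattern nearly all of these examples follow: advance the token stream, remember the current field name, and call longValue() once the parser is positioned on that field's value. The class, method, and the "version" field name are illustrative assumptions for this sketch only, not taken from any of the projects listed below; the sketch assumes the parser is already positioned on the object's START_OBJECT token, as in the examples.

import java.io.IOException;

import org.elasticsearch.common.xcontent.XContentParser;

final class LongValueSketch {

    // Hypothetical field name, used only to illustrate the pattern.
    private static final String VERSION_FIELD = "version";

    // Reads an object like {"version": 42, ...} and returns 42, or -1 if the field is absent.
    static long readVersion(XContentParser parser) throws IOException {
        long version = -1;
        String currentFieldName = null;
        XContentParser.Token token;
        while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
            if (token == XContentParser.Token.FIELD_NAME) {
                currentFieldName = parser.currentName();
            } else if (token.isValue() && VERSION_FIELD.equals(currentFieldName)) {
                // longValue() interprets the value the parser is currently positioned on as a long.
                version = parser.longValue();
            }
        }
        return version;
    }
}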
Example 1
Source File: CardinalityParser.java From Elasticsearch with Apache License 2.0
@Override
public AggregatorFactory parse(String name, XContentParser parser, SearchContext context) throws IOException {
    ValuesSourceParser<?> vsParser = ValuesSourceParser.any(name, InternalCardinality.TYPE, context).formattable(false).build();
    long precisionThreshold = -1;
    XContentParser.Token token;
    String currentFieldName = null;
    while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
        if (token == XContentParser.Token.FIELD_NAME) {
            currentFieldName = parser.currentName();
        } else if (vsParser.token(currentFieldName, token, parser)) {
            continue;
        } else if (token.isValue()) {
            if (context.parseFieldMatcher().match(currentFieldName, REHASH)) {
                // ignore
            } else if (context.parseFieldMatcher().match(currentFieldName, PRECISION_THRESHOLD)) {
                precisionThreshold = parser.longValue();
            } else {
                throw new SearchParseException(context, "Unknown key for a " + token + " in [" + name + "]: ["
                        + currentFieldName + "].", parser.getTokenLocation());
            }
        } else {
            throw new SearchParseException(context, "Unexpected token " + token + " in [" + name + "].",
                    parser.getTokenLocation());
        }
    }
    return new CardinalityAggregatorFactory(name, vsParser.config(), precisionThreshold);
}
Example 2
Source File: ShardStateMetaData.java From Elasticsearch with Apache License 2.0
@Override
public ShardStateMetaData fromXContent(XContentParser parser) throws IOException {
    XContentParser.Token token = parser.nextToken();
    if (token == null) {
        return null;
    }
    long version = -1;
    Boolean primary = null;
    String currentFieldName = null;
    String indexUUID = IndexMetaData.INDEX_UUID_NA_VALUE;
    while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
        if (token == XContentParser.Token.FIELD_NAME) {
            currentFieldName = parser.currentName();
        } else if (token.isValue()) {
            if (VERSION_KEY.equals(currentFieldName)) {
                version = parser.longValue();
            } else if (PRIMARY_KEY.equals(currentFieldName)) {
                primary = parser.booleanValue();
            } else if (INDEX_UUID_KEY.equals(currentFieldName)) {
                indexUUID = parser.text();
            } else {
                throw new CorruptStateException("unexpected field in shard state [" + currentFieldName + "]");
            }
        } else {
            throw new CorruptStateException("unexpected token in shard state [" + token.name() + "]");
        }
    }
    if (primary == null) {
        throw new CorruptStateException("missing value for [primary] in shard state");
    }
    if (version == -1) {
        throw new CorruptStateException("missing value for [version] in shard state");
    }
    return new ShardStateMetaData(version, primary, indexUUID);
}
Example 3
Source File: TimeConfiguration.java From anomaly-detection with Apache License 2.0
/**
 * Parse raw json content into schedule instance.
 *
 * @param parser json based content parser
 * @return schedule instance
 * @throws IOException IOException if content can't be parsed correctly
 */
public static TimeConfiguration parse(XContentParser parser) throws IOException {
    long interval = 0;
    ChronoUnit unit = null;
    String scheduleType = null;
    ensureExpectedToken(XContentParser.Token.START_OBJECT, parser.currentToken(), parser::getTokenLocation);
    while (parser.nextToken() != XContentParser.Token.END_OBJECT) {
        scheduleType = parser.currentName();
        parser.nextToken();
        switch (scheduleType) {
            case PERIOD_FIELD:
                while (parser.nextToken() != XContentParser.Token.END_OBJECT) {
                    String periodFieldName = parser.currentName();
                    parser.nextToken();
                    switch (periodFieldName) {
                        case INTERVAL_FIELD:
                            interval = parser.longValue();
                            break;
                        case UNIT_FIELD:
                            unit = ChronoUnit.valueOf(parser.text().toUpperCase(Locale.ROOT));
                            break;
                        default:
                            break;
                    }
                }
                break;
            default:
                break;
        }
    }
    if (PERIOD_FIELD.equals(scheduleType)) {
        return new IntervalTimeConfiguration(interval, unit);
    }
    throw new IllegalArgumentException("Find no schedule definition");
}
Example 4
Source File: AbstractXContentParser.java From Elasticsearch with Apache License 2.0
static Object readValue(XContentParser parser, MapFactory mapFactory, XContentParser.Token token) throws IOException {
    if (token == XContentParser.Token.VALUE_NULL) {
        return null;
    } else if (token == XContentParser.Token.VALUE_STRING) {
        return parser.text();
    } else if (token == XContentParser.Token.VALUE_NUMBER) {
        XContentParser.NumberType numberType = parser.numberType();
        if (numberType == XContentParser.NumberType.INT) {
            return parser.intValue();
        } else if (numberType == XContentParser.NumberType.LONG) {
            return parser.longValue();
        } else if (numberType == XContentParser.NumberType.FLOAT) {
            return parser.floatValue();
        } else if (numberType == XContentParser.NumberType.DOUBLE) {
            return parser.doubleValue();
        }
    } else if (token == XContentParser.Token.VALUE_BOOLEAN) {
        return parser.booleanValue();
    } else if (token == XContentParser.Token.START_OBJECT) {
        return readMap(parser, mapFactory);
    } else if (token == XContentParser.Token.START_ARRAY) {
        return readList(parser, mapFactory);
    } else if (token == XContentParser.Token.VALUE_EMBEDDED_OBJECT) {
        return parser.binaryValue();
    }
    return null;
}
Example 5
Source File: LongFieldMapper.java From Elasticsearch with Apache License 2.0
@Override
protected void innerParseCreateField(ParseContext context, List<Field> fields) throws IOException {
    long value;
    float boost = fieldType().boost();
    if (context.externalValueSet()) {
        Object externalValue = context.externalValue();
        if (externalValue == null) {
            if (fieldType().nullValue() == null) {
                return;
            }
            value = fieldType().nullValue();
        } else if (externalValue instanceof String) {
            String sExternalValue = (String) externalValue;
            if (sExternalValue.length() == 0) {
                if (fieldType().nullValue() == null) {
                    return;
                }
                value = fieldType().nullValue();
            } else {
                value = Long.parseLong(sExternalValue);
            }
        } else {
            value = ((Number) externalValue).longValue();
        }
        if (context.includeInAll(includeInAll, this)) {
            context.allEntries().addText(fieldType().names().fullName(), Long.toString(value), boost);
        }
    } else {
        XContentParser parser = context.parser();
        if (parser.currentToken() == XContentParser.Token.VALUE_NULL ||
                (parser.currentToken() == XContentParser.Token.VALUE_STRING && parser.textLength() == 0)) {
            if (fieldType().nullValue() == null) {
                return;
            }
            value = fieldType().nullValue();
            if (fieldType().nullValueAsString() != null && (context.includeInAll(includeInAll, this))) {
                context.allEntries().addText(fieldType().names().fullName(), fieldType().nullValueAsString(), boost);
            }
        } else if (parser.currentToken() == XContentParser.Token.START_OBJECT) {
            XContentParser.Token token;
            String currentFieldName = null;
            Long objValue = fieldType().nullValue();
            while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
                if (token == XContentParser.Token.FIELD_NAME) {
                    currentFieldName = parser.currentName();
                } else {
                    if ("value".equals(currentFieldName) || "_value".equals(currentFieldName)) {
                        if (parser.currentToken() != XContentParser.Token.VALUE_NULL) {
                            objValue = parser.longValue(coerce.value());
                        }
                    } else if ("boost".equals(currentFieldName) || "_boost".equals(currentFieldName)) {
                        boost = parser.floatValue();
                    } else {
                        throw new IllegalArgumentException("unknown property [" + currentFieldName + "]");
                    }
                }
            }
            if (objValue == null) {
                // no value
                return;
            }
            value = objValue;
        } else {
            value = parser.longValue(coerce.value());
            if (context.includeInAll(includeInAll, this)) {
                context.allEntries().addText(fieldType().names().fullName(), parser.text(), boost);
            }
        }
    }
    if (fieldType().indexOptions() != IndexOptions.NONE || fieldType().stored()) {
        CustomLongNumericField field = new CustomLongNumericField(value, fieldType());
        field.setBoost(boost);
        fields.add(field);
    }
    if (fieldType().hasDocValues()) {
        addDocValue(context, fields, value);
    }
}
Example 6
Source File: SnapshotInfo.java From crate with Apache License 2.0
/**
 * This method creates a SnapshotInfo from internal x-content. It does not
 * handle x-content written with the external version as external x-content
 * is only for display purposes and does not need to be parsed.
 */
public static SnapshotInfo fromXContentInternal(final XContentParser parser) throws IOException {
    String name = null;
    String uuid = null;
    Version version = Version.CURRENT;
    SnapshotState state = SnapshotState.IN_PROGRESS;
    String reason = null;
    List<String> indices = Collections.emptyList();
    long startTime = 0;
    long endTime = 0;
    int totalShards = 0;
    int successfulShards = 0;
    Boolean includeGlobalState = null;
    List<SnapshotShardFailure> shardFailures = Collections.emptyList();
    if (parser.currentToken() == null) { // fresh parser? move to the first token
        parser.nextToken();
    }
    if (parser.currentToken() == XContentParser.Token.START_OBJECT) { // on a start object move to next token
        parser.nextToken();
    }
    XContentParser.Token token;
    if ((token = parser.nextToken()) == XContentParser.Token.START_OBJECT) {
        String currentFieldName = parser.currentName();
        if (SNAPSHOT.equals(currentFieldName)) {
            while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
                if (token == XContentParser.Token.FIELD_NAME) {
                    currentFieldName = parser.currentName();
                    token = parser.nextToken();
                    if (token.isValue()) {
                        if (NAME.equals(currentFieldName)) {
                            name = parser.text();
                        } else if (UUID.equals(currentFieldName)) {
                            uuid = parser.text();
                        } else if (STATE.equals(currentFieldName)) {
                            state = SnapshotState.valueOf(parser.text());
                        } else if (REASON.equals(currentFieldName)) {
                            reason = parser.text();
                        } else if (START_TIME.equals(currentFieldName)) {
                            startTime = parser.longValue();
                        } else if (END_TIME.equals(currentFieldName)) {
                            endTime = parser.longValue();
                        } else if (TOTAL_SHARDS.equals(currentFieldName)) {
                            totalShards = parser.intValue();
                        } else if (SUCCESSFUL_SHARDS.equals(currentFieldName)) {
                            successfulShards = parser.intValue();
                        } else if (VERSION_ID.equals(currentFieldName)) {
                            version = Version.fromId(parser.intValue());
                        } else if (INCLUDE_GLOBAL_STATE.equals(currentFieldName)) {
                            includeGlobalState = parser.booleanValue();
                        }
                    } else if (token == XContentParser.Token.START_ARRAY) {
                        if (INDICES.equals(currentFieldName)) {
                            ArrayList<String> indicesArray = new ArrayList<>();
                            while (parser.nextToken() != XContentParser.Token.END_ARRAY) {
                                indicesArray.add(parser.text());
                            }
                            indices = Collections.unmodifiableList(indicesArray);
                        } else if (FAILURES.equals(currentFieldName)) {
                            ArrayList<SnapshotShardFailure> shardFailureArrayList = new ArrayList<>();
                            while (parser.nextToken() != XContentParser.Token.END_ARRAY) {
                                shardFailureArrayList.add(SnapshotShardFailure.fromXContent(parser));
                            }
                            shardFailures = Collections.unmodifiableList(shardFailureArrayList);
                        } else {
                            // It was probably created by newer version - ignoring
                            parser.skipChildren();
                        }
                    } else if (token == XContentParser.Token.START_OBJECT) {
                        // It was probably created by newer version - ignoring
                        parser.skipChildren();
                    }
                }
            }
        }
    } else {
        throw new ElasticsearchParseException("unexpected token [" + token + "]");
    }
    if (uuid == null) {
        // the old format where there wasn't a UUID
        uuid = name;
    }
    return new SnapshotInfo(new SnapshotId(name, uuid), indices, state, reason, version, startTime, endTime,
            totalShards, successfulShards, shardFailures, includeGlobalState);
}
Example 7
Source File: MetaData.java From crate with Apache License 2.0
public static MetaData fromXContent(XContentParser parser) throws IOException {
    Builder builder = new Builder();
    // we might get here after the meta-data element, or on a fresh parser
    XContentParser.Token token = parser.currentToken();
    String currentFieldName = parser.currentName();
    if (!"meta-data".equals(currentFieldName)) {
        token = parser.nextToken();
        if (token == XContentParser.Token.START_OBJECT) {
            // move to the field name (meta-data)
            token = parser.nextToken();
            if (token != XContentParser.Token.FIELD_NAME) {
                throw new IllegalArgumentException("Expected a field name but got " + token);
            }
            // move to the next object
            token = parser.nextToken();
        }
        currentFieldName = parser.currentName();
    }
    if (!"meta-data".equals(parser.currentName())) {
        throw new IllegalArgumentException("Expected [meta-data] as a field name but got " + currentFieldName);
    }
    if (token != XContentParser.Token.START_OBJECT) {
        throw new IllegalArgumentException("Expected a START_OBJECT but got " + token);
    }
    while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
        if (token == XContentParser.Token.FIELD_NAME) {
            currentFieldName = parser.currentName();
        } else if (token == XContentParser.Token.START_OBJECT) {
            if ("cluster_coordination".equals(currentFieldName)) {
                builder.coordinationMetaData(CoordinationMetaData.fromXContent(parser));
            } else if ("settings".equals(currentFieldName)) {
                builder.persistentSettings(Settings.fromXContent(parser));
            } else if ("indices".equals(currentFieldName)) {
                while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
                    builder.put(IndexMetaData.Builder.fromXContent(parser), false);
                }
            } else if ("templates".equals(currentFieldName)) {
                while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
                    builder.put(IndexTemplateMetaData.Builder.fromXContent(parser, parser.currentName()));
                }
            } else {
                try {
                    Custom custom = parser.namedObject(Custom.class, currentFieldName, null);
                    builder.putCustom(custom.getWriteableName(), custom);
                } catch (NamedObjectNotFoundException ex) {
                    LOGGER.warn("Skipping unknown custom object with type {}", currentFieldName);
                    parser.skipChildren();
                }
            }
        } else if (token.isValue()) {
            if ("version".equals(currentFieldName)) {
                builder.version = parser.longValue();
            } else if ("cluster_uuid".equals(currentFieldName) || "uuid".equals(currentFieldName)) {
                builder.clusterUUID = parser.text();
            } else if ("cluster_uuid_committed".equals(currentFieldName)) {
                builder.clusterUUIDCommitted = parser.booleanValue();
            } else {
                throw new IllegalArgumentException("Unexpected field [" + currentFieldName + "]");
            }
        } else {
            throw new IllegalArgumentException("Unexpected token " + token);
        }
    }
    return builder.build();
}
Example 8
Source File: NumberFieldMapper.java From crate with Apache License 2.0
@Override
public Long parse(XContentParser parser, boolean coerce) throws IOException {
    return parser.longValue(coerce);
}
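Unlike the no-argument longValue() used in most of the other examples, this overload takes a boolean flag. In these examples it is fed the mapper's coerce setting (Example 5 passes coerce.value() the same way), which, judging from this usage, controls how leniently non-integer input such as string-encoded numbers is converted to a long.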
Example 9
Source File: BlobStoreIndexShardSnapshot.java From crate with Apache License 2.0
/**
 * Parses shard snapshot metadata
 *
 * @param parser parser
 * @return shard snapshot metadata
 */
public static BlobStoreIndexShardSnapshot fromXContent(XContentParser parser) throws IOException {
    String snapshot = null;
    long indexVersion = -1;
    long startTime = 0;
    long time = 0;
    int incrementalFileCount = 0;
    long incrementalSize = 0;
    List<FileInfo> indexFiles = new ArrayList<>();
    if (parser.currentToken() == null) { // fresh parser? move to the first token
        parser.nextToken();
    }
    XContentParser.Token token = parser.currentToken();
    if (token == XContentParser.Token.START_OBJECT) {
        while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
            if (token == XContentParser.Token.FIELD_NAME) {
                String currentFieldName = parser.currentName();
                token = parser.nextToken();
                if (token.isValue()) {
                    if (PARSE_NAME.match(currentFieldName, parser.getDeprecationHandler())) {
                        snapshot = parser.text();
                    } else if (PARSE_INDEX_VERSION.match(currentFieldName, parser.getDeprecationHandler())) {
                        // The index-version is needed for backward compatibility with v 1.0
                        indexVersion = parser.longValue();
                    } else if (PARSE_START_TIME.match(currentFieldName, parser.getDeprecationHandler())) {
                        startTime = parser.longValue();
                    } else if (PARSE_TIME.match(currentFieldName, parser.getDeprecationHandler())) {
                        time = parser.longValue();
                    } else if (PARSE_INCREMENTAL_FILE_COUNT.match(currentFieldName, parser.getDeprecationHandler())) {
                        incrementalFileCount = parser.intValue();
                    } else if (PARSE_INCREMENTAL_SIZE.match(currentFieldName, parser.getDeprecationHandler())) {
                        incrementalSize = parser.longValue();
                    } else {
                        throw new ElasticsearchParseException("unknown parameter [{}]", currentFieldName);
                    }
                } else if (token == XContentParser.Token.START_ARRAY) {
                    if (PARSE_FILES.match(currentFieldName, parser.getDeprecationHandler())) {
                        while ((parser.nextToken()) != XContentParser.Token.END_ARRAY) {
                            indexFiles.add(FileInfo.fromXContent(parser));
                        }
                    } else {
                        throw new ElasticsearchParseException("unknown parameter [{}]", currentFieldName);
                    }
                } else {
                    throw new ElasticsearchParseException("unexpected token [{}]", token);
                }
            } else {
                throw new ElasticsearchParseException("unexpected token [{}]", token);
            }
        }
    }
    return new BlobStoreIndexShardSnapshot(snapshot, indexVersion, Collections.unmodifiableList(indexFiles),
            startTime, time, incrementalFileCount, incrementalSize);
}
Example 10
Source File: TermsEnumTermsQueryParser.java From siren-join with GNU Affero General Public License v3.0
@Override
public Query parse(QueryParseContext parseContext) throws IOException, QueryParsingException {
    XContentParser parser = parseContext.parser();
    XContentParser.Token token = parser.nextToken();
    if (token != XContentParser.Token.FIELD_NAME) {
        throw new QueryParsingException(parseContext, "[termsenum_terms] a field name is required");
    }
    String fieldName = parser.currentName();
    String queryName = null;
    byte[] value = null;
    Long cacheKey = null;
    token = parser.nextToken();
    if (token == XContentParser.Token.START_OBJECT) {
        String currentFieldName = null;
        while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
            if (token == XContentParser.Token.FIELD_NAME) {
                currentFieldName = parser.currentName();
            } else {
                if ("value".equals(currentFieldName)) {
                    value = parser.binaryValue();
                } else if ("_name".equals(currentFieldName)) {
                    queryName = parser.text();
                } else if ("_cache_key".equals(currentFieldName) || "_cacheKey".equals(currentFieldName)) {
                    cacheKey = parser.longValue();
                } else {
                    throw new QueryParsingException(parseContext, "[termsenum_terms] filter does not support [" + currentFieldName + "]");
                }
            }
        }
        parser.nextToken();
    } else {
        value = parser.binaryValue();
        // move to the next token
        parser.nextToken();
    }
    if (value == null) {
        throw new QueryParsingException(parseContext, "[termsenum_terms] a binary value is required");
    }
    if (cacheKey == null) { // cache key is mandatory - see #170
        throw new QueryParsingException(parseContext, "[termsenum_terms] a cache key is required");
    }
    if (fieldName == null) {
        throw new QueryParsingException(parseContext, "[termsenum_terms] a field name is required");
    }
    MappedFieldType fieldType = parseContext.fieldMapper(fieldName);
    if (fieldType == null) {
        return new MatchNoDocsQuery();
    }
    Query query = new TermsEnumTermsQuery(value, fieldName, cacheKey);
    if (queryName != null) {
        parseContext.addNamedQuery(queryName, query);
    }
    return query;
}
Example 11
Source File: FieldDataTermsQueryParser.java From siren-join with GNU Affero General Public License v3.0
@Override
public Query parse(QueryParseContext parseContext) throws IOException, QueryParsingException {
    XContentParser parser = parseContext.parser();
    XContentParser.Token token = parser.nextToken();
    if (token != XContentParser.Token.FIELD_NAME) {
        throw new QueryParsingException(parseContext, "[fielddata_terms] a field name is required");
    }
    String fieldName = parser.currentName();
    String queryName = null;
    byte[] value = null;
    Long cacheKey = null;
    token = parser.nextToken();
    if (token == XContentParser.Token.START_OBJECT) {
        String currentFieldName = null;
        while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
            if (token == XContentParser.Token.FIELD_NAME) {
                currentFieldName = parser.currentName();
            } else {
                if ("value".equals(currentFieldName)) {
                    value = parser.binaryValue();
                } else if ("_name".equals(currentFieldName)) {
                    queryName = parser.text();
                } else if ("_cache_key".equals(currentFieldName) || "_cacheKey".equals(currentFieldName)) {
                    cacheKey = parser.longValue();
                } else {
                    throw new QueryParsingException(parseContext, "[fielddata_terms] filter does not support [" + currentFieldName + "]");
                }
            }
        }
        parser.nextToken();
    } else {
        value = parser.binaryValue();
        // move to the next token
        parser.nextToken();
    }
    if (value == null) {
        throw new QueryParsingException(parseContext, "[fielddata_terms] a binary value is required");
    }
    if (cacheKey == null) { // cache key is mandatory - see #170
        throw new QueryParsingException(parseContext, "[fielddata_terms] a cache key is required");
    }
    if (fieldName == null) {
        throw new QueryParsingException(parseContext, "[fielddata_terms] a field name is required");
    }
    MappedFieldType fieldType = parseContext.fieldMapper(fieldName);
    if (fieldType == null) {
        return new MatchNoDocsQuery();
    }
    IndexFieldData fieldData = parseContext.getForField(fieldType);
    Query query = this.toFieldDataTermsQuery(fieldType, fieldData, value, cacheKey);
    if (queryName != null) {
        parseContext.addNamedQuery(queryName, query);
    }
    return query;
}
Example 12
Source File: TermVectorsRequest.java From Elasticsearch with Apache License 2.0
/**
 * populates a request object (pre-populated with defaults) based on a parser.
 */
public static void parseRequest(TermVectorsRequest termVectorsRequest, XContentParser parser) throws IOException {
    XContentParser.Token token;
    String currentFieldName = null;
    List<String> fields = new ArrayList<>();
    while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
        if (token == XContentParser.Token.FIELD_NAME) {
            currentFieldName = parser.currentName();
        } else if (currentFieldName != null) {
            if (currentFieldName.equals("fields")) {
                if (token == XContentParser.Token.START_ARRAY) {
                    while (parser.nextToken() != XContentParser.Token.END_ARRAY) {
                        fields.add(parser.text());
                    }
                } else {
                    throw new ElasticsearchParseException("failed to parse term vectors request. field [fields] must be an array");
                }
            } else if (currentFieldName.equals("offsets")) {
                termVectorsRequest.offsets(parser.booleanValue());
            } else if (currentFieldName.equals("positions")) {
                termVectorsRequest.positions(parser.booleanValue());
            } else if (currentFieldName.equals("payloads")) {
                termVectorsRequest.payloads(parser.booleanValue());
            } else if (currentFieldName.equals("term_statistics") || currentFieldName.equals("termStatistics")) {
                termVectorsRequest.termStatistics(parser.booleanValue());
            } else if (currentFieldName.equals("field_statistics") || currentFieldName.equals("fieldStatistics")) {
                termVectorsRequest.fieldStatistics(parser.booleanValue());
            } else if (currentFieldName.equals("dfs")) {
                termVectorsRequest.dfs(parser.booleanValue());
            } else if (currentFieldName.equals("per_field_analyzer") || currentFieldName.equals("perFieldAnalyzer")) {
                termVectorsRequest.perFieldAnalyzer(readPerFieldAnalyzer(parser.map()));
            } else if (currentFieldName.equals("filter")) {
                termVectorsRequest.filterSettings(readFilterSettings(parser, termVectorsRequest));
            } else if ("_index".equals(currentFieldName)) { // the following is important for multi request parsing.
                termVectorsRequest.index = parser.text();
            } else if ("_type".equals(currentFieldName)) {
                termVectorsRequest.type = parser.text();
            } else if ("_id".equals(currentFieldName)) {
                if (termVectorsRequest.doc != null) {
                    throw new ElasticsearchParseException("failed to parse term vectors request. either [id] or [doc] can be specified, but not both!");
                }
                termVectorsRequest.id = parser.text();
            } else if ("doc".equals(currentFieldName)) {
                if (termVectorsRequest.id != null) {
                    throw new ElasticsearchParseException("failed to parse term vectors request. either [id] or [doc] can be specified, but not both!");
                }
                termVectorsRequest.doc(jsonBuilder().copyCurrentStructure(parser));
            } else if ("_routing".equals(currentFieldName) || "routing".equals(currentFieldName)) {
                termVectorsRequest.routing = parser.text();
            } else if ("_version".equals(currentFieldName) || "version".equals(currentFieldName)) {
                termVectorsRequest.version = parser.longValue();
            } else if ("_version_type".equals(currentFieldName) || "_versionType".equals(currentFieldName)
                    || "version_type".equals(currentFieldName) || "versionType".equals(currentFieldName)) {
                termVectorsRequest.versionType = VersionType.fromString(parser.text());
            } else {
                throw new ElasticsearchParseException("failed to parse term vectors request. unknown field [{}]", currentFieldName);
            }
        }
    }
    if (fields.size() > 0) {
        String[] fieldsAsArray = new String[fields.size()];
        termVectorsRequest.selectedFields(fields.toArray(fieldsAsArray));
    }
}
Example 13
Source File: BlobStoreIndexShardSnapshot.java From Elasticsearch with Apache License 2.0
/**
 * Parses shard snapshot metadata
 *
 * @param parser parser
 * @return shard snapshot metadata
 */
public BlobStoreIndexShardSnapshot fromXContent(XContentParser parser, ParseFieldMatcher parseFieldMatcher) throws IOException {
    String snapshot = null;
    long indexVersion = -1;
    long startTime = 0;
    long time = 0;
    int numberOfFiles = 0;
    long totalSize = 0;
    List<FileInfo> indexFiles = new ArrayList<>();
    if (parser.currentToken() == null) { // fresh parser? move to the first token
        parser.nextToken();
    }
    XContentParser.Token token = parser.currentToken();
    if (token == XContentParser.Token.START_OBJECT) {
        while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
            if (token == XContentParser.Token.FIELD_NAME) {
                String currentFieldName = parser.currentName();
                token = parser.nextToken();
                if (token.isValue()) {
                    if (parseFieldMatcher.match(currentFieldName, ParseFields.NAME)) {
                        snapshot = parser.text();
                    } else if (parseFieldMatcher.match(currentFieldName, ParseFields.INDEX_VERSION)) {
                        // The index-version is needed for backward compatibility with v 1.0
                        indexVersion = parser.longValue();
                    } else if (parseFieldMatcher.match(currentFieldName, ParseFields.START_TIME)) {
                        startTime = parser.longValue();
                    } else if (parseFieldMatcher.match(currentFieldName, ParseFields.TIME)) {
                        time = parser.longValue();
                    } else if (parseFieldMatcher.match(currentFieldName, ParseFields.NUMBER_OF_FILES)) {
                        numberOfFiles = parser.intValue();
                    } else if (parseFieldMatcher.match(currentFieldName, ParseFields.TOTAL_SIZE)) {
                        totalSize = parser.longValue();
                    } else {
                        throw new ElasticsearchParseException("unknown parameter [{}]", currentFieldName);
                    }
                } else if (token == XContentParser.Token.START_ARRAY) {
                    if (parseFieldMatcher.match(currentFieldName, ParseFields.FILES)) {
                        while ((parser.nextToken()) != XContentParser.Token.END_ARRAY) {
                            indexFiles.add(FileInfo.fromXContent(parser));
                        }
                    } else {
                        throw new ElasticsearchParseException("unknown parameter [{}]", currentFieldName);
                    }
                } else {
                    throw new ElasticsearchParseException("unexpected token [{}]", token);
                }
            } else {
                throw new ElasticsearchParseException("unexpected token [{}]", token);
            }
        }
    }
    return new BlobStoreIndexShardSnapshot(snapshot, indexVersion, Collections.unmodifiableList(indexFiles),
            startTime, time, numberOfFiles, totalSize);
}
Example 14
Source File: HistogramParser.java From Elasticsearch with Apache License 2.0
@Override
public AggregatorFactory parse(String aggregationName, XContentParser parser, SearchContext context) throws IOException {
    ValuesSourceParser vsParser = ValuesSourceParser.numeric(aggregationName, InternalHistogram.TYPE, context)
            .targetValueType(ValueType.NUMERIC)
            .formattable(true)
            .build();
    boolean keyed = false;
    long minDocCount = 0;
    InternalOrder order = (InternalOrder) InternalOrder.KEY_ASC;
    long interval = -1;
    ExtendedBounds extendedBounds = null;
    long offset = 0;
    XContentParser.Token token;
    String currentFieldName = null;
    while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
        if (token == XContentParser.Token.FIELD_NAME) {
            currentFieldName = parser.currentName();
        } else if (vsParser.token(currentFieldName, token, parser)) {
            continue;
        } else if (token.isValue()) {
            if ("interval".equals(currentFieldName)) {
                interval = parser.longValue();
            } else if ("min_doc_count".equals(currentFieldName) || "minDocCount".equals(currentFieldName)) {
                minDocCount = parser.longValue();
            } else if ("keyed".equals(currentFieldName)) {
                keyed = parser.booleanValue();
            } else if ("offset".equals(currentFieldName)) {
                offset = parser.longValue();
            } else {
                throw new SearchParseException(context, "Unknown key for a " + token + " in aggregation ["
                        + aggregationName + "]: [" + currentFieldName + "].", parser.getTokenLocation());
            }
        } else if (token == XContentParser.Token.START_OBJECT) {
            if ("order".equals(currentFieldName)) {
                while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
                    if (token == XContentParser.Token.FIELD_NAME) {
                        currentFieldName = parser.currentName();
                    } else if (token == XContentParser.Token.VALUE_STRING) {
                        String dir = parser.text();
                        boolean asc = "asc".equals(dir);
                        if (!asc && !"desc".equals(dir)) {
                            throw new SearchParseException(context, "Unknown order direction [" + dir
                                    + "] in aggregation [" + aggregationName + "]. Should be either [asc] or [desc]",
                                    parser.getTokenLocation());
                        }
                        order = resolveOrder(currentFieldName, asc);
                    }
                }
            } else if (context.parseFieldMatcher().match(currentFieldName, EXTENDED_BOUNDS)) {
                extendedBounds = new ExtendedBounds();
                while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
                    if (token == XContentParser.Token.FIELD_NAME) {
                        currentFieldName = parser.currentName();
                    } else if (token.isValue()) {
                        if ("min".equals(currentFieldName)) {
                            extendedBounds.min = parser.longValue(true);
                        } else if ("max".equals(currentFieldName)) {
                            extendedBounds.max = parser.longValue(true);
                        } else {
                            throw new SearchParseException(context, "Unknown extended_bounds key for a " + token
                                    + " in aggregation [" + aggregationName + "]: [" + currentFieldName + "].",
                                    parser.getTokenLocation());
                        }
                    }
                }
            } else {
                throw new SearchParseException(context, "Unknown key for a " + token + " in aggregation ["
                        + aggregationName + "]: [" + currentFieldName + "].", parser.getTokenLocation());
            }
        } else {
            throw new SearchParseException(context, "Unexpected token " + token + " in aggregation ["
                    + aggregationName + "].", parser.getTokenLocation());
        }
    }
    if (interval < 1) {
        throw new SearchParseException(context,
                "Missing required field [interval] for histogram aggregation [" + aggregationName + "]",
                parser.getTokenLocation());
    }
    Rounding rounding = new Rounding.Interval(interval);
    if (offset != 0) {
        rounding = new Rounding.OffsetRounding((Rounding.Interval) rounding, offset);
    }
    if (extendedBounds != null) {
        // with numeric histogram, we can process here and fail fast if the bounds are invalid
        extendedBounds.processAndValidate(aggregationName, context, ValueParser.RAW);
    }
    return new HistogramAggregator.Factory(aggregationName, vsParser.config(), rounding, order, keyed, minDocCount,
            extendedBounds, new InternalHistogram.Factory());
}
Example 15
Source File: TenantProperty.java From Elasticsearch with Apache License 2.0
public static TenantProperty fromXContent(XContentParser parser) throws IOException {
    Builder builder = new Builder();
    XContentParser.Token token = parser.currentToken();
    String currentFieldName = parser.currentName();
    while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
        if (token == XContentParser.Token.FIELD_NAME) {
            currentFieldName = parser.currentName();
        } else if (token == XContentParser.Token.VALUE_STRING) {
            if ("name".equalsIgnoreCase(currentFieldName)) {
                builder.tenantName = parser.text();
            }
        } else if (token == XContentParser.Token.VALUE_NUMBER) {
            if ("id".equalsIgnoreCase(currentFieldName)) {
                builder.tenantId = parser.longValue();
            } else if ("desire_node_num".equalsIgnoreCase(currentFieldName)) {
                builder.desireInstanceNum = parser.intValue();
            }
        } else if (token == XContentParser.Token.START_ARRAY) {
            if ("nodes".equalsIgnoreCase(currentFieldName)) {
                while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
                    if (token == XContentParser.Token.START_OBJECT) {
                        String ipaddress = "";
                        int port = 0;
                        AllocatedNodeStatus allocatedNodeStatus = AllocatedNodeStatus.NORMAL;
                        while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
                            if (token == XContentParser.Token.FIELD_NAME) {
                                currentFieldName = parser.currentName();
                            } else if (token == XContentParser.Token.VALUE_STRING) {
                                if ("ip".equalsIgnoreCase(currentFieldName)) {
                                    ipaddress = parser.text();
                                } else if ("status".equalsIgnoreCase(currentFieldName)) {
                                    allocatedNodeStatus = AllocatedNodeStatus.valueOf(parser.text());
                                }
                            } else if (token == XContentParser.Token.VALUE_NUMBER) {
                                if ("port".equalsIgnoreCase(currentFieldName)) {
                                    port = parser.intValue();
                                }
                            }
                        }
                        AllocatedNodeInfo nodeId = new AllocatedNodeInfo(ipaddress, port, allocatedNodeStatus);
                        builder.addNode(nodeId);
                    }
                }
            }
        }
    }
    return builder.build();
}
Example 16
Source File: AnomalyDetectorRestTestCase.java From anomaly-detection with Apache License 2.0
public ToXContentObject[] getAnomalyDetector(String detectorId, BasicHeader header, boolean returnJob) throws IOException {
    Response response = TestHelpers
        .makeRequest(
            client(),
            "GET",
            TestHelpers.AD_BASE_DETECTORS_URI + "/" + detectorId + "?job=" + returnJob,
            null,
            "",
            ImmutableList.of(header)
        );
    assertEquals("Unable to get anomaly detector " + detectorId, RestStatus.OK, restStatus(response));
    XContentParser parser = createAdParser(XContentType.JSON.xContent(), response.getEntity().getContent());
    parser.nextToken();
    XContentParserUtils.ensureExpectedToken(XContentParser.Token.START_OBJECT, parser.currentToken(), parser::getTokenLocation);
    String id = null;
    Long version = null;
    AnomalyDetector detector = null;
    AnomalyDetectorJob detectorJob = null;
    while (parser.nextToken() != XContentParser.Token.END_OBJECT) {
        String fieldName = parser.currentName();
        parser.nextToken();
        switch (fieldName) {
            case "_id":
                id = parser.text();
                break;
            case "_version":
                version = parser.longValue();
                break;
            case "anomaly_detector":
                detector = AnomalyDetector.parse(parser);
                break;
            case "anomaly_detector_job":
                detectorJob = AnomalyDetectorJob.parse(parser);
                break;
        }
    }
    return new ToXContentObject[] {
        new AnomalyDetector(
            id,
            version,
            detector.getName(),
            detector.getDescription(),
            detector.getTimeField(),
            detector.getIndices(),
            detector.getFeatureAttributes(),
            detector.getFilterQuery(),
            detector.getDetectionInterval(),
            detector.getWindowDelay(),
            detector.getUiMetadata(),
            detector.getSchemaVersion(),
            detector.getLastUpdateTime()
        ),
        detectorJob };
}
Example 17
Source File: AnomalyDetectorJob.java From anomaly-detection with Apache License 2.0
public static AnomalyDetectorJob parse(XContentParser parser) throws IOException {
    String name = null;
    Schedule schedule = null;
    TimeConfiguration windowDelay = null;
    // we cannot set it to null as isEnabled() would do the unboxing and results in null pointer exception
    Boolean isEnabled = Boolean.FALSE;
    Instant enabledTime = null;
    Instant disabledTime = null;
    Instant lastUpdateTime = null;
    Long lockDurationSeconds = DEFAULT_AD_JOB_LOC_DURATION_SECONDS;
    ensureExpectedToken(XContentParser.Token.START_OBJECT, parser.currentToken(), parser::getTokenLocation);
    while (parser.nextToken() != XContentParser.Token.END_OBJECT) {
        String fieldName = parser.currentName();
        parser.nextToken();
        switch (fieldName) {
            case NAME_FIELD:
                name = parser.text();
                break;
            case SCHEDULE_FIELD:
                schedule = ScheduleParser.parse(parser);
                break;
            case WINDOW_DELAY_FIELD:
                windowDelay = TimeConfiguration.parse(parser);
                break;
            case IS_ENABLED_FIELD:
                isEnabled = parser.booleanValue();
                break;
            case ENABLED_TIME_FIELD:
                enabledTime = ParseUtils.toInstant(parser);
                break;
            case DISABLED_TIME_FIELD:
                disabledTime = ParseUtils.toInstant(parser);
                break;
            case LAST_UPDATE_TIME_FIELD:
                lastUpdateTime = ParseUtils.toInstant(parser);
                break;
            case LOCK_DURATION_SECONDS:
                lockDurationSeconds = parser.longValue();
                break;
            default:
                parser.skipChildren();
                break;
        }
    }
    return new AnomalyDetectorJob(
        name,
        schedule,
        windowDelay,
        isEnabled,
        enabledTime,
        disabledTime,
        lastUpdateTime,
        lockDurationSeconds
    );
}