Java Code Examples for org.elasticsearch.common.unit.TimeValue#parseTimeValue()
The following examples show how to use org.elasticsearch.common.unit.TimeValue#parseTimeValue().
You can go to the original project or source file by following the links above each example.
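Before the examples, a minimal sketch of the call itself may help. It assumes the Elasticsearch 2.x-era three-argument signature that most of the examples below use; the class name ParseTimeValueDemo and the setting name "demo.timeout" are made up for illustration:

import org.elasticsearch.common.unit.TimeValue;

public class ParseTimeValueDemo {
    public static void main(String[] args) {
        // "30s" parses to a TimeValue of thirty seconds.
        // A null first argument returns the supplied default instead,
        // and unparseable input raises ElasticsearchParseException.
        // The last argument is a setting name used only in error messages.
        TimeValue parsed = TimeValue.parseTimeValue("30s",
                TimeValue.timeValueSeconds(60), "demo.timeout");
        System.out.println(parsed.getMillis()); // prints 30000
    }
}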
Example 1
Source File: ExpressionToTimeValueVisitor.java From Elasticsearch with Apache License 2.0
@Override
protected TimeValue visitStringLiteral(StringLiteral node, Context context) {
    try {
        return TimeValue.parseTimeValue(node.getValue(), DEFAULT_VALUE, context.settingName);
    } catch (ElasticsearchParseException e) {
        throw new IllegalArgumentException(
                String.format(Locale.ENGLISH, "Invalid time value '%s'", node.getValue()));
    }
}
Example 2
Source File: DecayFunctionParser.java From Elasticsearch with Apache License 2.0
private AbstractDistanceScoreFunction parseDateVariable(String fieldName, XContentParser parser,
        QueryParseContext parseContext, DateFieldMapper.DateFieldType dateFieldType,
        MultiValueMode mode) throws IOException {
    XContentParser.Token token;
    String parameterName = null;
    String scaleString = null;
    String originString = null;
    String offsetString = "0d";
    double decay = 0.5;
    while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
        if (token == XContentParser.Token.FIELD_NAME) {
            parameterName = parser.currentName();
        } else if (parameterName.equals(DecayFunctionBuilder.SCALE)) {
            scaleString = parser.text();
        } else if (parameterName.equals(DecayFunctionBuilder.ORIGIN)) {
            originString = parser.text();
        } else if (parameterName.equals(DecayFunctionBuilder.DECAY)) {
            decay = parser.doubleValue();
        } else if (parameterName.equals(DecayFunctionBuilder.OFFSET)) {
            offsetString = parser.text();
        } else {
            throw new ElasticsearchParseException("parameter [{}] not supported!", parameterName);
        }
    }
    // The origin defaults to "now" unless one was given explicitly.
    long origin = SearchContext.current().nowInMillis();
    if (originString != null) {
        origin = dateFieldType.parseToMilliseconds(originString, false, null, null);
    }
    if (scaleString == null) {
        throw new ElasticsearchParseException("[{}] must be set for date fields.", DecayFunctionBuilder.SCALE);
    }
    // Scale and offset are duration expressions such as "10d"; both are converted to milliseconds.
    TimeValue val = TimeValue.parseTimeValue(scaleString, TimeValue.timeValueHours(24),
            getClass().getSimpleName() + ".scale");
    double scale = val.getMillis();
    val = TimeValue.parseTimeValue(offsetString, TimeValue.timeValueHours(24),
            getClass().getSimpleName() + ".offset");
    double offset = val.getMillis();
    IndexNumericFieldData numericFieldData = parseContext.getForField(dateFieldType);
    return new NumericFieldDataScoreFunction(origin, scale, decay, offset, getDecayFunction(),
            numericFieldData, mode);
}
Example 3
Source File: UpdateHelper.java From Elasticsearch with Apache License 2.0
private TimeValue getTTLFromScriptContext(Map<String, Object> ctx) {
    Object fetchedTTL = ctx.get("_ttl");
    if (fetchedTTL != null) {
        if (fetchedTTL instanceof Number) {
            return new TimeValue(((Number) fetchedTTL).longValue());
        }
        return TimeValue.parseTimeValue((String) fetchedTTL, null, "_ttl");
    }
    return null;
}
Example 4
Source File: ElasticSearchConnection.java From storm-crawler with Apache License 2.0
public static ElasticSearchConnection getConnection(Map stormConf, String boltType,
        BulkProcessor.Listener listener) {
    String flushIntervalString = ConfUtils.getString(stormConf,
            "es." + boltType + ".flushInterval", "5s");
    TimeValue flushInterval = TimeValue.parseTimeValue(flushIntervalString,
            TimeValue.timeValueSeconds(5), "flushInterval");
    int bulkActions = ConfUtils.getInt(stormConf, "es." + boltType + ".bulkActions", 50);
    int concurrentRequests = ConfUtils.getInt(stormConf, "es." + boltType + ".concurrentRequests", 1);
    RestHighLevelClient client = getClient(stormConf, boltType);
    boolean sniff = ConfUtils.getBoolean(stormConf, "es." + boltType + ".sniff", true);
    Sniffer sniffer = null;
    if (sniff) {
        sniffer = Sniffer.builder(client.getLowLevelClient()).build();
    }
    BulkProcessor bulkProcessor = BulkProcessor
            .builder((request, bulkListener) ->
                    client.bulkAsync(request, RequestOptions.DEFAULT, bulkListener), listener)
            .setFlushInterval(flushInterval)
            .setBulkActions(bulkActions)
            .setConcurrentRequests(concurrentRequests)
            .build();
    return new ElasticSearchConnection(client, bulkProcessor, sniffer);
}
Example 5
Source File: DerivativeParser.java From Elasticsearch with Apache License 2.0
@Override
public PipelineAggregatorFactory parse(String pipelineAggregatorName, XContentParser parser,
        SearchContext context) throws IOException {
    XContentParser.Token token;
    String currentFieldName = null;
    String[] bucketsPaths = null;
    String format = null;
    String units = null;
    GapPolicy gapPolicy = GapPolicy.SKIP;

    while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
        if (token == XContentParser.Token.FIELD_NAME) {
            currentFieldName = parser.currentName();
        } else if (token == XContentParser.Token.VALUE_STRING) {
            if (context.parseFieldMatcher().match(currentFieldName, FORMAT)) {
                format = parser.text();
            } else if (context.parseFieldMatcher().match(currentFieldName, BUCKETS_PATH)) {
                bucketsPaths = new String[] { parser.text() };
            } else if (context.parseFieldMatcher().match(currentFieldName, GAP_POLICY)) {
                gapPolicy = GapPolicy.parse(context, parser.text(), parser.getTokenLocation());
            } else if (context.parseFieldMatcher().match(currentFieldName, UNIT)) {
                units = parser.text();
            } else {
                throw new SearchParseException(context, "Unknown key for a " + token + " in ["
                        + pipelineAggregatorName + "]: [" + currentFieldName + "].",
                        parser.getTokenLocation());
            }
        } else if (token == XContentParser.Token.START_ARRAY) {
            if (context.parseFieldMatcher().match(currentFieldName, BUCKETS_PATH)) {
                List<String> paths = new ArrayList<>();
                while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
                    String path = parser.text();
                    paths.add(path);
                }
                bucketsPaths = paths.toArray(new String[paths.size()]);
            } else {
                throw new SearchParseException(context, "Unknown key for a " + token + " in ["
                        + pipelineAggregatorName + "]: [" + currentFieldName + "].",
                        parser.getTokenLocation());
            }
        } else {
            throw new SearchParseException(context, "Unexpected token " + token + " in ["
                    + pipelineAggregatorName + "].", parser.getTokenLocation());
        }
    }

    if (bucketsPaths == null) {
        throw new SearchParseException(context, "Missing required field ["
                + BUCKETS_PATH.getPreferredName() + "] for derivative aggregation ["
                + pipelineAggregatorName + "]", parser.getTokenLocation());
    }

    ValueFormatter formatter = null;
    if (format != null) {
        formatter = ValueFormat.Patternable.Number.format(format).formatter();
    } else {
        formatter = ValueFormatter.RAW;
    }

    Long xAxisUnits = null;
    if (units != null) {
        DateTimeUnit dateTimeUnit = DateHistogramParser.DATE_FIELD_UNITS.get(units);
        if (dateTimeUnit != null) {
            xAxisUnits = dateTimeUnit.field().getDurationField().getUnitMillis();
        } else {
            // Not a calendar unit; fall back to parsing it as a fixed time value.
            TimeValue timeValue = TimeValue.parseTimeValue(units, null,
                    getClass().getSimpleName() + ".unit");
            if (timeValue != null) {
                xAxisUnits = timeValue.getMillis();
            }
        }
    }
    return new DerivativePipelineAggregator.Factory(pipelineAggregatorName, bucketsPaths,
            formatter, gapPolicy, xAxisUnits);
}
Example 6
Source File: XContentMapValues.java From Elasticsearch with Apache License 2.0
public static TimeValue nodeTimeValue(Object node) {
    if (node instanceof Number) {
        return TimeValue.timeValueMillis(((Number) node).longValue());
    }
    return TimeValue.parseTimeValue(node.toString(), null,
            XContentMapValues.class.getSimpleName() + ".nodeTimeValue");
}
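As a quick illustration of the two paths through this helper (the input values here are hypothetical):

// A Number is taken as a raw millisecond count.
TimeValue fromNumber = XContentMapValues.nodeTimeValue(1500);  // 1.5 seconds
// Anything else is stringified and parsed as a duration expression.
TimeValue fromString = XContentMapValues.nodeTimeValue("90s"); // 90 seconds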
Example 7
Source File: BaseTasksRequest.java From Elasticsearch with Apache License 2.0
@SuppressWarnings("unchecked")
public final T setTimeout(String timeout) {
    this.timeout = TimeValue.parseTimeValue(timeout, null, getClass().getSimpleName() + ".timeout");
    return (T) this;
}
Example 8
Source File: ElasticsearchSearchIndexConfiguration.java From vertexium with Apache License 2.0
public TimeValue getScrollKeepAlive() {
    String value = graphConfiguration.getString(
            GraphConfiguration.SEARCH_INDEX_PROP_PREFIX + "." + QUERY_SCROLL_KEEP_ALIVE,
            QUERY_SCROLL_KEEP_ALIVE_DEFAULT);
    return TimeValue.parseTimeValue(value, null, "");
}
Example 9
Source File: ElasticsearchProvider.java From log4j2-elasticsearch with Apache License 2.0
/**
 * Factory method for creating an Elasticsearch provider within the plugin manager.
 *
 * @param cluster The name of the Elasticsearch cluster to which log event documents will be written.
 * @param host    The host name of an Elasticsearch server node of the cluster, defaults to localhost.
 * @param port    The port that Elasticsearch is listening on, defaults to 9300.
 * @param index   The index that Elasticsearch shall use for indexing.
 * @param type    The type of the index Elasticsearch shall use for indexing.
 * @return a new Elasticsearch provider
 */
@PluginFactory
public static ElasticsearchProvider createNoSqlProvider(
        @PluginAttribute("cluster") String cluster,
        @PluginAttribute("host") String host,
        @PluginAttribute("port") Integer port,
        @PluginAttribute("index") String index,
        @PluginAttribute("type") String type,
        @PluginAttribute("timeout") String timeout,
        @PluginAttribute("maxActionsPerBulkRequest") Integer maxActionsPerBulkRequest,
        @PluginAttribute("maxConcurrentBulkRequests") Integer maxConcurrentBulkRequests,
        @PluginAttribute("maxVolumePerBulkRequest") String maxVolumePerBulkRequest,
        @PluginAttribute("flushInterval") String flushInterval) {

    if (cluster == null || cluster.isEmpty()) {
        cluster = "elasticsearch";
    }
    if (host == null || host.isEmpty()) {
        host = "localhost";
    }
    if (port == null || port == 0) {
        port = 9300;
    }
    if (index == null || index.isEmpty()) {
        index = "log4j2";
    }
    if (type == null || type.isEmpty()) {
        type = "log4j2";
    }
    if (timeout == null || timeout.isEmpty()) {
        timeout = "30s";
    }
    if (maxActionsPerBulkRequest == null) {
        maxActionsPerBulkRequest = 1000;
    }
    if (maxConcurrentBulkRequests == null) {
        maxConcurrentBulkRequests = 2 * Runtime.getRuntime().availableProcessors();
    }
    if (maxVolumePerBulkRequest == null || maxVolumePerBulkRequest.isEmpty()) {
        maxVolumePerBulkRequest = "10m";
    }

    Settings settings = settingsBuilder()
            .put("cluster.name", cluster)
            .put("network.server", false)
            .put("node.client", true)
            .put("client.transport.sniff", true)
            .put("client.transport.ping_timeout", timeout)
            .put("client.transport.ignore_cluster_name", false)
            .put("client.transport.nodes_sampler_interval", "30s")
            .build();
    TransportClient client = new TransportClient(settings, false);
    client.addTransportAddress(new InetSocketTransportAddress(host, port));
    if (client.connectedNodes().isEmpty()) {
        logger.error("unable to connect to Elasticsearch cluster");
        return null;
    }

    String description = "cluster=" + cluster + ",host=" + host + ",port=" + port
            + ",index=" + index + ",type=" + type;
    ElasticsearchTransportClient elasticsearchTransportClient = new ElasticsearchTransportClient(
            client, index, type, maxActionsPerBulkRequest, maxConcurrentBulkRequests,
            ByteSizeValue.parseBytesSizeValue(maxVolumePerBulkRequest),
            TimeValue.parseTimeValue(flushInterval, TimeValue.timeValueSeconds(30)));
    ElasticsearchProvider elasticsearchProvider =
            new ElasticsearchProvider(elasticsearchTransportClient, description);
    return elasticsearchProvider;
}
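Note that this example calls a two-argument parseTimeValue(String, TimeValue) overload with no setting name, as provided by the older Elasticsearch client library this project builds against. Against the three-argument API used elsewhere on this page, the equivalent call would presumably look like the sketch below (the setting name "flushInterval" is arbitrary and only surfaces in error messages):

TimeValue interval = TimeValue.parseTimeValue(flushInterval,
        TimeValue.timeValueSeconds(30), "flushInterval");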
Example 10
Source File: KafkaRiverConfig.java From elasticsearch-river-kafka with Apache License 2.0
public KafkaRiverConfig(RiverSettings settings) {
    if (settings.settings().containsKey("kafka")) {
        Map<String, Object> kafkaSettings = (Map<String, Object>) settings.settings().get("kafka");
        topic = (String) kafkaSettings.get("topic");
        zookeeper = XContentMapValues.nodeStringValue(kafkaSettings.get("zookeeper"), "localhost");
        factoryClass = XContentMapValues.nodeStringValue(
                kafkaSettings.get("message_handler_factory_class"),
                "org.elasticsearch.river.kafka.JsonMessageHandlerFactory");
        brokerHost = XContentMapValues.nodeStringValue(kafkaSettings.get("broker_host"), "localhost");
        brokerPort = XContentMapValues.nodeIntegerValue(kafkaSettings.get("broker_port"), 9092);
        partition = XContentMapValues.nodeIntegerValue(kafkaSettings.get("partition"), 0);
    } else {
        zookeeper = "localhost";
        brokerHost = "localhost";
        brokerPort = 9092;
        topic = "default_topic";
        partition = 0;
        factoryClass = "org.elasticsearch.river.kafka.JsonMessageHandlerFactory";
    }

    if (settings.settings().containsKey("index")) {
        Map<String, Object> indexSettings = (Map<String, Object>) settings.settings().get("index");
        bulkSize = XContentMapValues.nodeIntegerValue(indexSettings.get("bulk_size_bytes"), 10 * 1024 * 1024);
        if (indexSettings.containsKey("bulk_timeout")) {
            bulkTimeout = TimeValue.parseTimeValue(
                    XContentMapValues.nodeStringValue(indexSettings.get("bulk_timeout"), "10ms"),
                    TimeValue.timeValueMillis(10000));
        } else {
            bulkTimeout = TimeValue.timeValueMillis(10);
        }
    } else {
        bulkSize = 10 * 1024 * 1024;
        bulkTimeout = TimeValue.timeValueMillis(10000);
    }

    if (settings.settings().containsKey("statsd")) {
        Map<String, Object> statsdSettings = (Map<String, Object>) settings.settings().get("statsd");
        statsdHost = (String) statsdSettings.get("host");
        statsdPort = XContentMapValues.nodeIntegerValue(statsdSettings.get("port"), 8125);
        statsdPrefix = XContentMapValues.nodeStringValue(statsdSettings.get("prefix"), "es-kafka-river");
    } else {
        statsdHost = null;
        statsdPort = -1;
        statsdPrefix = null;
    }
}
Example 11
Source File: RiverConfig.java From elasticsearch-river-kafka with Apache License 2.0
public RiverConfig(RiverName riverName, RiverSettings riverSettings) {

    // Extract kafka related configuration
    if (riverSettings.settings().containsKey("kafka")) {
        Map<String, Object> kafkaSettings = (Map<String, Object>) riverSettings.settings().get("kafka");
        topic = (String) kafkaSettings.get(TOPIC);
        zookeeperConnect = XContentMapValues.nodeStringValue(kafkaSettings.get(ZOOKEEPER_CONNECT), "localhost");
        zookeeperConnectionTimeout = XContentMapValues.nodeIntegerValue(
                kafkaSettings.get(ZOOKEEPER_CONNECTION_TIMEOUT), 10000);
        messageType = MessageType.fromValue(XContentMapValues.nodeStringValue(
                kafkaSettings.get(MESSAGE_TYPE), MessageType.JSON.toValue()));
    } else {
        zookeeperConnect = "localhost";
        zookeeperConnectionTimeout = 10000;
        topic = "elasticsearch-river-kafka";
        messageType = MessageType.JSON;
    }

    // Extract ElasticSearch related configuration
    if (riverSettings.settings().containsKey("index")) {
        Map<String, Object> indexSettings = (Map<String, Object>) riverSettings.settings().get("index");
        indexName = XContentMapValues.nodeStringValue(indexSettings.get(INDEX_NAME), riverName.name());
        typeName = XContentMapValues.nodeStringValue(indexSettings.get(MAPPING_TYPE), "status");
        bulkSize = XContentMapValues.nodeIntegerValue(indexSettings.get(BULK_SIZE), 100);
        concurrentRequests = XContentMapValues.nodeIntegerValue(indexSettings.get(CONCURRENT_REQUESTS), 1);
        actionType = ActionType.fromValue(XContentMapValues.nodeStringValue(
                indexSettings.get(ACTION_TYPE), ActionType.INDEX.toValue()));
        flushInterval = TimeValue.parseTimeValue(
                XContentMapValues.nodeStringValue(indexSettings.get(FLUSH_INTERVAL), "12h"), FLUSH_12H);
    } else {
        indexName = riverName.name();
        typeName = "status";
        bulkSize = 100;
        concurrentRequests = 1;
        actionType = ActionType.INDEX;
        flushInterval = FLUSH_12H;
    }

    // Extract StatsD related configuration
    if (riverSettings.settings().containsKey("statsd")) {
        Map<String, Object> statsdSettings = (Map<String, Object>) riverSettings.settings().get("statsd");
        statsdHost = XContentMapValues.nodeStringValue(statsdSettings.get(STATSD_HOST), "localhost");
        statsdPrefix = XContentMapValues.nodeStringValue(statsdSettings.get(STATSD_PREFIX), "kafka_river");
        statsdPort = XContentMapValues.nodeIntegerValue(statsdSettings.get(STATSD_PORT), 8125);
        statsdIntervalInSeconds = XContentMapValues.nodeIntegerValue(
                statsdSettings.get(STATSD_INTERVAL_IN_SECONDS), 10);
    }
}