org.apache.kafka.streams.kstream.Aggregator Java Examples
The following examples show how to use
org.apache.kafka.streams.kstream.Aggregator.
Each example notes the original project and source file it was taken from.
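Aggregator is a single-method interface: apply(key, value, aggregate) folds one record value into the running aggregate and returns the updated aggregate. A minimal, self-contained sketch of that contract (the key, values, and types here are illustrative only):

import org.apache.kafka.streams.kstream.Aggregator;

public class AggregatorContractSketch {
    public static void main(String[] args) {
        // Sums the lengths of incoming string values into a running Long total.
        Aggregator<String, String, Long> lengthAggregator =
                (key, value, aggregate) -> aggregate + value.length();

        Long running = 0L;
        running = lengthAggregator.apply("user-1", "hello", running);   // 5
        running = lengthAggregator.apply("user-1", "world!", running);  // 11
        System.out.println(running);
    }
}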
Example #1
Source File: CogroupingStreams.java From kafka-tutorials with Apache License 2.0
public Topology buildTopology(Properties envProps) {
    final StreamsBuilder builder = new StreamsBuilder();
    final String appOneInputTopic = envProps.getProperty("app-one.topic.name");
    final String appTwoInputTopic = envProps.getProperty("app-two.topic.name");
    final String appThreeInputTopic = envProps.getProperty("app-three.topic.name");
    final String totalResultOutputTopic = envProps.getProperty("output.topic.name");

    final Serde<String> stringSerde = getPrimitiveAvroSerde(envProps, true);
    final Serde<LoginEvent> loginEventSerde = getSpecificAvroSerde(envProps);
    final Serde<LoginRollup> loginRollupSerde = getSpecificAvroSerde(envProps);

    final KStream<String, LoginEvent> appOneStream = builder.stream(appOneInputTopic, Consumed.with(stringSerde, loginEventSerde));
    final KStream<String, LoginEvent> appTwoStream = builder.stream(appTwoInputTopic, Consumed.with(stringSerde, loginEventSerde));
    final KStream<String, LoginEvent> appThreeStream = builder.stream(appThreeInputTopic, Consumed.with(stringSerde, loginEventSerde));

    final Aggregator<String, LoginEvent, LoginRollup> loginAggregator = new LoginAggregator();

    final KGroupedStream<String, LoginEvent> appOneGrouped = appOneStream.groupByKey();
    final KGroupedStream<String, LoginEvent> appTwoGrouped = appTwoStream.groupByKey();
    final KGroupedStream<String, LoginEvent> appThreeGrouped = appThreeStream.groupByKey();

    appOneGrouped.cogroup(loginAggregator)
        .cogroup(appTwoGrouped, loginAggregator)
        .cogroup(appThreeGrouped, loginAggregator)
        .aggregate(() -> new LoginRollup(new HashMap<>()), Materialized.with(Serdes.String(), loginRollupSerde))
        .toStream().to(totalResultOutputTopic, Produced.with(stringSerde, loginRollupSerde));

    return builder.build();
}
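Example #1 wires a LoginAggregator into the cogroup calls but does not show the class itself. Below is a hedged sketch of what such an Aggregator<String, LoginEvent, LoginRollup> implementation could look like, assuming LoginRollup wraps a nested Map<String, Map<String, Long>> of login counts keyed by application and user; the accessor names are assumptions, not taken from the original project.

import java.util.HashMap;
import java.util.Map;

import org.apache.kafka.streams.kstream.Aggregator;

// Sketch only: LoginEvent and LoginRollup are generated Avro classes in the
// original project; the accessors used below (getUserId, getLoginByAppAndUser)
// are assumptions about their shape.
public class LoginAggregator implements Aggregator<String, LoginEvent, LoginRollup> {

    @Override
    public LoginRollup apply(String appId, LoginEvent loginEvent, LoginRollup rollup) {
        final String userId = loginEvent.getUserId();
        final Map<String, Map<String, Long>> logins = rollup.getLoginByAppAndUser();
        final Map<String, Long> userLogins =
                logins.computeIfAbsent(appId, key -> new HashMap<>());
        userLogins.merge(userId, 1L, Long::sum);
        return rollup;
    }
}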
Example #2
Source File: SpanAggregationTopology.java From zipkin-storage-kafka with Apache License 2.0
Aggregator<String, List<Span>, List<Span>> aggregateSpans() {
    return (traceId, spans, allSpans) -> {
        allSpans.addAll(spans);
        return Trace.merge(allSpans);
    };
}
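As a usage sketch (not part of the original file), an Aggregator like this is typically handed to aggregate() on a grouped stream together with an initializer that creates the empty aggregate. The topic name, grouping step, and spansSerde below are assumptions, not taken from the original project.

// Sketch only: the "spans" topic, the groupByKey step, and spansSerde are assumptions.
KStream<String, List<Span>> spansByTraceId =
        builder.stream("spans", Consumed.with(Serdes.String(), spansSerde));

spansByTraceId
        .groupByKey(Grouped.with(Serdes.String(), spansSerde))
        .aggregate(
                ArrayList::new,      // initializer: start each trace with an empty span list
                aggregateSpans(),    // the Aggregator defined above
                Materialized.with(Serdes.String(), spansSerde));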
Example #3
Source File: StockPerformanceInteractiveQueryApplication.java From kafka-streams-in-action with Apache License 2.0
public static void main(String[] args) {
    if (args.length < 2) {
        LOG.error("Need to specify host, port");
        System.exit(1);
    }

    String host = args[0];
    int port = Integer.parseInt(args[1]);
    final HostInfo hostInfo = new HostInfo(host, port);

    Properties properties = getProperties();
    properties.put(StreamsConfig.APPLICATION_SERVER_CONFIG, host + ":" + port);
    StreamsConfig streamsConfig = new StreamsConfig(properties);

    Serde<String> stringSerde = Serdes.String();
    Serde<Long> longSerde = Serdes.Long();
    Serde<StockTransaction> stockTransactionSerde = StreamsSerdes.StockTransactionSerde();
    WindowedSerializer<String> windowedSerializer = new WindowedSerializer<>(stringSerde.serializer());
    WindowedDeserializer<String> windowedDeserializer = new WindowedDeserializer<>(stringSerde.deserializer());
    Serde<Windowed<String>> windowedSerde = Serdes.serdeFrom(windowedSerializer, windowedDeserializer);
    Serde<CustomerTransactions> customerTransactionsSerde = StreamsSerdes.CustomerTransactionsSerde();

    Aggregator<String, StockTransaction, Integer> sharesAggregator = (k, v, i) -> v.getShares() + i;

    StreamsBuilder builder = new StreamsBuilder();

    // data is already coming in keyed
    KStream<String, StockTransaction> stockTransactionKStream = builder.stream(MockDataProducer.STOCK_TRANSACTIONS_TOPIC,
            Consumed.with(stringSerde, stockTransactionSerde)
                    .withOffsetResetPolicy(Topology.AutoOffsetReset.LATEST));

    stockTransactionKStream.map((k, v) -> KeyValue.pair(v.getSector(), v))
            .groupByKey(Serialized.with(stringSerde, stockTransactionSerde))
            .count(Materialized.as("TransactionsBySector"))
            .toStream()
            .peek((k, v) -> LOG.info("Transaction count for {} {}", k, v))
            .to("sector-transaction-counts", Produced.with(stringSerde, longSerde));

    stockTransactionKStream.map((k, v) -> KeyValue.pair(v.getCustomerId(), v))
            .groupByKey(Serialized.with(stringSerde, stockTransactionSerde))
            .windowedBy(SessionWindows.with(TimeUnit.MINUTES.toMillis(60)).until(TimeUnit.MINUTES.toMillis(120)))
            .aggregate(CustomerTransactions::new, (k, v, ct) -> ct.update(v), (k, ct, other) -> ct.merge(other),
                    Materialized.<String, CustomerTransactions, SessionStore<Bytes, byte[]>>as("CustomerPurchaseSessions")
                            .withKeySerde(stringSerde).withValueSerde(customerTransactionsSerde))
            .toStream()
            .peek((k, v) -> LOG.info("Session info for {} {}", k, v))
            .to("session-transactions", Produced.with(windowedSerde, customerTransactionsSerde));

    stockTransactionKStream.groupByKey(Serialized.with(stringSerde, stockTransactionSerde))
            .windowedBy(TimeWindows.of(10000))
            .aggregate(() -> 0, sharesAggregator,
                    Materialized.<String, Integer, WindowStore<Bytes, byte[]>>as("NumberSharesPerPeriod")
                            .withKeySerde(stringSerde)
                            .withValueSerde(Serdes.Integer()))
            .toStream().peek((k, v) -> LOG.info("key is {} value is {}", k, v))
            .to("transaction-count", Produced.with(windowedSerde, Serdes.Integer()));

    KafkaStreams kafkaStreams = new KafkaStreams(builder.build(), streamsConfig);
    InteractiveQueryServer queryServer = new InteractiveQueryServer(kafkaStreams, hostInfo);
    StateRestoreHttpReporter restoreReporter = new StateRestoreHttpReporter(queryServer);
    queryServer.init();

    kafkaStreams.setGlobalStateRestoreListener(restoreReporter);

    kafkaStreams.setStateListener(((newState, oldState) -> {
        if (newState == KafkaStreams.State.RUNNING && oldState == KafkaStreams.State.REBALANCING) {
            LOG.info("Setting the query server to ready");
            queryServer.setReady(true);
        } else if (newState != KafkaStreams.State.RUNNING) {
            LOG.info("State not RUNNING, disabling the query server");
            queryServer.setReady(false);
        }
    }));

    kafkaStreams.setUncaughtExceptionHandler((t, e) -> {
        LOG.error("Thread {} had a fatal error {}", t, e, e);
        shutdown(kafkaStreams, queryServer);
    });

    Runtime.getRuntime().addShutdownHook(new Thread(() -> {
        shutdown(kafkaStreams, queryServer);
    }));

    LOG.info("Stock Analysis KStream Interactive Query App Started");
    kafkaStreams.cleanUp();
    kafkaStreams.start();
}
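The session-window branch of Example #3 passes three functions to aggregate(): an Initializer for a new session, the Aggregator itself, and a Merger that combines the aggregates of two sessions when they merge. A minimal sketch isolating that shape, using plain String aggregates so it stands alone (names and values are illustrative only):

import org.apache.kafka.streams.kstream.Aggregator;
import org.apache.kafka.streams.kstream.Initializer;
import org.apache.kafka.streams.kstream.Merger;

public class SessionAggregateShapeSketch {
    public static void main(String[] args) {
        // Initializer: the empty aggregate for a brand-new session.
        Initializer<String> init = () -> "";
        // Aggregator: folds one record value into the session aggregate.
        Aggregator<String, String, String> agg = (key, value, aggregate) -> aggregate + value;
        // Merger: combines the aggregates of two sessions that have merged.
        Merger<String, String> merger = (key, aggOne, aggTwo) -> aggOne + aggTwo;

        String sessionOne = agg.apply("cust-1", "a", init.apply());
        String sessionTwo = agg.apply("cust-1", "b", init.apply());
        System.out.println(merger.apply("cust-1", sessionOne, sessionTwo)); // ab
    }
}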