Java Code Examples for org.apache.kafka.streams.kstream.KTable#toStream()
The following examples show how to use org.apache.kafka.streams.kstream.KTable#toStream().
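KTable#toStream() converts a table's changelog into a record stream, so every table update can be processed like an ordinary event or written to a topic. A minimal sketch of the basic pattern, assuming String serdes and hypothetical topic names:

// Build a table from an input topic, then forward every update
// to a downstream topic as a plain record stream.
StreamsBuilder builder = new StreamsBuilder();
KTable<String, String> table = builder.table("input-topic",
        Consumed.with(Serdes.String(), Serdes.String()));
table.toStream()
        .to("output-topic", Produced.with(Serdes.String(), Serdes.String()));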
Example 1
Source File: StatisticsBuilder.java From football-events with MIT License
private void buildPlayerStatistics(KStream<String, GoalScored> goalStream) {
    KTable<String, PlayerStartedCareer> playerTable = builder
            .table(PLAYER_STARTED_TOPIC, with(String(), playerSerde));

    KTable<String, PlayerGoals> playerGoalsTable = goalStream
            .selectKey((matchId, goal) -> goal.getScorerId())
            .leftJoin(playerTable,
                    (goal, player) -> new PlayerGoals(player).goal(goal),
                    with(String(), goalScoredSerde, playerSerde))
            .groupByKey(Serialized.with(String(), playerGoalsSerde))
            .reduce(PlayerGoals::aggregate, materialized(PLAYER_GOALS_STORE, playerGoalsSerde));

    KTable<String, PlayerCards> playerCardsTable = builder
            .stream(CARD_RECEIVED_TOPIC, with(String(), cardReceivedSerde))
            .selectKey((matchId, card) -> card.getReceiverId())
            .leftJoin(playerTable,
                    (card, player) -> new PlayerCards(player).card(card),
                    with(String(), cardReceivedSerde, playerSerde))
            .groupByKey(Serialized.with(String(), playerCardsSerde))
            .reduce(PlayerCards::aggregate, materialized(PLAYER_CARDS_STORE, playerCardsSerde));

    // publish changes to a view topic
    playerCardsTable.toStream().to(PLAYER_CARDS_TOPIC, Produced.with(String(), playerCardsSerde));

    KStream<String, PlayerGoals> playerGoalsStream = playerGoalsTable.toStream();
    playerGoalsStream.to(PLAYER_GOALS_TOPIC, Produced.with(String(), playerGoalsSerde));
}
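Both tables are published by converting to a stream and calling to(...), which keeps the view topics in sync with the state stores. When the downstream topic needs a different key, toStream(KeyValueMapper) can re-key while converting. A sketch, where getPlayerName() and the output topic name are hypothetical and not in the original project:

// Hypothetical re-keying while converting the changelog to a stream.
KStream<String, PlayerGoals> byName = playerGoalsTable
        .toStream((playerId, goals) -> goals.getPlayerName()); // assumed accessor
byName.to("player-goals-by-name", Produced.with(String(), playerGoalsSerde));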
Example 2
Source File: ScsApplication.java From spring_io_2019 with Apache License 2.0
@StreamListener
@SendTo(Bindings.AVG_RATINGS)
KStream<Long, Double> averageRatingsFor(@Input(Bindings.RATINGS) KStream<Long, Rating> ratings) {
    KGroupedStream<Long, Double> ratingsGrouped = ratings
            .mapValues(Rating::getRating)
            .groupByKey();
    // running count and running sum of ratings per key
    KTable<Long, Long> count = ratingsGrouped.count();
    KTable<Long, Double> reduce = ratingsGrouped.reduce(Double::sum,
            Materialized.with(Serdes.Long(), Serdes.Double()));
    // joining sum with count yields the running average per key
    KTable<Long, Double> join = reduce.join(count,
            (sum, count1) -> sum / count1,
            Materialized.with(Serdes.Long(), Serdes.Double()));
    return join.toStream();
}
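This computes a running average from two KTables (sum and count) and emits each recomputed average via toStream(). The same result can come from a single aggregation that holds both values at once. A sketch, not from the project, where the double[] accumulator and its DoubleArraySerde are assumptions:

// One-pass alternative: keep [sum, count] in a single store,
// then map to the average before converting to a stream.
KTable<Long, Double> avg = ratings
        .mapValues(Rating::getRating)
        .groupByKey()
        .aggregate(
                () -> new double[]{0, 0},            // accumulator: [sum, count]
                (key, rating, acc) -> {
                    acc[0] += rating;
                    acc[1] += 1;
                    return acc;
                },
                Materialized.with(Serdes.Long(), new DoubleArraySerde())) // serde is assumed
        .mapValues(acc -> acc[0] / acc[1]);
return avg.toStream();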
Example 3
Source File: OptimizationStream.java From micronaut-kafka with Apache License 2.0
@Singleton
@Named(STREAM_OPTIMIZATION_ON)
KStream<String, String> optimizationOn(
        @Named(STREAM_OPTIMIZATION_ON) ConfiguredStreamBuilder builder) {
    // set default serdes
    Properties props = builder.getConfiguration();
    props.put(StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG, Serdes.String().getClass().getName());
    props.put(StreamsConfig.DEFAULT_VALUE_SERDE_CLASS_CONFIG, Serdes.String().getClass().getName());
    props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");

    KTable<String, String> table = builder
            .table(OPTIMIZATION_ON_INPUT, Materialized.as(OPTIMIZATION_ON_STORE));

    return table.toStream();
}
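Because the table is materialized under OPTIMIZATION_ON_STORE, its latest values can also be read through interactive queries once the application is running. A sketch, assuming a running KafkaStreams instance named streams and Kafka Streams 2.5+ for StoreQueryParameters:

// Look up the table's current value for a key in its state store.
ReadOnlyKeyValueStore<String, String> store = streams.store(
        StoreQueryParameters.fromNameAndType(
                OPTIMIZATION_ON_STORE,
                QueryableStoreTypes.keyValueStore()));
String latest = store.get("some-key"); // null if the key is absent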
Example 4
Source File: OptimizationStream.java From micronaut-kafka with Apache License 2.0
@Singleton
@Named(STREAM_OPTIMIZATION_OFF)
KStream<String, String> optimizationOff(
        @Named(STREAM_OPTIMIZATION_OFF) ConfiguredStreamBuilder builder) {
    // set default serdes
    Properties props = builder.getConfiguration();
    props.put(StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG, Serdes.String().getClass().getName());
    props.put(StreamsConfig.DEFAULT_VALUE_SERDE_CLASS_CONFIG, Serdes.String().getClass().getName());
    props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");

    KTable<String, String> table = builder
            .table(OPTIMIZATION_OFF_INPUT, Materialized.as(OPTIMIZATION_OFF_STORE));

    return table.toStream();
}
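The two streams are identical apart from their names; they exist to compare behaviour with topology optimization on and off. Optimization itself is enabled through configuration. A sketch of the relevant property (the constant is TOPOLOGY_OPTIMIZATION before Kafka 2.7 and TOPOLOGY_OPTIMIZATION_CONFIG from 2.7 on):

// With optimization enabled, builder.table(...) may reuse the source
// topic as the table's changelog instead of creating a separate one.
Properties props = builder.getConfiguration();
props.put(StreamsConfig.TOPOLOGY_OPTIMIZATION_CONFIG, StreamsConfig.OPTIMIZE);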
Example 5
Source File: StreamingTable.java From Kafka-Streams-Real-time-Stream-Processing with The Unlicense
public static void main(final String[] args) {
    final Properties props = new Properties();
    props.put(StreamsConfig.APPLICATION_ID_CONFIG, "StreamingTable");
    props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
    props.put(StreamsConfig.STATE_DIR_CONFIG, "state-store");
    props.put(StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG, Serdes.String().getClass().getName());
    props.put(StreamsConfig.DEFAULT_VALUE_SERDE_CLASS_CONFIG, Serdes.String().getClass().getName());
    props.put(StreamsConfig.DEFAULT_TIMESTAMP_EXTRACTOR_CLASS_CONFIG,
            WallclockTimestampExtractor.class.getName());

    //Uncomment to enable record cache of size 10 MB.
    //props.put(StreamsConfig.CACHE_MAX_BYTES_BUFFERING_CONFIG, 10 * 1024 * 1024L);

    //Uncomment to set commit interval to 1 second.
    //props.put(StreamsConfig.COMMIT_INTERVAL_MS_CONFIG, 1000);

    StreamsBuilder streamBuilder = new StreamsBuilder();
    KTable<String, String> KT0 = streamBuilder.table("stock-tick");

    /*
    //Uncomment this block and comment out the next line to suppress
    KTable<String, String> KT1 = KT0.filter((key, value) -> key.contains("HDFCBANK"))
            .suppress(Suppressed.untilTimeLimit(
                    Duration.ofMinutes(5),
                    Suppressed.BufferConfig.maxBytes(1000000L).emitEarlyWhenFull()));
    */
    KTable<String, String> KT1 = KT0.filter((key, value) -> key.contains("HDFCBANK"));

    KStream<String, String> KS2 = KT1.toStream();
    KS2.peek((k, v) -> System.out.println("Key = " + k + " Value = " + v));

    KafkaStreams streams = new KafkaStreams(streamBuilder.build(), props);
    streams.start();
    Runtime.getRuntime().addShutdownHook(new Thread(streams::close));
}
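Since a KTable upserts by key, toStream() emits one record per update rather than only the latest value. A sketch of that behaviour with TopologyTestDriver (requires the kafka-streams-test-utils dependency, 2.4+ for TestInputTopic/TestOutputTopic; topic names follow the example):

// Two ticks for the same key become two updates on the output stream.
Properties testProps = new Properties();
testProps.put(StreamsConfig.APPLICATION_ID_CONFIG, "tostream-test");
testProps.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "dummy:1234");

StreamsBuilder testBuilder = new StreamsBuilder();
testBuilder.table("stock-tick", Consumed.with(Serdes.String(), Serdes.String()))
        .toStream()
        .to("stock-tick-out", Produced.with(Serdes.String(), Serdes.String()));

try (TopologyTestDriver driver = new TopologyTestDriver(testBuilder.build(), testProps)) {
    TestInputTopic<String, String> in = driver.createInputTopic(
            "stock-tick", new StringSerializer(), new StringSerializer());
    TestOutputTopic<String, String> out = driver.createOutputTopic(
            "stock-tick-out", new StringDeserializer(), new StringDeserializer());

    in.pipeInput("HDFCBANK", "1510.00");
    in.pipeInput("HDFCBANK", "1512.50");

    System.out.println(out.readKeyValuesToList());
    // [KeyValue(HDFCBANK, 1510.00), KeyValue(HDFCBANK, 1512.50)]
}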