org.apache.kafka.streams.kstream.KTable Java Examples
The following examples show how to use org.apache.kafka.streams.kstream.KTable.
You can go to the original project or source file by following the links above each example.
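Before the examples, here is a minimal sketch of the core pattern they all build on: reading a topic as an ever-updating table and writing its changelog back out. The topic names and application id below are hypothetical placeholders, and a local broker is assumed.

import java.util.Properties;
import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.streams.KafkaStreams;
import org.apache.kafka.streams.StreamsBuilder;
import org.apache.kafka.streams.StreamsConfig;
import org.apache.kafka.streams.kstream.Consumed;
import org.apache.kafka.streams.kstream.KTable;
import org.apache.kafka.streams.kstream.Produced;

public class KTableSketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put(StreamsConfig.APPLICATION_ID_CONFIG, "ktable-sketch");     // hypothetical app id
        props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // assumes a local broker

        StreamsBuilder builder = new StreamsBuilder();

        // A KTable keeps only the latest value per key; each record on the
        // topic is an upsert, and a null value is a delete (tombstone).
        KTable<String, String> table = builder.table("profiles",            // hypothetical topic
            Consumed.with(Serdes.String(), Serdes.String()));

        // toStream() turns the table's changelog back into a KStream of updates.
        table.toStream().to("profile-updates", Produced.with(Serdes.String(), Serdes.String()));

        KafkaStreams streams = new KafkaStreams(builder.build(), props);
        streams.start();
        Runtime.getRuntime().addShutdownHook(new Thread(streams::close));
    }
}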
Example #1
Source File: KafkaStreamsStreamListenerSetupMethodOrchestrator.java From spring-cloud-stream-binder-kafka with Apache License 2.0
@SuppressWarnings("unchecked")
private boolean isDeclarativeInput(String targetBeanName, MethodParameter methodParameter) {
    if (!methodParameter.getParameterType().isAssignableFrom(Object.class)
            && this.applicationContext.containsBean(targetBeanName)) {
        Class<?> targetBeanClass = this.applicationContext.getType(targetBeanName);
        if (targetBeanClass != null) {
            boolean supports = KafkaStreamsBinderUtils.supportsKStream(methodParameter, targetBeanClass);
            if (!supports) {
                supports = KTable.class.isAssignableFrom(targetBeanClass)
                        && KTable.class.isAssignableFrom(methodParameter.getParameterType());
                if (!supports) {
                    supports = GlobalKTable.class.isAssignableFrom(targetBeanClass)
                            && GlobalKTable.class.isAssignableFrom(methodParameter.getParameterType());
                }
            }
            return supports;
        }
    }
    return false;
}
Example #2
Source File: StreamingWordCount.java From Kafka-Streams-Real-time-Stream-Processing with The Unlicense
public static void main(final String[] args) {
    final Properties props = new Properties();
    props.put(StreamsConfig.APPLICATION_ID_CONFIG, "StreamingWordCount");
    props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
    props.put(StreamsConfig.STATE_DIR_CONFIG, "state-store");
    props.put(StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG, Serdes.String().getClass().getName());
    props.put(StreamsConfig.DEFAULT_VALUE_SERDE_CLASS_CONFIG, Serdes.String().getClass().getName());

    logger.info("Start Reading Messages");
    StreamsBuilder streamBuilder = new StreamsBuilder();

    KStream<String, String> KS0 = streamBuilder.stream("streaming-word-count");
    KStream<String, String> KS1 = KS0.flatMapValues(value ->
        Arrays.asList(value.toLowerCase().split(" ")));
    KGroupedStream<String, String> KGS2 = KS1.groupBy((key, value) -> value);
    KTable<String, Long> KTS3 = KGS2.count();

    KTS3.toStream().peek(
        (k, v) -> logger.info("Key = " + k + " Value = " + v.toString())
    );

    KafkaStreams streams = new KafkaStreams(streamBuilder.build(), props);
    streams.start();
    Runtime.getRuntime().addShutdownHook(new Thread(streams::close));
}
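This pipeline has the canonical word-count shape: flatMapValues splits each line into words, groupBy re-keys the stream by word (which forces a repartition through an internal topic), and count() materializes the running totals as a KTable whose changelog is then printed via toStream().peek(...).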
Example #3
Source File: FkJoinTableToTable.java From kafka-tutorials with Apache License 2.0
public Topology buildTopology(Properties envProps) {
    final StreamsBuilder builder = new StreamsBuilder();
    final String albumTopic = envProps.getProperty("album.topic.name");
    final String userTrackPurchaseTopic = envProps.getProperty("tracks.purchase.topic.name");
    final String musicInterestTopic = envProps.getProperty("music.interest.topic.name");

    final Serde<Long> longSerde = getPrimitiveAvroSerde(envProps, true);
    final Serde<MusicInterest> musicInterestSerde = getSpecificAvroSerde(envProps);
    final Serde<Album> albumSerde = getSpecificAvroSerde(envProps);
    final Serde<TrackPurchase> trackPurchaseSerde = getSpecificAvroSerde(envProps);

    final KTable<Long, Album> albums = builder.table(albumTopic, Consumed.with(longSerde, albumSerde));
    final KTable<Long, TrackPurchase> trackPurchases = builder.table(userTrackPurchaseTopic,
        Consumed.with(longSerde, trackPurchaseSerde));

    final MusicInterestJoiner trackJoiner = new MusicInterestJoiner();
    final KTable<Long, MusicInterest> musicInterestTable = trackPurchases.join(albums,
        TrackPurchase::getAlbumId, trackJoiner);

    musicInterestTable.toStream().to(musicInterestTopic, Produced.with(longSerde, musicInterestSerde));

    return builder.build();
}
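The trackPurchases.join(albums, TrackPurchase::getAlbumId, trackJoiner) call is a foreign-key table-table join (added in Apache Kafka 2.4 via KIP-213): the second argument extracts the album id from each purchase, so the two tables do not need to share a primary key.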
Example #4
Source File: SchemaKTable.java From ksql-fork-with-deep-learning-function with Apache License 2.0
@SuppressWarnings("unchecked")
@Override
public SchemaKTable select(final List<Pair<String, Expression>> expressionPairList) {
    final Pair<Schema, SelectValueMapper> schemaAndMapper = createSelectValueMapperAndSchema(expressionPairList);
    KTable projectedKTable = ktable.mapValues(schemaAndMapper.right);
    return new SchemaKTable(
        schemaAndMapper.left,
        projectedKTable,
        keyField,
        Collections.singletonList(this),
        isWindowed,
        Type.PROJECT,
        functionRegistry,
        schemaRegistryClient
    );
}
Example #5
Source File: StreamUtils.java From kafka-graphs with Apache License 2.0
public static <K, V> KTable<K, V> tableFromCollection(
        StreamsBuilder builder,
        Properties props,
        String topic,
        int numPartitions,
        short replicationFactor,
        Serde<K> keySerde,
        Serde<V> valueSerde,
        Collection<KeyValue<K, V>> values) {

    ClientUtils.createTopic(topic, numPartitions, replicationFactor, props);
    try (Producer<K, V> producer = new KafkaProducer<>(props, keySerde.serializer(), valueSerde.serializer())) {
        for (KeyValue<K, V> value : values) {
            ProducerRecord<K, V> producerRecord = new ProducerRecord<>(topic, value.key, value.value);
            producer.send(producerRecord);
        }
        producer.flush();
    }
    return builder.table(topic, Consumed.with(keySerde, valueSerde), Materialized.with(keySerde, valueSerde));
}
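A convenient pattern for tests: the helper creates the topic, produces the whole collection, and flushes before wiring up builder.table(...), so the topic already holds every record by the time the topology starts reading it as a table.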
Example #6
Source File: WordCount.java From fluent-kafka-streams-tests with MIT License
public Topology getTopology() {
    final Serde<String> stringSerde = Serdes.String();
    final Serde<Long> longSerde = Serdes.Long();

    final StreamsBuilder builder = new StreamsBuilder();
    final KStream<String, String> textLines = builder.stream(this.inputTopic);

    final Pattern pattern = Pattern.compile("\\W+", Pattern.UNICODE_CHARACTER_CLASS);

    final KTable<String, Long> wordCounts = textLines
        .flatMapValues(value -> Arrays.asList(pattern.split(value.toLowerCase())))
        .groupBy((key, word) -> word)
        .count();

    wordCounts.toStream().to(this.outputTopic, Produced.with(stringSerde, longSerde));
    return builder.build();
}
Example #7
Source File: PregelComputation.java From kafka-graphs with Apache License 2.0
public PregelState run(int maxIterations, CompletableFuture<KTable<K, VV>> futureResult) {
    this.maxIterations = maxIterations;
    this.futureResult = futureResult;

    PregelState pregelState = new PregelState(State.RUNNING, -1, Stage.SEND);
    try {
        String rootPath = ZKUtils.PREGEL_PATH + applicationId;
        String childPath = ZKUtils.SUPERSTEP;
        byte[] childData = pregelState.toBytes();
        if (ZKUtils.hasChild(curator, rootPath, childPath)) {
            ZKUtils.updateChild(curator, rootPath, childPath, childData);
        } else {
            ZKUtils.addChild(curator, rootPath, childPath, CreateMode.PERSISTENT, childData);
        }
        return pregelState;
    } catch (Exception e) {
        throw toRuntimeException(e);
    }
}
Example #8
Source File: KGraph.java From kafka-graphs with Apache License 2.0
public <T> KTable<K, T> groupReduceOnEdges(EdgesFunctionWithVertexValue<K, VV, EV, T> edgesFunction,
                                           EdgeDirection direction) throws IllegalArgumentException {
    switch (direction) {
        case IN:
            return vertices()
                .leftJoin(edgesGroupedByTarget(),
                    new ApplyEdgeLeftJoinFunction<>(edgesFunction),
                    Materialized.with(keySerde(), new KryoSerde<>()));
        case OUT:
            return vertices()
                .leftJoin(edgesGroupedBySource(),
                    new ApplyEdgeLeftJoinFunction<>(edgesFunction),
                    Materialized.with(keySerde(), new KryoSerde<>()));
        case BOTH:
            throw new UnsupportedOperationException();
        default:
            throw new IllegalArgumentException("Illegal edge direction");
    }
}
Example #9
Source File: StreamDemo.java From javatech with Creative Commons Attribution Share Alike 4.0 International
public static void main(String[] args) {
    // 1. Specify the stream configuration
    Properties config = new Properties();
    config.put(StreamsConfig.APPLICATION_ID_CONFIG, "wordcount-application");
    config.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, HOST);
    config.put(StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG, Serdes.String().getClass());
    config.put(StreamsConfig.DEFAULT_VALUE_SERDE_CLASS_CONFIG, Serdes.String().getClass());

    // Set up the stream builder
    StreamsBuilder builder = new StreamsBuilder();
    KStream<String, String> textLines = builder.stream("TextLinesTopic");
    KTable<String, Long> wordCounts = textLines
        .flatMapValues(textLine -> Arrays.asList(textLine.toLowerCase().split("\\W+")))
        .groupBy((key, word) -> word)
        .count(Materialized.<String, Long, KeyValueStore<Bytes, byte[]>>as("counts-store"));
    wordCounts.toStream().to("WordsWithCountsTopic", Produced.with(Serdes.String(), Serdes.Long()));

    // Initialize the Kafka Streams instance from the builder and the configuration
    KafkaStreams streams = new KafkaStreams(builder.build(), config);
    streams.start();
}
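Unlike the unnamed count() in Examples #2 and #6, count(Materialized.as("counts-store")) gives the backing state store an explicit name, which makes it addressable through interactive queries; without it, Kafka Streams generates an internal store name.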
Example #10
Source File: KTableBoundElementFactory.java From spring-cloud-stream-binder-kafka with Apache License 2.0
@Override
public KTable createInput(String name) {
    BindingProperties bindingProperties = this.bindingServiceProperties.getBindingProperties(name);
    ConsumerProperties consumerProperties = bindingProperties.getConsumer();
    if (consumerProperties == null) {
        consumerProperties = this.bindingServiceProperties.getConsumerProperties(name);
        consumerProperties.setUseNativeDecoding(true);
    }
    else {
        if (!encodingDecodingBindAdviceHandler.isDecodingSettingProvided()) {
            consumerProperties.setUseNativeDecoding(true);
        }
    }
    // Always set multiplex to true in the kafka streams binder
    consumerProperties.setMultiplex(true);

    KTableBoundElementFactory.KTableWrapperHandler wrapper =
        new KTableBoundElementFactory.KTableWrapperHandler();
    ProxyFactory proxyFactory = new ProxyFactory(
        KTableBoundElementFactory.KTableWrapper.class, KTable.class);
    proxyFactory.addAdvice(wrapper);
    return (KTable) proxyFactory.getProxy();
}
Example #11
Source File: StreamToTableJoinIntegrationTests.java From spring-cloud-stream-binder-kafka with Apache License 2.0
@StreamListener
@SendTo("output")
public KStream<String, Long> process(
        @Input("input") KStream<String, Long> userClicksStream,
        @Input("input-x") KTable<String, String> userRegionsTable) {

    return userClicksStream
        .leftJoin(userRegionsTable,
            (clicks, region) -> new RegionWithClicks(region == null ? "UNKNOWN" : region, clicks),
            Joined.with(Serdes.String(), Serdes.Long(), null))
        .map((user, regionWithClicks) -> new KeyValue<>(
            regionWithClicks.getRegion(), regionWithClicks.getClicks()))
        .groupByKey(Serialized.with(Serdes.String(), Serdes.Long()))
        .reduce(Long::sum)
        .toStream();
}
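This example uses Serialized.with(...), which was deprecated in Kafka 2.1 in favor of Grouped.with(...); Examples #23 and #26 show the same stream-table join written against the newer API.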
Example #12
Source File: KTableBoundElementFactory.java From spring-cloud-stream-binder-kafka with Apache License 2.0
@Override
public Object invoke(MethodInvocation methodInvocation) throws Throwable {
    if (methodInvocation.getMethod().getDeclaringClass().equals(KTable.class)) {
        Assert.notNull(this.delegate,
            "Trying to prepareConsumerBinding " + methodInvocation.getMethod()
                + " but no delegate has been set.");
        return methodInvocation.getMethod().invoke(this.delegate, methodInvocation.getArguments());
    }
    else if (methodInvocation.getMethod().getDeclaringClass()
            .equals(KTableBoundElementFactory.KTableWrapper.class)) {
        return methodInvocation.getMethod().invoke(this, methodInvocation.getArguments());
    }
    else {
        throw new IllegalStateException("Only KTable method invocations are permitted");
    }
}
Example #13
Source File: StructuredDataSourceNode.java From ksql-fork-with-deep-learning-function with Apache License 2.0
private KTable createKTable(
    StreamsBuilder builder,
    final Topology.AutoOffsetReset autoOffsetReset,
    final KsqlTable ksqlTable,
    final Serde<GenericRow> genericRowSerde,
    final Serde<GenericRow> genericRowSerdeAfterRead
) {
    if (ksqlTable.isWindowed()) {
        return table(
            builder.stream(
                ksqlTable.getKsqlTopic().getKafkaTopicName(),
                Consumed.with(windowedSerde, genericRowSerde).withOffsetResetPolicy(autoOffsetReset)
            ).mapValues(windowedMapper).transformValues(new AddTimestampColumn()),
            windowedSerde,
            genericRowSerdeAfterRead
        );
    } else {
        return table(
            builder.stream(
                ksqlTable.getKsqlTopic().getKafkaTopicName(),
                Consumed.with(Serdes.String(), genericRowSerde).withOffsetResetPolicy(autoOffsetReset)
            ).mapValues(nonWindowedValueMapper).transformValues(new AddTimestampColumn()),
            Serdes.String(),
            genericRowSerdeAfterRead
        );
    }
}
Example #14
Source File: StatisticsBuilder.java From football-events with MIT License
private void buildPlayerStatistics(KStream<String, GoalScored> goalStream) {
    KTable<String, PlayerStartedCareer> playerTable = builder
        .table(PLAYER_STARTED_TOPIC, with(String(), playerSerde));

    KTable<String, PlayerGoals> playerGoalsTable = goalStream
        .selectKey((matchId, goal) -> goal.getScorerId())
        .leftJoin(playerTable, (goal, player) -> new PlayerGoals(player).goal(goal),
            with(String(), goalScoredSerde, playerSerde))
        .groupByKey(Serialized.with(String(), playerGoalsSerde))
        .reduce(PlayerGoals::aggregate, materialized(PLAYER_GOALS_STORE, playerGoalsSerde));

    KTable<String, PlayerCards> playerCardsTable = builder
        .stream(CARD_RECEIVED_TOPIC, with(String(), cardReceivedSerde))
        .selectKey((matchId, card) -> card.getReceiverId())
        .leftJoin(playerTable, (card, player) -> new PlayerCards(player).card(card),
            with(String(), cardReceivedSerde, playerSerde))
        .groupByKey(Serialized.with(String(), playerCardsSerde))
        .reduce(PlayerCards::aggregate, materialized(PLAYER_CARDS_STORE, playerCardsSerde));

    // publish changes to a view topic
    playerCardsTable.toStream().to(PLAYER_CARDS_TOPIC, Produced.with(String(), playerCardsSerde));

    KStream<String, PlayerGoals> playerGoalsStream = playerGoalsTable.toStream();
    playerGoalsStream.to(PLAYER_GOALS_TOPIC, Produced.with(String(), playerGoalsSerde));
}
Example #15
Source File: KGraph.java From kafka-graphs with Apache License 2.0
public <T> KGraph<K, VV, EV> joinWithEdgesOnTarget(KTable<K, T> inputDataSet,
                                                   final EdgeJoinFunction<EV, T> edgeJoinFunction) {

    KTable<Edge<K>, EV> resultedEdges = edgesGroupedByTarget()
        .leftJoin(inputDataSet,
            new ApplyLeftJoinToEdgeValuesOnEitherSourceOrTarget<>(edgeJoinFunction),
            Materialized.with(keySerde(), new KryoSerde<>()))
        .toStream()
        .flatMap((k, edgeWithValues) -> {
            List<KeyValue<Edge<K>, EV>> edges = new ArrayList<>();
            for (EdgeWithValue<K, EV> edge : edgeWithValues) {
                edges.add(new KeyValue<>(new Edge<>(edge.source(), edge.target()), edge.value()));
            }
            return edges;
        })
        .groupByKey(Grouped.with(new KryoSerde<>(), edgeValueSerde()))
        .<EV>reduce((v1, v2) -> v2, Materialized.<Edge<K>, EV, KeyValueStore<Bytes, byte[]>>as(
            generateStoreName()).withKeySerde(new KryoSerde<>()).withValueSerde(edgeValueSerde()));

    return new KGraph<>(vertices, resultedEdges, serialized);
}
Example #16
Source File: HoppingWindowExpression.java From ksql-fork-with-deep-learning-function with Apache License 2.0
@SuppressWarnings("unchecked")
@Override
public KTable applyAggregate(
    KGroupedStream groupedStream,
    Initializer initializer,
    UdafAggregator aggregator,
    Materialized<String, GenericRow, ?> materialized
) {
    return groupedStream.windowedBy(
        TimeWindows.of(sizeUnit.toMillis(size))
            .advanceBy(advanceByUnit.toMillis(advanceBy))
    ).aggregate(initializer, aggregator, materialized);
}
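For context, TimeWindows.of(size).advanceBy(advance) defines hopping windows: when the advance is smaller than the size, the windows overlap and one record lands in several of them. Below is a standalone sketch of the same idea with hypothetical topic and duration values, using the Duration overloads available since Kafka 2.1; it defines only the topology and assumes default serdes are set via configuration.

import java.time.Duration;
import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.streams.StreamsBuilder;
import org.apache.kafka.streams.kstream.Consumed;
import org.apache.kafka.streams.kstream.KTable;
import org.apache.kafka.streams.kstream.TimeWindows;
import org.apache.kafka.streams.kstream.Windowed;

public class HoppingWindowSketch {
    public static void main(String[] args) {
        StreamsBuilder builder = new StreamsBuilder();

        // 5-minute windows that advance every minute: each event is counted
        // in up to five overlapping windows.
        KTable<Windowed<String>, Long> counts = builder
            .stream("events", Consumed.with(Serdes.String(), Serdes.String())) // hypothetical topic
            .groupByKey()
            .windowedBy(TimeWindows.of(Duration.ofMinutes(5)).advanceBy(Duration.ofMinutes(1)))
            .count();

        counts.toStream().foreach((windowedKey, count) ->
            System.out.println(windowedKey + " -> " + count));
    }
}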
Example #17
Source File: LastLoginDemo.java From Kafka-Streams-Real-time-Stream-Processing with The Unlicense
public static void main(String[] args) {
    Properties properties = new Properties();
    properties.put(StreamsConfig.APPLICATION_ID_CONFIG, AppConfigs.applicationID);
    properties.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, AppConfigs.bootstrapServers);
    properties.put(StreamsConfig.STATE_DIR_CONFIG, AppConfigs.stateStoreName);

    StreamsBuilder streamsBuilder = new StreamsBuilder();

    KTable<String, UserDetails> userDetailsKTable = streamsBuilder.table(
        AppConfigs.userMasterTopic,
        Consumed.with(AppSerdes.String(), AppSerdes.UserDetails())
    );

    KTable<String, UserLogin> userLoginKTable = streamsBuilder.table(
        AppConfigs.lastLoginTopic,
        Consumed.with(AppSerdes.String(), AppSerdes.UserLogin())
    );

    userLoginKTable.join(userDetailsKTable,
        (userLogin, userDetails) -> {
            userDetails.setLastLogin(userLogin.getCreatedTime());
            return userDetails;
        }
    ).toStream().to(AppConfigs.userMasterTopic,
        Produced.with(AppSerdes.String(), AppSerdes.UserDetails()));

    userDetailsKTable.toStream().foreach(
        (k, userDetails) -> logger.info("Key = " + k + " Value = " + userDetails)
    );

    KafkaStreams streams = new KafkaStreams(streamsBuilder.build(), properties);
    streams.start();
    Runtime.getRuntime().addShutdownHook(new Thread(streams::close));
}
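Note that the joined result is written back to userMasterTopic, the same topic that feeds userDetailsKTable, so the login timestamp is folded into the user master record itself. Because KTable-KTable joins emit on updates to either side, this write-back pattern deserves care: each write re-enters the join's own input.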
Example #18
Source File: StreamingTable.java From Kafka-Streams-Real-time-Stream-Processing with The Unlicense
public static void main(final String[] args) {
    final Properties props = new Properties();
    props.put(StreamsConfig.APPLICATION_ID_CONFIG, "StreamingTable");
    props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
    props.put(StreamsConfig.STATE_DIR_CONFIG, "state-store");
    props.put(StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG, Serdes.String().getClass().getName());
    props.put(StreamsConfig.DEFAULT_VALUE_SERDE_CLASS_CONFIG, Serdes.String().getClass().getName());
    props.put(StreamsConfig.DEFAULT_TIMESTAMP_EXTRACTOR_CLASS_CONFIG, WallclockTimestampExtractor.class.getName());

    //Uncomment to Enable record cache of size 10 MB.
    //props.put(StreamsConfig.CACHE_MAX_BYTES_BUFFERING_CONFIG, 10 * 1024 * 1024L);
    //Uncomment to Set commit interval to 1 second.
    //props.put(StreamsConfig.COMMIT_INTERVAL_MS_CONFIG, 1000);

    StreamsBuilder streamBuilder = new StreamsBuilder();
    KTable<String, String> KT0 = streamBuilder.table("stock-tick");

    /*
    //Uncomment this block and comment next line to suppress
    KTable<String, String> KT1 = KT0.filter((key, value) -> key.contains("HDFCBANK"))
        .suppress(Suppressed.untilTimeLimit(
            Duration.ofMinutes(5),
            Suppressed.BufferConfig.maxBytes(1000000L).emitEarlyWhenFull())
        );
    */
    KTable<String, String> KT1 = KT0.filter((key, value) -> key.contains("HDFCBANK"));

    KStream<String, String> KS2 = KT1.toStream();
    KS2.peek((k, v) -> System.out.println("Key = " + k + " Value = " + v));

    KafkaStreams streams = new KafkaStreams(streamBuilder.build(), props);
    streams.start();
    Runtime.getRuntime().addShutdownHook(new Thread(streams::close));
}
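The commented-out block shows how suppress(Suppressed.untilTimeLimit(...)) would hold back intermediate updates for five minutes per key, emitting early only if the 1,000,000-byte buffer fills, so downstream operators see fewer, more final results.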
Example #19
Source File: KGraph.java From kafka-graphs with Apache License 2.0
public KGraph<K, VV, EV> filterOnVertices(Predicate<K, VV> vertexFilter) {
    KTable<K, VV> filteredVertices = vertices.filter(vertexFilter);

    KTable<Edge<K>, EV> remainingEdges = edgesBySource()
        .join(filteredVertices, (e, v) -> e, Joined.with(keySerde(), new KryoSerde<>(), vertexValueSerde()))
        .map((k, edge) -> new KeyValue<>(edge.target(), edge))
        .join(filteredVertices, (e, v) -> e, Joined.with(keySerde(), new KryoSerde<>(), vertexValueSerde()))
        .map((k, edge) -> new KeyValue<>(new Edge<>(edge.source(), edge.target()), edge.value()))
        .groupByKey(Grouped.with(new KryoSerde<>(), edgeValueSerde()))
        .reduce((v1, v2) -> v2,
            Materialized.<Edge<K>, EV, KeyValueStore<Bytes, byte[]>>as(generateStoreName())
                .withKeySerde(new KryoSerde<>())
                .withValueSerde(edgeValueSerde()));

    return new KGraph<>(filteredVertices, remainingEdges, serialized);
}
Example #20
Source File: AbstractKafkaStreamsBinderProcessor.java From spring-cloud-stream-binder-kafka with Apache License 2.0
private KTable<?, ?> getKTable(KafkaStreamsConsumerProperties kafkaStreamsConsumerProperties,
        StreamsBuilder streamsBuilder, Serde<?> keySerde, Serde<?> valueSerde,
        String materializedAs, String bindingDestination, Topology.AutoOffsetReset autoOffsetReset) {
    final Consumed<?, ?> consumed = getConsumed(kafkaStreamsConsumerProperties, keySerde, valueSerde, autoOffsetReset);
    return materializedAs != null
        ? materializedAs(streamsBuilder, bindingDestination, materializedAs,
            keySerde, valueSerde, autoOffsetReset, kafkaStreamsConsumerProperties)
        : streamsBuilder.table(bindingDestination, consumed);
}
Example #21
Source File: KafkaStreamsStreamListenerSetupMethodOrchestrator.java From spring-cloud-stream-binder-kafka with Apache License 2.0
private boolean methodParameterSupports(Method method) {
    boolean supports = false;
    for (int i = 0; i < method.getParameterCount(); i++) {
        MethodParameter methodParameter = MethodParameter.forExecutable(method, i);
        Class<?> parameterType = methodParameter.getParameterType();
        if (parameterType.equals(KStream.class) || parameterType.equals(KTable.class)
                || parameterType.equals(GlobalKTable.class)) {
            supports = true;
        }
    }
    return supports;
}
Example #22
Source File: TopScorersBuilder.java From football-events with MIT License
public void build() {
    KTable<String, TopPlayers> top10Table = builder
        .stream(PLAYER_GOALS_TOPIC, Consumed.with(Serdes.String(), playerGoalsSerde))
        // create a single record that includes the top scorers
        .groupBy((playerId, playerGoals) -> "topPlayers", Serialized.with(Serdes.String(), playerGoalsSerde))
        .aggregate(() -> new TopPlayers(10),
            (playerId, playerStat, top10) -> top10.aggregate(playerStat),
            materialized(TOP_SCORERS_STORE, topSerde));

    top10Table.toStream().to(TOP_SCORERS_TOPIC, Produced.with(String(), topSerde));
}
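Re-keying every record to the constant "topPlayers" key funnels the whole stream into a single group (and hence a single partition), so one TopPlayers aggregate can rank all scorers; this trades parallelism for a global view.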
Example #23
Source File: StreamToTableJoinFunctionTests.java From spring-cloud-stream-binder-kafka with Apache License 2.0
@Bean
public BiFunction<KStream<String, Long>, KTable<String, String>, KStream<String, Long>> process() {
    return (userClicksStream, userRegionsTable) -> (userClicksStream
        .leftJoin(userRegionsTable,
            (clicks, region) -> new RegionWithClicks(region == null ? "UNKNOWN" : region, clicks),
            Joined.with(Serdes.String(), Serdes.Long(), null))
        .map((user, regionWithClicks) -> new KeyValue<>(regionWithClicks.getRegion(), regionWithClicks.getClicks()))
        .groupByKey(Grouped.with(Serdes.String(), Serdes.Long()))
        .reduce(Long::sum)
        .toStream());
}
Example #24
Source File: KGraph.java From kafka-graphs with Apache License 2.0
private KTable<K, Iterable<EdgeWithValue<K, EV>>> edgesGroupedBy(Function<Edge<K>, K> fun) {
    return edges()
        .groupBy(new GroupEdges(fun), Grouped.with(keySerde(), new KryoSerde<>()))
        .aggregate(
            HashSet::new,
            (aggKey, value, aggregate) -> {
                ((Set<EdgeWithValue<K, EV>>) aggregate).add(value);
                return aggregate;
            },
            (aggKey, value, aggregate) -> {
                ((Set<EdgeWithValue<K, EV>>) aggregate).remove(value);
                return aggregate;
            },
            Materialized.with(keySerde(), new KryoSerde<>()));
}
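Because the input here is a KTable rather than a KStream, aggregate takes both an adder and a subtractor: when an edge's group key changes or an edge is deleted, the old value is first removed from the previous group's set before the new value is added, keeping the aggregate consistent under updates.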
Example #25
Source File: StreamUtils.java From kafka-graphs with Apache License 2.0
public static <K, V> KTable<K, V> tableFromCollection(
        StreamsBuilder builder,
        Properties props,
        Serde<K> keySerde,
        Serde<V> valueSerde,
        Collection<KeyValue<K, V>> values) {
    return tableFromCollection(builder, props, "temp-" + UUID.randomUUID(), 50, (short) 1,
        keySerde, valueSerde, values);
}
Example #26
Source File: KafkaStreamsTableJoin.java From spring-cloud-stream-samples with Apache License 2.0
@Bean
public BiFunction<KStream<String, Long>, KTable<String, String>, KStream<String, Long>> process() {
    return (userClicksStream, userRegionsTable) -> userClicksStream
        .leftJoin(userRegionsTable,
            (clicks, region) -> new RegionWithClicks(region == null ? "UNKNOWN" : region, clicks),
            Joined.with(Serdes.String(), Serdes.Long(), null))
        .map((user, regionWithClicks) -> new KeyValue<>(regionWithClicks.getRegion(), regionWithClicks.getClicks()))
        .groupByKey(Grouped.with(Serdes.String(), Serdes.Long()))
        .reduce((firstClicks, secondClicks) -> firstClicks + secondClicks)
        .toStream();
}
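This is essentially the same topology as Example #23; the only cosmetic difference is the explicit addition lambda in reduce, which Example #23 writes as the method reference Long::sum.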
Example #27
Source File: TumblingWindowExpression.java From ksql-fork-with-deep-learning-function with Apache License 2.0
@SuppressWarnings("unchecked")
@Override
public KTable applyAggregate(final KGroupedStream groupedStream,
                             final Initializer initializer,
                             final UdafAggregator aggregator,
                             final Materialized<String, GenericRow, ?> materialized) {
    return groupedStream.windowedBy(TimeWindows.of(sizeUnit.toMillis(size)))
        .aggregate(initializer, aggregator, materialized);
}
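Compare with Example #16: a tumbling window is simply a hopping window whose advance equals its size, which is what TimeWindows.of(...) gives by default, so no advanceBy(...) call is needed here.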
Example #28
Source File: SchemaKGroupedStream.java From ksql-fork-with-deep-learning-function with Apache License 2.0
@SuppressWarnings("unchecked")
public SchemaKTable aggregate(
    final Initializer initializer,
    final UdafAggregator aggregator,
    final WindowExpression windowExpression,
    final Serde<GenericRow> topicValueSerDe) {

    final KTable aggKtable;
    if (windowExpression != null) {
        final Materialized<String, GenericRow, ?> materialized =
            Materialized.<String, GenericRow, WindowStore<Bytes, byte[]>>with(
                Serdes.String(), topicValueSerDe);

        final KsqlWindowExpression ksqlWindowExpression = windowExpression.getKsqlWindowExpression();
        aggKtable = ksqlWindowExpression.applyAggregate(
            kgroupedStream, initializer, aggregator, materialized);
    } else {
        aggKtable = kgroupedStream.aggregate(
            initializer, aggregator, Materialized.with(Serdes.String(), topicValueSerDe));
    }
    return new SchemaKTable(
        schema,
        aggKtable,
        keyField,
        sourceSchemaKStreams,
        windowExpression != null,
        SchemaKStream.Type.AGGREGATE,
        functionRegistry,
        schemaRegistryClient
    );
}
Example #29
Source File: SummaryBulkAggregation.java From kafka-graphs with Apache License 2.0
@SuppressWarnings("unchecked")
@Override
public KTable<Windowed<Short>, T> run(final KStream<Edge<K>, EV> edgeStream) {
    //For parallel window support we key the edge stream by partition and apply a parallel fold per partition.
    //Finally, we merge all locally combined results into our final graph aggregation property.
    KTable<Windowed<Short>, S> partialAgg = edgeStream
        .groupByKey(Grouped.with(new KryoSerde<>(), new KryoSerde<>()))
        .windowedBy(TimeWindows.of(Duration.ofMillis(timeMillis)))
        .aggregate(this::initialValue, new PartialAgg<>(updateFun()))
        .toStream()
        .groupBy((k, v) -> GLOBAL_KEY)
        .windowedBy(TimeWindows.of(Duration.ofMillis(timeMillis)))
        .reduce(combineFun())
        .mapValues(aggregator(edgeStream),
            Materialized.<Windowed<Short>, S, KeyValueStore<Bytes, byte[]>>as(KGraph.generateStoreName())
                .withKeySerde(new KryoSerde<>())
                .withValueSerde(new KryoSerde<>()));

    if (transform() != null) {
        return partialAgg.mapValues(
            transform(),
            Materialized.<Windowed<Short>, T, KeyValueStore<Bytes, byte[]>>as(KGraph.generateStoreName())
                .withKeySerde(new KryoSerde<>())
                .withValueSerde(new KryoSerde<>())
        );
    }
    return (KTable<Windowed<Short>, T>) partialAgg;
}
Example #30
Source File: ConnectedComponentsTest.java From kafka-graphs with Apache License 2.0
@Test
public void testGridConnectedComponents() throws Exception {
    String suffix = "grid";
    StreamsBuilder builder = new StreamsBuilder();
    Properties producerConfig = ClientUtils.producerConfig(CLUSTER.bootstrapServers(),
        LongSerializer.class, LongSerializer.class, new Properties());
    KGraph<Long, Tuple2<Long, Long>, Long> gridGraph =
        GraphGenerators.gridGraph(builder, producerConfig, 10, 10);
    KTable<Long, Long> initialVertices = gridGraph.vertices().mapValues((id, v) -> id);
    KGraph<Long, Long, Long> graph = new KGraph<>(initialVertices, gridGraph.edges(),
        GraphSerialized.with(Serdes.Long(), Serdes.Long(), Serdes.Long()));

    Properties props = ClientUtils.streamsConfig("prepare-" + suffix, "prepare-client-" + suffix,
        CLUSTER.bootstrapServers(), graph.keySerde().getClass(), graph.vertexValueSerde().getClass());
    CompletableFuture<Map<TopicPartition, Long>> state = GraphUtils.groupEdgesBySourceAndRepartition(
        builder, props, graph, "vertices-" + suffix, "edgesGroupedBySource-" + suffix, 2, (short) 1);
    Map<TopicPartition, Long> offsets = state.get();

    algorithm = new PregelGraphAlgorithm<>(null, "run-" + suffix, CLUSTER.bootstrapServers(),
        CLUSTER.zKConnectString(), "vertices-" + suffix, "edgesGroupedBySource-" + suffix, offsets,
        graph.serialized(), "solutionSet-" + suffix, "solutionSetStore-" + suffix,
        "workSet-" + suffix, 2, (short) 1, Collections.emptyMap(), Optional.empty(),
        new ConnectedComponents<>());
    props = ClientUtils.streamsConfig("run-" + suffix, "run-client-" + suffix,
        CLUSTER.bootstrapServers(), graph.keySerde().getClass(), KryoSerde.class);
    KafkaStreams streams = algorithm.configure(new StreamsBuilder(), props).streams();

    GraphAlgorithmState<KTable<Long, Long>> paths = algorithm.run();
    paths.result().get();

    Thread.sleep(2000);

    Map<Long, Long> map = StreamUtils.mapFromStore(paths.streams(), "solutionSetStore-" + suffix);
    log.debug("result: {}", map);

    for (long i = 0; i < 100; i++) {
        assertEquals(0L, map.get(i).longValue());
    }
}