org.apache.kafka.streams.kstream.Consumed Java Examples
The following examples show how to use
org.apache.kafka.streams.kstream.Consumed.
The examples are drawn from open-source projects; the originating source file, project, and license are noted above each example.
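Before diving in, a minimal sketch of the Consumed API itself may help: Consumed.with(...) bundles the per-source consumption options (key/value serdes, an optional TimestampExtractor, and an offset-reset policy). The topic names and serdes below are illustrative only, not taken from any of the projects that follow.

import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.streams.StreamsBuilder;
import org.apache.kafka.streams.Topology;
import org.apache.kafka.streams.kstream.Consumed;
import org.apache.kafka.streams.kstream.Produced;
import org.apache.kafka.streams.processor.WallclockTimestampExtractor;

public class ConsumedSketch {
    public static Topology build() {
        StreamsBuilder builder = new StreamsBuilder();
        builder.stream("example-input",                                              // hypothetical topic name
                Consumed.with(Serdes.String(), Serdes.String())                      // key/value serdes
                        .withTimestampExtractor(new WallclockTimestampExtractor())   // how event time is derived
                        .withOffsetResetPolicy(Topology.AutoOffsetReset.EARLIEST))   // start position when no committed offsets exist
               .to("example-output", Produced.with(Serdes.String(), Serdes.String())); // hypothetical sink topic
        return builder.build();
    }
}

Each with* call is optional; Consumed.with(keySerde, valueSerde) alone is the most common form, as most of the examples below show.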
Example #1
Source File: EmailService.java From qcon-microservices with Apache License 2.0
private KafkaStreams processStreams(final String bootstrapServers, final String stateDir) throws IOException {
    final StreamsBuilder builder = new StreamsBuilder();

    // Create the streams/tables for the join
    final KStream<String, Order> orders = builder.stream(ORDERS.name(),
            Consumed.with(ORDERS.keySerde(), ORDERS.valueSerde()));
    final GlobalKTable<String, Customer> customers = builder.globalTable(CUSTOMERS.name(),
            Consumed.with(CUSTOMERS.keySerde(), CUSTOMERS.valueSerde()));

    // Join a stream and a table then send an email for each.
    // A GlobalKTable-to-stream join takes three arguments: the table, a mapping of the
    // stream (key, value) to the table key for the join, and the join function, which
    // takes values from the stream and table and returns the result.
    orders.join(customers,
                (orderID, order) -> order.getCustomerId(),
                (order, customer) -> new EmailTuple(order, customer))
          // Now for each tuple send an email.
          .peek((key, emailTuple) -> emailer.sendEmail(emailTuple));

    return new KafkaStreams(builder.build(), configStreams(bootstrapServers, stateDir, SERVICE_APP_ID));
}
Example #2
Source File: NameJoinGlobalKTable.java From fluent-kafka-streams-tests with MIT License
public Topology getTopologyWithIntermediateTopic() {
    final StreamsBuilder builder = new StreamsBuilder();
    final KStream<Long, Long> inputStream =
            builder.stream(INPUT_TOPIC, Consumed.with(Serdes.Long(), Serdes.Long()));

    builder.stream(NAME_INPUT, Consumed.with(Serdes.Long(), Serdes.String()))
            .mapValues(name -> name.toUpperCase())
            .to(INTERMEDIATE_TOPIC);

    final GlobalKTable<Long, String> joinTable = builder.globalTable(INTERMEDIATE_TOPIC);

    inputStream
            .join(joinTable,
                    (id, valueId) -> valueId,
                    (id, name) -> name)
            .to(OUTPUT_TOPIC, Produced.with(Serdes.Long(), Serdes.String()));

    return builder.build();
}
Example #3
Source File: FkJoinTableToTable.java From kafka-tutorials with Apache License 2.0
public Topology buildTopology(Properties envProps) {
    final StreamsBuilder builder = new StreamsBuilder();
    final String albumTopic = envProps.getProperty("album.topic.name");
    final String userTrackPurchaseTopic = envProps.getProperty("tracks.purchase.topic.name");
    final String musicInterestTopic = envProps.getProperty("music.interest.topic.name");

    final Serde<Long> longSerde = getPrimitiveAvroSerde(envProps, true);
    final Serde<MusicInterest> musicInterestSerde = getSpecificAvroSerde(envProps);
    final Serde<Album> albumSerde = getSpecificAvroSerde(envProps);
    final Serde<TrackPurchase> trackPurchaseSerde = getSpecificAvroSerde(envProps);

    final KTable<Long, Album> albums = builder.table(albumTopic, Consumed.with(longSerde, albumSerde));
    final KTable<Long, TrackPurchase> trackPurchases =
            builder.table(userTrackPurchaseTopic, Consumed.with(longSerde, trackPurchaseSerde));

    final MusicInterestJoiner trackJoiner = new MusicInterestJoiner();
    final KTable<Long, MusicInterest> musicInterestTable =
            trackPurchases.join(albums, TrackPurchase::getAlbumId, trackJoiner);

    musicInterestTable.toStream().to(musicInterestTopic, Produced.with(longSerde, musicInterestSerde));

    return builder.build();
}
Example #4
Source File: AggregatingSum.java From kafka-tutorials with Apache License 2.0
public Topology buildTopology(Properties envProps, final SpecificAvroSerde<TicketSale> ticketSaleSerde) {
    final StreamsBuilder builder = new StreamsBuilder();
    final String inputTopic = envProps.getProperty("input.topic.name");
    final String outputTopic = envProps.getProperty("output.topic.name");

    builder.stream(inputTopic, Consumed.with(Serdes.String(), ticketSaleSerde))
            // Set key to title and value to ticket value
            .map((k, v) -> new KeyValue<>((String) v.getTitle(), (Integer) v.getTicketTotalValue()))
            // Group by title
            .groupByKey(Grouped.with(Serdes.String(), Serdes.Integer()))
            // Apply SUM aggregation
            .reduce(Integer::sum)
            // Write to stream specified by outputTopic
            .toStream().to(outputTopic, Produced.with(Serdes.String(), Serdes.Integer()));

    return builder.build();
}
Example #5
Source File: KafkaDenormalizer.java From cqrs-eventsourcing-kafka with Apache License 2.0
@Override
public void start() throws Exception {
    Predicate<String, EventEnvelope> inventoryItemCreated =
            (k, v) -> k.equals(InventoryItemCreated.class.getSimpleName());
    Predicate<String, EventEnvelope> inventoryItemRenamed =
            (k, v) -> k.equals(InventoryItemRenamed.class.getSimpleName());
    Predicate<String, EventEnvelope> inventoryItemDeactivated =
            (k, v) -> k.equals(InventoryItemDeactivated.class.getSimpleName());

    StreamsBuilder builder = new StreamsBuilder();
    KStream<String, EventEnvelope>[] filteredStreams = builder
            .stream(INVENTORY_ITEM_TOPIC, Consumed.with(Serdes.String(), initializeEnvelopeSerde()))
            .selectKey((k, v) -> v.eventType)
            .branch(inventoryItemCreated, inventoryItemRenamed, inventoryItemDeactivated);

    filteredStreams[0].process(InventoryItemCreatedHandler::new);
    filteredStreams[1].process(InventoryItemRenamedHandler::new);
    filteredStreams[2].process(InventoryItemDeactivatedHandler::new);

    kafkaStreams = new KafkaStreams(builder.build(), getProperties());
    kafkaStreams.cleanUp(); // only because we are using in-memory
    kafkaStreams.start();
}
Example #6
Source File: CampaignPerformanceApp.java From Kafka-Streams-Real-time-Stream-Processing with The Unlicense
public static void main(String[] args) {
    Properties properties = new Properties();
    properties.put(StreamsConfig.APPLICATION_ID_CONFIG, AppConfigs.applicationID);
    properties.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, AppConfigs.bootstrapServers);
    properties.put(StreamsConfig.STATE_DIR_CONFIG, AppConfigs.stateStoreLocation);

    StreamsBuilder builder = new StreamsBuilder();
    AppTopology.withBuilder(builder);

    builder.stream(AppConfigs.outputTopic,
            Consumed.with(AppSerdes.String(), AppSerdes.CampaignPerformance()))
            .foreach((k, v) -> logger.info("outside = " + v));

    Topology topology = builder.build();
    KafkaStreams streams = new KafkaStreams(topology, properties);
    streams.start();

    Runtime.getRuntime().addShutdownHook(new Thread(streams::close));
}
Example #7
Source File: TimeCheckDemo.java From Kafka-Streams-Real-time-Stream-Processing with The Unlicense
public static void main(String[] args) {
    Properties props = new Properties();
    props.put(StreamsConfig.APPLICATION_ID_CONFIG, AppConfigs.applicationID);
    props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, AppConfigs.bootstrapServers);

    StreamsBuilder streamsBuilder = new StreamsBuilder();
    KStream<String, PosInvoice> KS0 = streamsBuilder.stream(AppConfigs.posTopicName,
            Consumed.with(PosSerdes.String(), PosSerdes.PosInvoice())
                    .withTimestampExtractor(new InvoiceTimeExtractor()));

    KS0.transformValues(() -> new ValueTransformer<PosInvoice, PosInvoice>() {
        private ProcessorContext context;

        @Override
        public void init(ProcessorContext processorContext) {
            this.context = processorContext;
        }

        @Override
        public PosInvoice transform(PosInvoice invoice) {
            logger.info("Invoice Time: " + new Timestamp(invoice.getCreatedTime())
                    + " Event Time: " + new Timestamp(context.timestamp()));
            return invoice;
        }

        @Override
        public void close() {
        }
    });

    logger.info("Starting Kafka Streams");
    KafkaStreams myStream = new KafkaStreams(streamsBuilder.build(), props);
    myStream.start();

    Runtime.getRuntime().addShutdownHook(new Thread(myStream::close));
}
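The InvoiceTimeExtractor wired into Consumed above is not shown in this snippet. A minimal sketch of what such an extractor might look like, assuming (as the logging above suggests) that the invoice carries its event time in getCreatedTime(); this is a hypothetical reconstruction, not the book's original class:

import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.streams.processor.TimestampExtractor;

// Hypothetical sketch -- not the original InvoiceTimeExtractor.
public class InvoiceTimeExtractor implements TimestampExtractor {
    @Override
    public long extract(ConsumerRecord<Object, Object> record, long partitionTime) {
        Object value = record.value();
        if (value instanceof PosInvoice) {
            // use the event time carried in the invoice payload
            return ((PosInvoice) value).getCreatedTime();
        }
        // fall back to the record's own timestamp
        return record.timestamp();
    }
}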
Example #8
Source File: StreamUtils.java From kafka-graphs with Apache License 2.0
public static <K, V> KStream<K, V> streamFromCollection(
        StreamsBuilder builder,
        Properties props,
        String topic,
        int numPartitions,
        short replicationFactor,
        Serde<K> keySerde,
        Serde<V> valueSerde,
        Collection<KeyValue<K, V>> values) {

    ClientUtils.createTopic(topic, numPartitions, replicationFactor, props);
    try (Producer<K, V> producer = new KafkaProducer<>(props, keySerde.serializer(), valueSerde.serializer())) {
        for (KeyValue<K, V> value : values) {
            ProducerRecord<K, V> producerRecord = new ProducerRecord<>(topic, value.key, value.value);
            producer.send(producerRecord);
        }
        producer.flush();
    }
    return builder.stream(topic, Consumed.with(keySerde, valueSerde));
}
Example #9
Source File: StreamUtils.java From kafka-graphs with Apache License 2.0
public static <K, V> KTable<K, V> tableFromCollection(
        StreamsBuilder builder,
        Properties props,
        String topic,
        int numPartitions,
        short replicationFactor,
        Serde<K> keySerde,
        Serde<V> valueSerde,
        Collection<KeyValue<K, V>> values) {

    ClientUtils.createTopic(topic, numPartitions, replicationFactor, props);
    try (Producer<K, V> producer = new KafkaProducer<>(props, keySerde.serializer(), valueSerde.serializer())) {
        for (KeyValue<K, V> value : values) {
            ProducerRecord<K, V> producerRecord = new ProducerRecord<>(topic, value.key, value.value);
            producer.send(producerRecord);
        }
        producer.flush();
    }
    return builder.table(topic, Consumed.with(keySerde, valueSerde), Materialized.with(keySerde, valueSerde));
}
Example #10
Source File: GraphUtils.java From kafka-graphs with Apache License 2.0
public static <K, VV, EV> CompletableFuture<Map<TopicPartition, Long>> groupEdgesBySourceAndRepartition(
        StreamsBuilder builder,
        Properties streamsConfig,
        String initialVerticesTopic,
        String initialEdgesTopic,
        GraphSerialized<K, VV, EV> serialized,
        String verticesTopic,
        String edgesGroupedBySourceTopic,
        int numPartitions,
        short replicationFactor) {

    KGraph<K, VV, EV> graph = new KGraph<>(
            builder.table(initialVerticesTopic, Consumed.with(serialized.keySerde(), serialized.vertexValueSerde())),
            builder.table(initialEdgesTopic, Consumed.with(new KryoSerde<>(), serialized.edgeValueSerde())),
            serialized);
    return groupEdgesBySourceAndRepartition(builder, streamsConfig, graph, verticesTopic,
            edgesGroupedBySourceTopic, numPartitions, replicationFactor);
}
Example #11
Source File: AggregatingCount.java From kafka-tutorials with Apache License 2.0
public Topology buildTopology(Properties envProps, final SpecificAvroSerde<TicketSale> ticketSaleSerde) {
    final StreamsBuilder builder = new StreamsBuilder();
    final String inputTopic = envProps.getProperty("input.topic.name");
    final String outputTopic = envProps.getProperty("output.topic.name");

    builder.stream(inputTopic, Consumed.with(Serdes.String(), ticketSaleSerde))
            // Set key to title and value to ticket value
            .map((k, v) -> new KeyValue<>((String) v.getTitle(), (Integer) v.getTicketTotalValue()))
            // Group by title
            .groupByKey(Grouped.with(Serdes.String(), Serdes.Integer()))
            // Apply COUNT method
            .count()
            // Write to stream specified by outputTopic
            .toStream().to(outputTopic, Produced.with(Serdes.String(), Serdes.Long()));

    return builder.build();
}
Example #12
Source File: SerializationTutorial.java From kafka-tutorials with Apache License 2.0
protected Topology buildTopology(Properties envProps,
                                 final SpecificAvroSerde<Movie> movieSpecificAvroSerde,
                                 final KafkaProtobufSerde<MovieProtos.Movie> movieProtoSerde) {
    final String inputAvroTopicName = envProps.getProperty("input.avro.movies.topic.name");
    final String outProtoTopicName = envProps.getProperty("output.proto.movies.topic.name");

    final StreamsBuilder builder = new StreamsBuilder();

    // topic contains values in Avro format; Long() resolves via a static import of Serdes.Long
    final KStream<Long, Movie> avroMovieStream =
            builder.stream(inputAvroTopicName, Consumed.with(Long(), movieSpecificAvroSerde));

    // convert and write movie data in Protobuf format
    avroMovieStream
            .map((key, avroMovie) -> new KeyValue<>(key,
                    MovieProtos.Movie.newBuilder()
                            .setMovieId(avroMovie.getMovieId())
                            .setTitle(avroMovie.getTitle())
                            .setReleaseYear(avroMovie.getReleaseYear())
                            .build()))
            .to(outProtoTopicName, Produced.with(Long(), movieProtoSerde));

    return builder.build();
}
Example #13
Source File: NameJoinGlobalKTable.java From fluent-kafka-streams-tests with MIT License
public Topology getTopology() {
    final StreamsBuilder builder = new StreamsBuilder();
    final KStream<Long, Long> inputStream =
            builder.stream(INPUT_TOPIC, Consumed.with(Serdes.Long(), Serdes.Long()));

    final GlobalKTable<Long, String> joinTable = builder.globalTable(NAME_INPUT);

    inputStream
            .join(joinTable,
                    (id, valueId) -> valueId,
                    (id, name) -> name)
            .to(OUTPUT_TOPIC, Produced.with(Serdes.Long(), Serdes.String()));

    return builder.build();
}
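As a usage note: a topology like this is easy to exercise with Kafka's TopologyTestDriver (from kafka-streams-test-utils; the fluent-kafka-streams-tests project this example comes from ships its own test harness for the same purpose). The sketch below assumes the topic constants resolve to "input", "name-input", and "output" and that the class has a no-arg constructor; since globalTable(NAME_INPUT) above takes no Consumed, the default serdes must be configured to match.

import java.util.Properties;
import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.streams.StreamsConfig;
import org.apache.kafka.streams.TestInputTopic;
import org.apache.kafka.streams.TestOutputTopic;
import org.apache.kafka.streams.TopologyTestDriver;

public class NameJoinSketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put(StreamsConfig.APPLICATION_ID_CONFIG, "name-join-sketch"); // arbitrary for the test driver
        props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "dummy:1234");    // never contacted by the test driver
        // globalTable(NAME_INPUT) falls back to these defaults, so they must be Long/String here.
        props.put(StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG, Serdes.Long().getClass().getName());
        props.put(StreamsConfig.DEFAULT_VALUE_SERDE_CLASS_CONFIG, Serdes.String().getClass().getName());

        try (TopologyTestDriver driver =
                new TopologyTestDriver(new NameJoinGlobalKTable().getTopology(), props)) {
            TestInputTopic<Long, String> names = driver.createInputTopic("name-input", // assumed NAME_INPUT
                    Serdes.Long().serializer(), Serdes.String().serializer());
            TestInputTopic<Long, Long> input = driver.createInputTopic("input",        // assumed INPUT_TOPIC
                    Serdes.Long().serializer(), Serdes.Long().serializer());
            TestOutputTopic<Long, String> output = driver.createOutputTopic("output",  // assumed OUTPUT_TOPIC
                    Serdes.Long().deserializer(), Serdes.String().deserializer());

            names.pipeInput(1L, "alice"); // populate the global table first
            input.pipeInput(42L, 1L);     // stream value 1L is the global-table lookup key
            System.out.println(output.readKeyValue()); // expect KeyValue(42, alice)
        }
    }
}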
Example #14
Source File: ITKafkaStreamsTracing.java From brave with Apache License 2.0
@Test
public void should_create_spans_from_stream_with_tracing_filter_predicate_false() {
    String inputTopic = testName.getMethodName() + "-input";
    String outputTopic = testName.getMethodName() + "-output";

    StreamsBuilder builder = new StreamsBuilder();
    builder.stream(inputTopic, Consumed.with(Serdes.String(), Serdes.String()))
            .transform(kafkaStreamsTracing.filter("filter-2", (key, value) -> false))
            .to(outputTopic, Produced.with(Serdes.String(), Serdes.String()));
    Topology topology = builder.build();

    KafkaStreams streams = buildKafkaStreams(topology);

    send(new ProducerRecord<>(inputTopic, TEST_KEY, TEST_VALUE));

    waitForStreamToRun(streams);

    MutableSpan spanInput = testSpanHandler.takeRemoteSpan(CONSUMER);
    assertThat(spanInput.tags()).containsEntry("kafka.topic", inputTopic);

    MutableSpan spanProcessor = testSpanHandler.takeLocalSpan();
    assertChildOf(spanProcessor, spanInput);
    // the filter transformer returns false so the record is dropped
    assertThat(spanProcessor.tags()).containsEntry(KAFKA_STREAMS_FILTERED_TAG, "true");

    streams.close();
    streams.cleanUp();
}
Example #15
Source File: ITKafkaStreamsTracing.java From brave with Apache License 2.0
@Test
public void should_create_spans_from_stream_with_tracing_processor() {
    ProcessorSupplier<String, String> processorSupplier = kafkaStreamsTracing.processor(
            "forward-1",
            () -> new AbstractProcessor<String, String>() {
                @Override
                public void process(String key, String value) {
                    try {
                        Thread.sleep(100L);
                    } catch (InterruptedException e) {
                        e.printStackTrace();
                    }
                }
            });

    String inputTopic = testName.getMethodName() + "-input";

    StreamsBuilder builder = new StreamsBuilder();
    builder.stream(inputTopic, Consumed.with(Serdes.String(), Serdes.String()))
            .process(processorSupplier);
    Topology topology = builder.build();

    KafkaStreams streams = buildKafkaStreams(topology);

    send(new ProducerRecord<>(inputTopic, TEST_KEY, TEST_VALUE));

    waitForStreamToRun(streams);

    MutableSpan spanInput = testSpanHandler.takeRemoteSpan(CONSUMER);
    assertThat(spanInput.tags()).containsEntry("kafka.topic", inputTopic);

    MutableSpan spanProcessor = testSpanHandler.takeLocalSpan();
    assertChildOf(spanProcessor, spanInput);

    streams.close();
    streams.cleanUp();
}
Example #16
Source File: ITKafkaStreamsTracing.java From brave with Apache License 2.0
@Test
public void should_create_spans_from_stream_with_tracing_filter_predicate_true() {
    String inputTopic = testName.getMethodName() + "-input";
    String outputTopic = testName.getMethodName() + "-output";

    StreamsBuilder builder = new StreamsBuilder();
    builder.stream(inputTopic, Consumed.with(Serdes.String(), Serdes.String()))
            .transform(kafkaStreamsTracing.filter("filter-1", (key, value) -> true))
            .to(outputTopic, Produced.with(Serdes.String(), Serdes.String()));
    Topology topology = builder.build();

    KafkaStreams streams = buildKafkaStreams(topology);

    send(new ProducerRecord<>(inputTopic, TEST_KEY, TEST_VALUE));

    waitForStreamToRun(streams);

    MutableSpan spanInput = testSpanHandler.takeRemoteSpan(CONSUMER);
    assertThat(spanInput.tags()).containsEntry("kafka.topic", inputTopic);

    MutableSpan spanProcessor = testSpanHandler.takeLocalSpan();
    assertChildOf(spanProcessor, spanInput);
    // the filter transformer returns true so the record is not dropped
    assertThat(spanProcessor.tags()).containsEntry(KAFKA_STREAMS_FILTERED_TAG, "false");

    MutableSpan spanOutput = testSpanHandler.takeRemoteSpan(PRODUCER);
    assertThat(spanOutput.tags()).containsEntry("kafka.topic", outputTopic);
    assertChildOf(spanOutput, spanProcessor);

    streams.close();
    streams.cleanUp();
}
Example #17
Source File: AbstractKafkaStreamsBinderProcessor.java From spring-cloud-stream-binder-kafka with Apache License 2.0
private <K, V> Consumed<K, V> getConsumed(KafkaStreamsConsumerProperties kafkaStreamsConsumerProperties,
        Serde<K> keySerde, Serde<V> valueSerde, Topology.AutoOffsetReset autoOffsetReset) {
    TimestampExtractor timestampExtractor = null;
    if (!StringUtils.isEmpty(kafkaStreamsConsumerProperties.getTimestampExtractorBeanName())) {
        timestampExtractor = applicationContext.getBean(
                kafkaStreamsConsumerProperties.getTimestampExtractorBeanName(), TimestampExtractor.class);
    }
    final Consumed<K, V> consumed = Consumed.with(keySerde, valueSerde)
            .withOffsetResetPolicy(autoOffsetReset);
    if (timestampExtractor != null) {
        // Consumed's with* methods mutate and return the same instance,
        // so the result does not need to be reassigned here.
        consumed.withTimestampExtractor(timestampExtractor);
    }
    return consumed;
}
Example #18
Source File: KStreamsTopologyDescriptionParserTest.java From netbeans-mmd-plugin with Apache License 2.0
@Test
public void testKsDsl2() {
    final String storeName = "stateStore";
    final String globalStoreName = "glob-stateStore";
    final StreamsBuilder builder = new StreamsBuilder();
    final StoreBuilder<KeyValueStore<String, String>> storeBuilder = Stores.keyValueStoreBuilder(
            Stores.persistentKeyValueStore(storeName),
            Serdes.String(),
            Serdes.String());
    final StoreBuilder<KeyValueStore<String, String>> globalStoreBuilder = Stores.keyValueStoreBuilder(
            Stores.persistentKeyValueStore(globalStoreName),
            Serdes.String(),
            Serdes.String());

    builder.addGlobalStore(globalStoreBuilder, "some-global-topic",
            Consumed.with(Serdes.Short(), Serdes.String(),
                    new WallclockTimestampExtractor(), Topology.AutoOffsetReset.EARLIEST),
            FakeProcessor::new);
    builder.addStateStore(storeBuilder);
    builder.<String, String>stream("input")
            .filter((k, v) -> v.endsWith("FOO"))
            .through("some-through-topic")
            .transformValues(() -> new SimpleValueTransformer(storeName), storeName)
            .to("output");

    final Topology topology = builder.build();
    final String text = topology.describe().toString();
    System.out.println(text);

    final KStreamsTopologyDescriptionParser parsed = new KStreamsTopologyDescriptionParser(text);
    assertEquals(8, parsed.size());
}
Example #19
Source File: MegabusRefResolver.java From emodb with Apache License 2.0
@Override
protected Topology topology() {
    StreamsBuilder streamsBuilder = new StreamsBuilder();

    // merge the ref stream with the ref-retry stream. They must be merged into a single stream for ordering purposes
    final KStream<String, List<MegabusRef>> refStream = streamsBuilder.stream(_megabusRefTopic.getName(),
            Consumed.with(Serdes.String(), new JsonPOJOSerde<>(new TypeReference<List<MegabusRef>>() {})))
            .merge(streamsBuilder.stream(_retryRefTopic.getName(),
                    Consumed.with(Serdes.String(), new JsonPOJOSerde<>(new TypeReference<List<MegabusRef>>() {}))));

    // resolve refs into documents
    KStream<String, ResolutionResult> resolutionResults = refStream.mapValues(value -> {
        try {
            return resolveRefs(value);
        } catch (Throwable t) {
            _errorProcessingMeter.mark();
            throw t;
        }
    });

    resolutionResults
            // extract the resolved documents
            .flatMap((key, value) -> value.getKeyedResolvedDocs())
            // convert deleted documents to null
            .mapValues(doc -> Optional.ofNullable(doc).map(Intrinsic::isDeleted).orElse(true) ? null : doc)
            // send to megabus
            .to(_megabusResolvedTopic.getName(),
                    Produced.with(Serdes.String(), new JsonPOJOSerde<>(new TypeReference<Map<String, Object>>() {})));

    resolutionResults
            // filter out all resolution results without missing refs
            .filterNot((key, result) -> result.getMissingRefs().isEmpty())
            // add timestamp for missing refs
            .mapValues(result -> new MissingRefCollection(result.getMissingRefs(), Date.from(_clock.instant())))
            // send to missing topic
            .to(_missingRefTopic.getName(),
                    Produced.with(Serdes.String(), new JsonPOJOSerde<>(MissingRefCollection.class)));

    return streamsBuilder.build();
}
Example #20
Source File: AbstractKafkaStreamsBinderProcessor.java From spring-cloud-stream-binder-kafka with Apache License 2.0
private GlobalKTable<?, ?> getGlobalKTable(KafkaStreamsConsumerProperties kafkaStreamsConsumerProperties,
        StreamsBuilder streamsBuilder, Serde<?> keySerde, Serde<?> valueSerde,
        String materializedAs, String bindingDestination, Topology.AutoOffsetReset autoOffsetReset) {
    final Consumed<?, ?> consumed = getConsumed(kafkaStreamsConsumerProperties, keySerde, valueSerde, autoOffsetReset);
    return materializedAs != null
            ? materializedAsGlobalKTable(streamsBuilder, bindingDestination, materializedAs,
                    keySerde, valueSerde, autoOffsetReset, kafkaStreamsConsumerProperties)
            : streamsBuilder.globalTable(bindingDestination, consumed);
}
Example #21
Source File: ITKafkaStreamsTracing.java From brave with Apache License 2.0
@Test
public void should_create_spans_from_stream_with_tracing_filter_not_predicate_false() {
    String inputTopic = testName.getMethodName() + "-input";
    String outputTopic = testName.getMethodName() + "-output";

    StreamsBuilder builder = new StreamsBuilder();
    builder.stream(inputTopic, Consumed.with(Serdes.String(), Serdes.String()))
            .transform(kafkaStreamsTracing.filterNot("filterNot-2", (key, value) -> false))
            .to(outputTopic, Produced.with(Serdes.String(), Serdes.String()));
    Topology topology = builder.build();

    KafkaStreams streams = buildKafkaStreams(topology);

    send(new ProducerRecord<>(inputTopic, TEST_KEY, TEST_VALUE));

    waitForStreamToRun(streams);

    MutableSpan spanInput = testSpanHandler.takeRemoteSpan(CONSUMER);
    assertThat(spanInput.tags()).containsEntry("kafka.topic", inputTopic);

    MutableSpan spanProcessor = testSpanHandler.takeLocalSpan();
    assertChildOf(spanProcessor, spanInput);
    // the filterNot predicate returns false so the record is not dropped
    assertThat(spanProcessor.tags()).containsEntry(KAFKA_STREAMS_FILTERED_TAG, "false");

    MutableSpan spanOutput = testSpanHandler.takeRemoteSpan(PRODUCER);
    assertThat(spanOutput.tags()).containsEntry("kafka.topic", outputTopic);
    assertChildOf(spanOutput, spanProcessor);

    streams.close();
    streams.cleanUp();
}
Example #22
Source File: AbstractKafkaStreamsBinderProcessor.java From spring-cloud-stream-binder-kafka with Apache License 2.0
private KTable<?, ?> getKTable(KafkaStreamsConsumerProperties kafkaStreamsConsumerProperties,
        StreamsBuilder streamsBuilder, Serde<?> keySerde, Serde<?> valueSerde,
        String materializedAs, String bindingDestination, Topology.AutoOffsetReset autoOffsetReset) {
    final Consumed<?, ?> consumed = getConsumed(kafkaStreamsConsumerProperties, keySerde, valueSerde, autoOffsetReset);
    return materializedAs != null
            ? materializedAs(streamsBuilder, bindingDestination, materializedAs,
                    keySerde, valueSerde, autoOffsetReset, kafkaStreamsConsumerProperties)
            : streamsBuilder.table(bindingDestination, consumed);
}
Example #23
Source File: ITKafkaStreamsTracing.java From brave with Apache License 2.0
@Test
public void should_create_spans_from_stream_with_tracing_filter_not_predicate_true() {
    String inputTopic = testName.getMethodName() + "-input";
    String outputTopic = testName.getMethodName() + "-output";

    StreamsBuilder builder = new StreamsBuilder();
    builder.stream(inputTopic, Consumed.with(Serdes.String(), Serdes.String()))
            .transform(kafkaStreamsTracing.filterNot("filterNot-1", (key, value) -> true))
            .to(outputTopic, Produced.with(Serdes.String(), Serdes.String()));
    Topology topology = builder.build();

    KafkaStreams streams = buildKafkaStreams(topology);

    send(new ProducerRecord<>(inputTopic, TEST_KEY, TEST_VALUE));

    waitForStreamToRun(streams);

    MutableSpan spanInput = testSpanHandler.takeRemoteSpan(CONSUMER);
    assertThat(spanInput.tags()).containsEntry("kafka.topic", inputTopic);

    MutableSpan spanProcessor = testSpanHandler.takeLocalSpan();
    assertChildOf(spanProcessor, spanInput);
    // the filterNot transformer returns true so the record is dropped
    assertThat(spanProcessor.tags()).containsEntry(KAFKA_STREAMS_FILTERED_TAG, "true");

    streams.close();
    streams.cleanUp();
}
Example #24
Source File: MissingRefDelayProcessor.java From emodb with Apache License 2.0
@Override
protected Topology topology() {
    StreamsBuilder streamsBuilder = new StreamsBuilder();

    streamsBuilder.stream(_missingRefTopic.getName(),
            Consumed.with(Serdes.String(), new JsonPOJOSerde<>(MissingRefCollection.class)))
            .peek(this::delayRefs)
            .mapValues(MissingRefCollection::getMissingRefs)
            .to(_retryRefTopic.getName(),
                    Produced.with(Serdes.String(), new JsonPOJOSerde<>(new TypeReference<List<MegabusRef>>() {})));

    return streamsBuilder.build();
}
Example #25
Source File: AbstractKafkaStreamsBinderProcessor.java From spring-cloud-stream-binder-kafka with Apache License 2.0
protected KStream<?, ?> getKStream(String inboundName, BindingProperties bindingProperties,
        KafkaStreamsConsumerProperties kafkaStreamsConsumerProperties, StreamsBuilder streamsBuilder,
        Serde<?> keySerde, Serde<?> valueSerde, Topology.AutoOffsetReset autoOffsetReset, boolean firstBuild) {
    if (firstBuild) {
        addStateStoreBeans(streamsBuilder);
    }

    KStream<?, ?> stream;
    if (this.kafkaStreamsExtendedBindingProperties
            .getExtendedConsumerProperties(inboundName).isDestinationIsPattern()) {
        final Pattern pattern = Pattern.compile(this.bindingServiceProperties.getBindingDestination(inboundName));
        stream = streamsBuilder.stream(pattern);
    }
    else {
        String[] bindingTargets = StringUtils.commaDelimitedListToStringArray(
                this.bindingServiceProperties.getBindingDestination(inboundName));
        final Consumed<?, ?> consumed = getConsumed(kafkaStreamsConsumerProperties, keySerde, valueSerde, autoOffsetReset);
        stream = streamsBuilder.stream(Arrays.asList(bindingTargets), consumed);
    }

    final boolean nativeDecoding = this.bindingServiceProperties
            .getConsumerProperties(inboundName).isUseNativeDecoding();
    if (nativeDecoding) {
        LOG.info("Native decoding is enabled for " + inboundName + ". Inbound deserialization done at the broker.");
    }
    else {
        LOG.info("Native decoding is disabled for " + inboundName + ". Inbound message conversion done by Spring Cloud Stream.");
    }

    return getkStream(bindingProperties, stream, nativeDecoding);
}
Example #26
Source File: ITKafkaStreamsTracing.java From brave with Apache License 2.0
@Test
public void should_create_spans_from_stream_with_tracing_mark_as_filtered_predicate_false() {
    String inputTopic = testName.getMethodName() + "-input";
    String outputTopic = testName.getMethodName() + "-output";

    StreamsBuilder builder = new StreamsBuilder();
    builder.stream(inputTopic, Consumed.with(Serdes.String(), Serdes.String()))
            .transformValues(kafkaStreamsTracing.markAsFiltered("filter-2", (key, value) -> false))
            .filterNot((k, v) -> Objects.isNull(v))
            .to(outputTopic, Produced.with(Serdes.String(), Serdes.String()));
    Topology topology = builder.build();

    KafkaStreams streams = buildKafkaStreams(topology);

    send(new ProducerRecord<>(inputTopic, TEST_KEY, TEST_VALUE));

    waitForStreamToRun(streams);

    MutableSpan spanInput = testSpanHandler.takeRemoteSpan(CONSUMER);
    assertThat(spanInput.tags()).containsEntry("kafka.topic", inputTopic);

    MutableSpan spanProcessor = testSpanHandler.takeLocalSpan();
    assertChildOf(spanProcessor, spanInput);
    // the markAsFiltered predicate returns false, so the value is nulled
    // and the record is dropped by the filterNot that follows
    assertThat(spanProcessor.tags()).containsEntry(KAFKA_STREAMS_FILTERED_TAG, "true");

    streams.close();
    streams.cleanUp();
}
Example #27
Source File: KafkaStreamsPipeline.java From quarkus with Apache License 2.0
@Produces
public Topology buildTopology() {
    StreamsBuilder builder = new StreamsBuilder();

    ObjectMapperSerde<Category> categorySerde = new ObjectMapperSerde<>(Category.class);
    ObjectMapperSerde<Customer> customerSerde = new ObjectMapperSerde<>(Customer.class);
    ObjectMapperSerde<EnrichedCustomer> enrichedCustomerSerde = new ObjectMapperSerde<>(EnrichedCustomer.class);

    KTable<Integer, Category> categories = builder.table(
            "streams-test-categories",
            Consumed.with(Serdes.Integer(), categorySerde));

    KStream<Integer, EnrichedCustomer> customers = builder
            .stream("streams-test-customers", Consumed.with(Serdes.Integer(), customerSerde))
            .selectKey((id, customer) -> customer.category)
            .join(
                    categories,
                    (customer, category) -> new EnrichedCustomer(customer.id, customer.name, category),
                    Joined.with(Serdes.Integer(), customerSerde, categorySerde));

    KeyValueBytesStoreSupplier storeSupplier = Stores.inMemoryKeyValueStore("countstore");
    customers.groupByKey()
            .count(Materialized.<Integer, Long>as(storeSupplier));

    customers.selectKey((categoryId, customer) -> customer.id)
            .to("streams-test-customers-processed", Produced.with(Serdes.Integer(), enrichedCustomerSerde));

    return builder.build();
}
Example #28
Source File: VehicleStatusCountProcessor.java From microservice-patterns with Apache License 2.0
@Bean
public KStream<String, Long> statusCountStreamProcessor(StreamsBuilder streamsBuilder) {
    KStream<Integer, VehicleLocation> stream = streamsBuilder.stream("gpslocation", // read from topic
            Consumed.with(Serdes.Integer(), new JsonSerde<>(VehicleLocation.class))); // using Integer and JSON serdes

    return stream.map((k, v) -> {
                // transform the key to Online/Offline based on status
                String online = v.isOnline() ? "Online" : "Offline";
                return new KeyValue<>(online, v);
            })
            // group by the newly mapped key from the previous step
            // (Serialized is deprecated since Kafka 2.1 in favor of Grouped)
            .groupByKey(Serialized.with(Serdes.String(), new JsonSerde<>(VehicleLocation.class)))
            .count(Materialized.as("statusCount")) // materialize this value to a state store
            .toStream();
}
Example #29
Source File: EventSourcedTopologyTest.java From simplesource with Apache License 2.0
@Test
void testDistributor() {
    String topicNamesTopic = "topic_names";
    String outputTopic = "output_topic";

    TopologyContext<String, TestCommand, TestEvent, Optional<TestAggregate>> ctx = ctxBuilder.buildContext();

    driver = new TestDriverInitializer().build(builder -> {
        EventSourcedTopology.InputStreams<String, TestCommand> inputStreams =
                EventSourcedTopology.addTopology(ctx, builder);
        DistributorContext<CommandId, CommandResponse<String>> context = new DistributorContext<>(
                topicNamesTopic,
                new DistributorSerdes<>(ctx.serdes().commandId(), ctx.serdes().commandResponse()),
                ctx.aggregateSpec().generation().stateStoreSpec(),
                CommandResponse::commandId,
                CommandId::id);
        KStream<CommandId, String> topicNames =
                builder.stream(topicNamesTopic, Consumed.with(ctx.serdes().commandId(), Serdes.String()));
        ResultDistributor.distribute(context, inputStreams.commandResponse, topicNames);
    });

    TestContextDriver<String, TestCommand, TestEvent, Optional<TestAggregate>> ctxDriver =
            new TestContextDriver<>(ctx, driver);

    CommandRequest<String, TestCommand> commandRequest = CommandRequest.of(
            CommandId.random(), key, Sequence.first(), new TestCommand.CreateCommand("Name 2"));

    ctxDriver.getPublisher(ctx.serdes().commandId(), Serdes.String())
            .publish(topicNamesTopic, commandRequest.commandId(), outputTopic);
    ctxDriver.publishCommand(key, commandRequest);

    ProducerRecord<String, CommandResponse<String>> output = driver.readOutput(outputTopic,
            Serdes.String().deserializer(), ctx.serdes().commandResponse().deserializer());

    assertThat(output.key()).isEqualTo(
            String.format("%s:%s", outputTopic, commandRequest.commandId().id().toString()));
    assertThat(output.value().sequenceResult().isSuccess()).isEqualTo(true);
}
Example #30
Source File: TopologyContext.java From simplesource with Apache License 2.0
public TopologyContext(AggregateSpec<K, C, E, A> aggregateSpec) {
    this.aggregateSpec = aggregateSpec;
    this.commandResponseRetentionInSeconds = aggregateSpec.generation().stateStoreSpec().retentionInSeconds();

    serdes = aggregateSpec.serialization().serdes();
    commandRequestConsumed = Consumed.with(serdes().aggregateKey(), serdes().commandRequest());
    commandResponseConsumed = Consumed.with(serdes().aggregateKey(), serdes().commandResponse());
    eventsConsumedProduced = Produced.with(serdes().aggregateKey(), serdes().valueWithSequence());
    aggregatedUpdateProduced = Produced.with(serdes().aggregateKey(), serdes().aggregateUpdate());
    commandResponseProduced = Produced.with(serdes().aggregateKey(), serdes().commandResponse());
    serializedCommandResponse = Serialized.with(serdes().commandId(), serdes().commandResponse());

    aggregator = aggregateSpec.generation().aggregator();
    initialValue = aggregateSpec.generation().initialValue();
}