org.apache.kafka.streams.StreamsBuilder Java Examples
The following examples show how to use org.apache.kafka.streams.StreamsBuilder.
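All of the examples follow the same basic lifecycle: declare a topology on a StreamsBuilder, build() it, wrap the resulting Topology in a KafkaStreams instance, and start it. As a minimal, self-contained sketch of that pattern (the topic names words and words-out and the localhost broker address are illustrative placeholders, not taken from any example below):

import java.util.Properties;
import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.streams.KafkaStreams;
import org.apache.kafka.streams.StreamsBuilder;
import org.apache.kafka.streams.StreamsConfig;
import org.apache.kafka.streams.Topology;

public class MinimalStreamsApp {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put(StreamsConfig.APPLICATION_ID_CONFIG, "minimal-streams-app");   // illustrative id
        props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");     // illustrative broker
        props.put(StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG, Serdes.String().getClass());
        props.put(StreamsConfig.DEFAULT_VALUE_SERDE_CLASS_CONFIG, Serdes.String().getClass());

        // StreamsBuilder is the entry point of the Streams DSL: declare sources,
        // transformations, and sinks, then build() the resulting Topology.
        StreamsBuilder builder = new StreamsBuilder();
        builder.stream("words")                                   // source topic (illustrative)
               .peek((k, v) -> System.out.println(k + " -> " + v)) // side-effect only, no transformation
               .to("words-out");                                  // sink topic (illustrative)

        Topology topology = builder.build();
        KafkaStreams streams = new KafkaStreams(topology, props);
        streams.start();
        Runtime.getRuntime().addShutdownHook(new Thread(streams::close));
    }
}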
Example #1
Source File: StreamingWordCount.java From Kafka-Streams-Real-time-Stream-Processing with The Unlicense
public static void main(final String[] args) {
    final Properties props = new Properties();
    props.put(StreamsConfig.APPLICATION_ID_CONFIG, "StreamingWordCount");
    props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
    props.put(StreamsConfig.STATE_DIR_CONFIG, "state-store");
    props.put(StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG, Serdes.String().getClass().getName());
    props.put(StreamsConfig.DEFAULT_VALUE_SERDE_CLASS_CONFIG, Serdes.String().getClass().getName());

    logger.info("Start Reading Messages");
    StreamsBuilder streamBuilder = new StreamsBuilder();

    KStream<String, String> KS0 = streamBuilder.stream("streaming-word-count");

    // Tokenize each line into lower-case words
    KStream<String, String> KS1 = KS0.flatMapValues(value ->
        Arrays.asList(value.toLowerCase().split(" ")));

    // Group by the word itself, then count occurrences per word
    KGroupedStream<String, String> KGS2 = KS1.groupBy((key, value) -> value);
    KTable<String, Long> KTS3 = KGS2.count();

    KTS3.toStream().peek(
        (k, v) -> logger.info("Key = " + k + " Value = " + v.toString())
    );

    KafkaStreams streams = new KafkaStreams(streamBuilder.build(), props);
    streams.start();
    Runtime.getRuntime().addShutdownHook(new Thread(streams::close));
}
Example #2
Source File: FkJoinTableToTable.java From kafka-tutorials with Apache License 2.0
public Topology buildTopology(Properties envProps) {
    final StreamsBuilder builder = new StreamsBuilder();
    final String albumTopic = envProps.getProperty("album.topic.name");
    final String userTrackPurchaseTopic = envProps.getProperty("tracks.purchase.topic.name");
    final String musicInterestTopic = envProps.getProperty("music.interest.topic.name");

    final Serde<Long> longSerde = getPrimitiveAvroSerde(envProps, true);
    final Serde<MusicInterest> musicInterestSerde = getSpecificAvroSerde(envProps);
    final Serde<Album> albumSerde = getSpecificAvroSerde(envProps);
    final Serde<TrackPurchase> trackPurchaseSerde = getSpecificAvroSerde(envProps);

    final KTable<Long, Album> albums =
        builder.table(albumTopic, Consumed.with(longSerde, albumSerde));
    final KTable<Long, TrackPurchase> trackPurchases =
        builder.table(userTrackPurchaseTopic, Consumed.with(longSerde, trackPurchaseSerde));
    final MusicInterestJoiner trackJoiner = new MusicInterestJoiner();

    final KTable<Long, MusicInterest> musicInterestTable =
        trackPurchases.join(albums, TrackPurchase::getAlbumId, trackJoiner);

    musicInterestTable.toStream().to(musicInterestTopic, Produced.with(longSerde, musicInterestSerde));

    return builder.build();
}
Example #3
Source File: PhysicalPlanBuilder.java From ksql-fork-with-deep-learning-function with Apache License 2.0
public PhysicalPlanBuilder(
    final StreamsBuilder builder,
    final KsqlConfig ksqlConfig,
    final KafkaTopicClient kafkaTopicClient,
    final FunctionRegistry functionRegistry,
    final Map<String, Object> overriddenStreamsProperties,
    final boolean updateMetastore,
    final MetaStore metaStore,
    final SchemaRegistryClient schemaRegistryClient,
    final KafkaStreamsBuilder kafkaStreamsBuilder
) {
    this.builder = builder;
    this.ksqlConfig = ksqlConfig;
    this.kafkaTopicClient = kafkaTopicClient;
    this.functionRegistry = functionRegistry;
    this.overriddenStreamsProperties = overriddenStreamsProperties;
    this.metaStore = metaStore;
    this.updateMetastore = updateMetastore;
    this.schemaRegistryClient = schemaRegistryClient;
    this.kafkaStreamsBuilder = kafkaStreamsBuilder;
}
Example #4
Source File: StreamUtils.java From kafka-graphs with Apache License 2.0
public static <K, V> KStream<K, V> streamFromCollection(
    StreamsBuilder builder,
    Properties props,
    String topic,
    int numPartitions,
    short replicationFactor,
    Serde<K> keySerde,
    Serde<V> valueSerde,
    Collection<KeyValue<K, V>> values) {

    ClientUtils.createTopic(topic, numPartitions, replicationFactor, props);
    try (Producer<K, V> producer = new KafkaProducer<>(props, keySerde.serializer(), valueSerde.serializer())) {
        for (KeyValue<K, V> value : values) {
            ProducerRecord<K, V> producerRecord = new ProducerRecord<>(topic, value.key, value.value);
            producer.send(producerRecord);
        }
        producer.flush();
    }
    return builder.stream(topic, Consumed.with(keySerde, valueSerde));
}
Example #5
Source File: NameJoinGlobalKTable.java From fluent-kafka-streams-tests with MIT License
public Topology getTopology() {
    final StreamsBuilder builder = new StreamsBuilder();

    final KStream<Long, Long> inputStream =
        builder.stream(INPUT_TOPIC, Consumed.with(Serdes.Long(), Serdes.Long()));

    final GlobalKTable<Long, String> joinTable = builder.globalTable(NAME_INPUT);

    inputStream
        .join(joinTable,
            (id, valueId) -> valueId,
            (id, name) -> name)
        .to(OUTPUT_TOPIC, Produced.with(Serdes.Long(), Serdes.String()));

    return builder.build();
}
Example #6
Source File: KsqlStructuredDataOutputNodeTest.java From ksql-fork-with-deep-learning-function with Apache License 2.0
@Test
public void shouldCreateSinkWithCorrectCleanupPolicyStream() {
    KafkaTopicClient topicClientForWindowTable = EasyMock.mock(KafkaTopicClient.class);
    StreamsBuilder streamsBuilder = new StreamsBuilder();

    // Record the expected topic creation, then switch the mock to replay mode
    topicClientForWindowTable.createTopic("output", 4, (short) 3, Collections.emptyMap());
    EasyMock.replay(topicClientForWindowTable);

    SchemaKStream schemaKStream = outputNode.buildStream(
        streamsBuilder,
        ksqlConfig,
        topicClientForWindowTable,
        new FunctionRegistry(),
        new HashMap<>(),
        new MockSchemaRegistryClient());
    assertThat(schemaKStream, instanceOf(SchemaKStream.class));
    // Pass the mock so the createTopic expectation is actually checked
    EasyMock.verify(topicClientForWindowTable);
}
Example #7
Source File: KsqlStructuredDataOutputNodeTest.java From ksql-fork-with-deep-learning-function with Apache License 2.0
@Test
public void shouldCreateSinkWithCorrectCleanupPolicyWindowedTable() {
    KafkaTopicClient topicClientForWindowTable = EasyMock.mock(KafkaTopicClient.class);
    KsqlStructuredDataOutputNode outputNode = getKsqlStructuredDataOutputNode(true);
    StreamsBuilder streamsBuilder = new StreamsBuilder();

    // Record the expected topic creation, then switch the mock to replay mode
    topicClientForWindowTable.createTopic("output", 4, (short) 3, Collections.emptyMap());
    EasyMock.replay(topicClientForWindowTable);

    SchemaKStream schemaKStream = outputNode.buildStream(
        streamsBuilder,
        ksqlConfig,
        topicClientForWindowTable,
        new FunctionRegistry(),
        new HashMap<>(),
        new MockSchemaRegistryClient());
    assertThat(schemaKStream, instanceOf(SchemaKTable.class));
    // Pass the mock so the createTopic expectation is actually checked
    EasyMock.verify(topicClientForWindowTable);
}
Example #8
Source File: CampaignPerformanceApp.java From Kafka-Streams-Real-time-Stream-Processing with The Unlicense
public static void main(String[] args) {
    Properties properties = new Properties();
    properties.put(StreamsConfig.APPLICATION_ID_CONFIG, AppConfigs.applicationID);
    properties.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, AppConfigs.bootstrapServers);
    properties.put(StreamsConfig.STATE_DIR_CONFIG, AppConfigs.stateStoreLocation);

    StreamsBuilder builder = new StreamsBuilder();
    AppTopology.withBuilder(builder);

    builder.stream(
        AppConfigs.outputTopic,
        Consumed.with(AppSerdes.String(), AppSerdes.CampaignPerformance())
    ).foreach((k, v) -> logger.info("outside = " + v));

    Topology topology = builder.build();
    KafkaStreams streams = new KafkaStreams(topology, properties);
    streams.start();

    Runtime.getRuntime().addShutdownHook(new Thread(streams::close));
}
Example #9
Source File: PhysicalPlanBuilder.java From ksql-fork-with-deep-learning-function with Apache License 2.0
public PhysicalPlanBuilder(
    final StreamsBuilder builder,
    final KsqlConfig ksqlConfig,
    final KafkaTopicClient kafkaTopicClient,
    final FunctionRegistry functionRegistry,
    final Map<String, Object> overriddenStreamsProperties,
    final boolean updateMetastore,
    final MetaStore metaStore,
    final SchemaRegistryClient schemaRegistryClient
) {
    this(
        builder,
        ksqlConfig,
        kafkaTopicClient,
        functionRegistry,
        overriddenStreamsProperties,
        updateMetastore,
        metaStore,
        schemaRegistryClient,
        new KafkaStreamsBuilderImpl()
    );
}
Example #10
Source File: AggregatingCount.java From kafka-tutorials with Apache License 2.0
public Topology buildTopology(Properties envProps,
                              final SpecificAvroSerde<TicketSale> ticketSaleSerde) {
    final StreamsBuilder builder = new StreamsBuilder();

    final String inputTopic = envProps.getProperty("input.topic.name");
    final String outputTopic = envProps.getProperty("output.topic.name");

    builder.stream(inputTopic, Consumed.with(Serdes.String(), ticketSaleSerde))
        // Set key to title and value to ticket value
        .map((k, v) -> new KeyValue<>((String) v.getTitle(), (Integer) v.getTicketTotalValue()))
        // Group by title
        .groupByKey(Grouped.with(Serdes.String(), Serdes.Integer()))
        // Apply COUNT method
        .count()
        // Write to stream specified by outputTopic
        .toStream().to(outputTopic, Produced.with(Serdes.String(), Serdes.Long()));

    return builder.build();
}
Example #11
Source File: GraphAlgorithmHandler.java From kafka-graphs with Apache License 2.0
public Mono<ServerResponse> configure(ServerRequest request) {
    List<String> appIdHeaders = request.headers().header(X_KGRAPH_APPID);
    String appId = appIdHeaders.isEmpty()
        ? ClientUtils.generateRandomHexString(8)
        : appIdHeaders.iterator().next();
    return request.bodyToMono(GraphAlgorithmCreateRequest.class)
        .doOnNext(input -> {
            PregelGraphAlgorithm<?, ?, ?, ?> algorithm = getAlgorithm(appId, input);
            StreamsBuilder builder = new StreamsBuilder();
            Properties streamsConfig = streamsConfig(
                appId,
                props.getBootstrapServers(),
                algorithm.serialized().keySerde(),
                algorithm.serialized().vertexValueSerde()
            );
            algorithm.configure(builder, streamsConfig);
            algorithms.put(appId, algorithm);
        })
        .flatMapMany(input -> proxyConfigure(
            appIdHeaders.isEmpty() ? group.getCurrentMembers().keySet() : Collections.emptySet(),
            appId,
            input))
        .then(ServerResponse.ok()
            .contentType(MediaType.APPLICATION_JSON)
            .body(Mono.just(new GraphAlgorithmId(appId)), GraphAlgorithmId.class));
}
Example #12
Source File: StructuredDataSourceNode.java From ksql-fork-with-deep-learning-function with Apache License 2.0
private KTable createKTable(
    StreamsBuilder builder,
    final Topology.AutoOffsetReset autoOffsetReset,
    final KsqlTable ksqlTable,
    final Serde<GenericRow> genericRowSerde,
    final Serde<GenericRow> genericRowSerdeAfterRead
) {
    if (ksqlTable.isWindowed()) {
        return table(
            builder.stream(
                ksqlTable.getKsqlTopic().getKafkaTopicName(),
                Consumed.with(windowedSerde, genericRowSerde).withOffsetResetPolicy(autoOffsetReset)
            ).mapValues(windowedMapper).transformValues(new AddTimestampColumn()),
            windowedSerde,
            genericRowSerdeAfterRead
        );
    } else {
        return table(
            builder.stream(
                ksqlTable.getKsqlTopic().getKafkaTopicName(),
                Consumed.with(Serdes.String(), genericRowSerde).withOffsetResetPolicy(autoOffsetReset)
            ).mapValues(nonWindowedValueMapper).transformValues(new AddTimestampColumn()),
            Serdes.String(),
            genericRowSerdeAfterRead
        );
    }
}
Example #13
Source File: KafkaDenormalizer.java From cqrs-eventsourcing-kafka with Apache License 2.0
@Override
public void start() throws Exception {
    Predicate<String, EventEnvelope> inventoryItemCreated =
        (k, v) -> k.equals(InventoryItemCreated.class.getSimpleName());
    Predicate<String, EventEnvelope> inventoryItemRenamed =
        (k, v) -> k.equals(InventoryItemRenamed.class.getSimpleName());
    Predicate<String, EventEnvelope> inventoryItemDeactivated =
        (k, v) -> k.equals(InventoryItemDeactivated.class.getSimpleName());

    // Re-key each event by its type, then branch into one stream per event type
    StreamsBuilder builder = new StreamsBuilder();
    KStream<String, EventEnvelope>[] filteredStreams = builder
        .stream(INVENTORY_ITEM_TOPIC, Consumed.with(Serdes.String(), initializeEnvelopeSerde()))
        .selectKey((k, v) -> v.eventType)
        .branch(inventoryItemCreated, inventoryItemRenamed, inventoryItemDeactivated);

    // Dispatch each branch to its dedicated handler
    filteredStreams[0].process(InventoryItemCreatedHandler::new);
    filteredStreams[1].process(InventoryItemRenamedHandler::new);
    filteredStreams[2].process(InventoryItemDeactivatedHandler::new);

    kafkaStreams = new KafkaStreams(builder.build(), getProperties());
    kafkaStreams.cleanUp(); // -- only because we are using in-memory
    kafkaStreams.start();
}
Example #14
Source File: KafkaStreamsStreamListenerSetupMethodOrchestrator.java From spring-cloud-stream-binder-kafka with Apache License 2.0
private KStream<?, ?> getkStream(String inboundName,
        KafkaStreamsStateStoreProperties storeSpec,
        BindingProperties bindingProperties,
        KafkaStreamsConsumerProperties kafkaStreamsConsumerProperties,
        StreamsBuilder streamsBuilder,
        Serde<?> keySerde, Serde<?> valueSerde,
        Topology.AutoOffsetReset autoOffsetReset, boolean firstBuild) {
    if (storeSpec != null) {
        StoreBuilder storeBuilder = buildStateStore(storeSpec);
        streamsBuilder.addStateStore(storeBuilder);
        if (LOG.isInfoEnabled()) {
            LOG.info("state store " + storeBuilder.name() + " added to topology");
        }
    }
    return getKStream(inboundName, bindingProperties, kafkaStreamsConsumerProperties,
            streamsBuilder, keySerde, valueSerde, autoOffsetReset, firstBuild);
}
Example #15
Source File: GraphGenerators.java From kafka-graphs with Apache License 2.0
public static KGraph<Long, Long, Long> starGraph(
    StreamsBuilder builder,
    Properties producerConfig,
    int numVertices) {

    List<KeyValue<Edge<Long>, Long>> edgeList = new ArrayList<>();
    for (long i = 1; i < numVertices; i++) {
        edgeList.add(new KeyValue<>(new Edge<>(i, 0L), 1L));
    }
    KTable<Edge<Long>, Long> edges = StreamUtils.tableFromCollection(
        builder, producerConfig, new KryoSerde<>(), Serdes.Long(), edgeList);
    return KGraph.fromEdges(edges, v -> 1L,
        GraphSerialized.with(Serdes.Long(), Serdes.Long(), Serdes.Long()));
}
Example #16
Source File: OrderService.java From qcon-microservices with Apache License 2.0
/**
 * Create a table of orders which we can query. When the table is updated
 * we check to see if there is an outstanding HTTP GET request waiting to be
 * fulfilled.
 */
private StreamsBuilder createOrdersMaterializedView() {
    StreamsBuilder builder = new StreamsBuilder();
    builder.table(Schemas.Topics.ORDERS.name(),
            Consumed.with(Schemas.Topics.ORDERS.keySerde(), Schemas.Topics.ORDERS.valueSerde()),
            Materialized.as(ORDERS_STORE_NAME))
        .toStream().foreach(this::maybeCompleteLongPollGet);
    return builder;
}
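A view materialized this way is typically read back with the Kafka Streams interactive-query API. A hedged sketch of such a lookup, assuming a running KafkaStreams instance built from the topology above and an Order value type matching the ORDERS topic (the fetchOrder helper is illustrative, not part of the original OrderService):

// Assumed imports: org.apache.kafka.streams.state.QueryableStoreTypes,
// org.apache.kafka.streams.state.ReadOnlyKeyValueStore
private Order fetchOrder(KafkaStreams streams, String orderId) {
    // Look up the locally materialized store by the name used in Materialized.as(...)
    ReadOnlyKeyValueStore<String, Order> store =
        streams.store(ORDERS_STORE_NAME, QueryableStoreTypes.keyValueStore());
    return store.get(orderId); // null if this key has not been materialized yet
}

Note that in a multi-instance deployment the queried key may be materialized on a different instance, which may be one reason the example completes long-poll GETs from the update stream rather than only reading the store on demand.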
Example #17
Source File: LastLoginDemo.java From Kafka-Streams-Real-time-Stream-Processing with The Unlicense
public static void main(String[] args) {
    Properties properties = new Properties();
    properties.put(StreamsConfig.APPLICATION_ID_CONFIG, AppConfigs.applicationID);
    properties.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, AppConfigs.bootstrapServers);
    properties.put(StreamsConfig.STATE_DIR_CONFIG, AppConfigs.stateStoreName);

    StreamsBuilder streamsBuilder = new StreamsBuilder();

    KTable<String, UserDetails> userDetailsKTable = streamsBuilder.table(
        AppConfigs.userMasterTopic,
        Consumed.with(AppSerdes.String(), AppSerdes.UserDetails())
    );

    KTable<String, UserLogin> userLoginKTable = streamsBuilder.table(
        AppConfigs.lastLoginTopic,
        Consumed.with(AppSerdes.String(), AppSerdes.UserLogin())
    );

    // Enrich user details with the latest login time and write back to the master topic
    userLoginKTable.join(userDetailsKTable,
        (userLogin, userDetails) -> {
            userDetails.setLastLogin(userLogin.getCreatedTime());
            return userDetails;
        }
    ).toStream().to(AppConfigs.userMasterTopic,
        Produced.with(AppSerdes.String(), AppSerdes.UserDetails()));

    userDetailsKTable.toStream().foreach(
        (k, userDetails) -> logger.info("Key = " + k + " Value = " + userDetails)
    );

    KafkaStreams streams = new KafkaStreams(streamsBuilder.build(), properties);
    streams.start();
    Runtime.getRuntime().addShutdownHook(new Thread(streams::close));
}
Example #18
Source File: PhysicalPlanBuilder.java From ksql-fork-with-deep-learning-function with Apache License 2.0
private KafkaStreams buildStreams(
    final OutputNode outputNode,
    final StreamsBuilder builder,
    final String applicationId,
    final KsqlConfig ksqlConfig,
    final Map<String, Object> overriddenProperties
) {
    Map<String, Object> newStreamsProperties = ksqlConfig.getKsqlStreamConfigProps();
    newStreamsProperties.putAll(overriddenProperties);
    newStreamsProperties.put(StreamsConfig.APPLICATION_ID_CONFIG, applicationId);
    newStreamsProperties.put(
        ConsumerConfig.AUTO_OFFSET_RESET_CONFIG,
        ksqlConfig.get(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG)
    );
    newStreamsProperties.put(
        StreamsConfig.COMMIT_INTERVAL_MS_CONFIG,
        ksqlConfig.get(StreamsConfig.COMMIT_INTERVAL_MS_CONFIG)
    );
    newStreamsProperties.put(
        StreamsConfig.CACHE_MAX_BYTES_BUFFERING_CONFIG,
        ksqlConfig.get(StreamsConfig.CACHE_MAX_BYTES_BUFFERING_CONFIG)
    );

    final Integer timestampIndex = (Integer) ksqlConfig.get(KsqlConfig.KSQL_TIMESTAMP_COLUMN_INDEX);
    if (timestampIndex != null && timestampIndex >= 0) {
        outputNode.getSourceTimestampExtractionPolicy().applyTo(ksqlConfig, newStreamsProperties);
    }

    updateListProperty(
        newStreamsProperties,
        StreamsConfig.consumerPrefix(ConsumerConfig.INTERCEPTOR_CLASSES_CONFIG),
        ConsumerCollector.class.getCanonicalName()
    );
    updateListProperty(
        newStreamsProperties,
        StreamsConfig.producerPrefix(ProducerConfig.INTERCEPTOR_CLASSES_CONFIG),
        ProducerCollector.class.getCanonicalName()
    );
    return kafkaStreamsBuilder.buildKafkaStreams(builder, new StreamsConfig(newStreamsProperties));
}
Example #19
Source File: TracingKafkaStreamsTest.java From java-kafka-client with Apache License 2.0
@Test
public void test() {
    Map<String, Object> senderProps = KafkaTestUtils
        .producerProps(embeddedKafka.getEmbeddedKafka());

    Properties config = new Properties();
    config.put(StreamsConfig.APPLICATION_ID_CONFIG, "stream-app");
    config.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, senderProps.get("bootstrap.servers"));
    config.put(StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG, Serdes.Integer().getClass());
    config.put(StreamsConfig.DEFAULT_VALUE_SERDE_CLASS_CONFIG, Serdes.String().getClass());

    Producer<Integer, String> producer = createProducer();
    ProducerRecord<Integer, String> record = new ProducerRecord<>("stream-test", 1, "test");
    producer.send(record);

    final Serde<String> stringSerde = Serdes.String();
    final Serde<Integer> intSerde = Serdes.Integer();

    StreamsBuilder builder = new StreamsBuilder();
    KStream<Integer, String> kStream = builder.stream("stream-test");

    kStream.map((key, value) -> new KeyValue<>(key, value + "map"))
        .to("stream-out", Produced.with(intSerde, stringSerde));

    KafkaStreams streams = new KafkaStreams(builder.build(), config,
        new TracingKafkaClientSupplier(mockTracer));
    streams.start();

    await().atMost(15, TimeUnit.SECONDS).until(reportedSpansSize(), equalTo(3));

    streams.close();
    producer.close();

    List<MockSpan> spans = mockTracer.finishedSpans();
    assertEquals(3, spans.size());
    checkSpans(spans);

    assertNull(mockTracer.activeSpan());
}
Example #20
Source File: ProjectNodeTest.java From ksql-fork-with-deep-learning-function with Apache License 2.0
private void mockSourceNode() {
    EasyMock.expect(source.getKeyField()).andReturn(new Field("field1", 0, Schema.STRING_SCHEMA));
    EasyMock.expect(source.buildStream(
        anyObject(StreamsBuilder.class),
        anyObject(KsqlConfig.class),
        anyObject(KafkaTopicClient.class),
        anyObject(FunctionRegistry.class),
        eq(props),
        anyObject(SchemaRegistryClient.class))).andReturn(stream);
}
Example #21
Source File: SchemaKTableTest.java From ksql-fork-with-deep-learning-function with Apache License 2.0
@Before
public void init() {
    functionRegistry = new FunctionRegistry();
    ksqlTable = (KsqlTable) metaStore.getSource("TEST2");
    StreamsBuilder builder = new StreamsBuilder();
    kTable = builder.table(
        ksqlTable.getKsqlTopic().getKafkaTopicName(),
        Consumed.with(
            Serdes.String(),
            ksqlTable.getKsqlTopic().getKsqlTopicSerDe().getGenericRowSerde(
                null, new KsqlConfig(Collections.emptyMap()), false, new MockSchemaRegistryClient())));
}
Example #22
Source File: KsqlStructuredDataOutputNodeTest.java From ksql-fork-with-deep-learning-function with Apache License 2.0
private SchemaKStream buildStream() {
    builder = new StreamsBuilder();
    return outputNode.buildStream(
        builder,
        ksqlConfig,
        topicClient,
        new FunctionRegistry(),
        new HashMap<>(),
        new MockSchemaRegistryClient());
}
Example #23
Source File: ErrorImporter.java From SkaETL with Apache License 2.0
public void activate() {
    log.info("Activating error importer");
    StreamsBuilder builder = new StreamsBuilder();

    final Serde<ErrorData> errorDataSerde =
        Serdes.serdeFrom(new GenericSerializer<>(), new GenericDeserializer<>(ErrorData.class));

    KStream<String, ErrorData> streamToES = builder.stream(
        kafkaConfiguration.getErrorTopic(),
        Consumed.with(Serdes.String(), errorDataSerde));

    streamToES.process(() -> elasticsearchProcessor);

    errorStream = new KafkaStreams(builder.build(),
        KafkaUtils.createKStreamProperties(INPUT_PROCESS_ERROR, kafkaConfiguration.getBootstrapServers()));

    Runtime.getRuntime().addShutdownHook(new Thread(errorStream::close));
    errorStream.start();
}
Example #24
Source File: ITKafkaStreamsTracing.java From brave with Apache License 2.0
@Test
public void should_create_spans_from_stream_with_tracing_map() {
    String inputTopic = testName.getMethodName() + "-input";
    String outputTopic = testName.getMethodName() + "-output";

    StreamsBuilder builder = new StreamsBuilder();
    builder.stream(inputTopic, Consumed.with(Serdes.String(), Serdes.String()))
        .transform(kafkaStreamsTracing.map("map-1", (key, value) -> {
            try {
                Thread.sleep(100L);
            } catch (InterruptedException e) {
                e.printStackTrace();
            }
            return KeyValue.pair(key, value);
        }))
        .to(outputTopic, Produced.with(Serdes.String(), Serdes.String()));
    Topology topology = builder.build();

    KafkaStreams streams = buildKafkaStreams(topology);

    send(new ProducerRecord<>(inputTopic, TEST_KEY, TEST_VALUE));

    waitForStreamToRun(streams);

    MutableSpan spanInput = testSpanHandler.takeRemoteSpan(CONSUMER);
    assertThat(spanInput.tags()).containsEntry("kafka.topic", inputTopic);

    MutableSpan spanProcessor = testSpanHandler.takeLocalSpan();
    assertChildOf(spanProcessor, spanInput);

    MutableSpan spanOutput = testSpanHandler.takeRemoteSpan(PRODUCER);
    assertThat(spanOutput.tags()).containsEntry("kafka.topic", outputTopic);
    assertChildOf(spanOutput, spanProcessor);

    streams.close();
    streams.cleanUp();
}
Example #25
Source File: GraphOperationsITCase.java From kafka-graphs with Apache License 2.0
@Test
public void testOutDegrees() throws Exception {
    Properties producerConfig = ClientUtils.producerConfig(
        CLUSTER.bootstrapServers(), LongSerializer.class, LongSerializer.class, new Properties());
    StreamsBuilder builder = new StreamsBuilder();

    KTable<Long, Long> vertices = StreamUtils.tableFromCollection(
        builder, producerConfig, Serdes.Long(), Serdes.Long(),
        TestGraphUtils.getLongLongVertices());
    KTable<Edge<Long>, Long> edges = StreamUtils.tableFromCollection(
        builder, producerConfig, new KryoSerde<>(), Serdes.Long(),
        TestGraphUtils.getLongLongEdges());

    KGraph<Long, Long, Long> graph = new KGraph<>(
        vertices, edges, GraphSerialized.with(Serdes.Long(), Serdes.Long(), Serdes.Long()));

    KTable<Long, Long> outDegrees = graph.outDegrees();

    expectedResult = "1,2\n" +
        "2,1\n" +
        "3,2\n" +
        "4,1\n" +
        "5,1\n";

    startStreams(builder, Serdes.Long(), Serdes.Long());

    Thread.sleep(5000);

    List<KeyValue<Long, Long>> result = StreamUtils.listFromTable(streams, outDegrees);

    compareResultAsTuples(result, expectedResult);
}
Example #26
Source File: ITKafkaStreamsTracing.java From brave with Apache License 2.0
@Test
public void should_create_spans_from_stream_with_tracing_peek() {
    String inputTopic = testName.getMethodName() + "-input";
    String outputTopic = testName.getMethodName() + "-output";

    long now = System.currentTimeMillis();

    StreamsBuilder builder = new StreamsBuilder();
    builder.stream(inputTopic, Consumed.with(Serdes.String(), Serdes.String()))
        .transformValues(kafkaStreamsTracing.peek("peek-1", (key, value) -> {
            try {
                Thread.sleep(100L);
            } catch (InterruptedException e) {
                e.printStackTrace();
            }
            tracing.tracer().currentSpan().annotate(now, "test");
        }))
        .to(outputTopic, Produced.with(Serdes.String(), Serdes.String()));
    Topology topology = builder.build();

    KafkaStreams streams = buildKafkaStreams(topology);

    send(new ProducerRecord<>(inputTopic, TEST_KEY, TEST_VALUE));

    waitForStreamToRun(streams);

    MutableSpan spanInput = testSpanHandler.takeRemoteSpan(CONSUMER);
    assertThat(spanInput.tags()).containsEntry("kafka.topic", inputTopic);

    MutableSpan spanProcessor = testSpanHandler.takeLocalSpan();
    assertChildOf(spanProcessor, spanInput);
    assertThat(spanProcessor.annotations()).contains(entry(now, "test"));

    MutableSpan spanOutput = testSpanHandler.takeRemoteSpan(PRODUCER);
    assertThat(spanOutput.tags()).containsEntry("kafka.topic", outputTopic);
    assertChildOf(spanOutput, spanProcessor);

    streams.close();
    streams.cleanUp();
}
Example #27
Source File: AbstractKafkaStreamsBinderProcessor.java From spring-cloud-stream-binder-kafka with Apache License 2.0
protected KStream<?, ?> getKStream(String inboundName, BindingProperties bindingProperties,
        KafkaStreamsConsumerProperties kafkaStreamsConsumerProperties, StreamsBuilder streamsBuilder,
        Serde<?> keySerde, Serde<?> valueSerde, Topology.AutoOffsetReset autoOffsetReset,
        boolean firstBuild) {
    if (firstBuild) {
        addStateStoreBeans(streamsBuilder);
    }

    // Bind either to a topic pattern or to an explicit comma-delimited list of topics
    KStream<?, ?> stream;
    if (this.kafkaStreamsExtendedBindingProperties
            .getExtendedConsumerProperties(inboundName).isDestinationIsPattern()) {
        final Pattern pattern = Pattern.compile(this.bindingServiceProperties.getBindingDestination(inboundName));
        stream = streamsBuilder.stream(pattern);
    }
    else {
        String[] bindingTargets = StringUtils.commaDelimitedListToStringArray(
                this.bindingServiceProperties.getBindingDestination(inboundName));
        final Consumed<?, ?> consumed = getConsumed(kafkaStreamsConsumerProperties, keySerde, valueSerde, autoOffsetReset);
        stream = streamsBuilder.stream(Arrays.asList(bindingTargets), consumed);
    }

    final boolean nativeDecoding = this.bindingServiceProperties
            .getConsumerProperties(inboundName).isUseNativeDecoding();
    if (nativeDecoding) {
        LOG.info("Native decoding is enabled for " + inboundName
                + ". Inbound deserialization done at the broker.");
    }
    else {
        LOG.info("Native decoding is disabled for " + inboundName
                + ". Inbound message conversion done by Spring Cloud Stream.");
    }
    return getkStream(bindingProperties, stream, nativeDecoding);
}
Example #28
Source File: ProcessStreamService.java From SkaETL with Apache License 2.0
public void createStreamSystemOut(String inputTopic) {
    StreamsBuilder builder = new StreamsBuilder();
    builder.stream(inputTopic, Consumed.with(Serdes.String(), GenericSerdes.jsonNodeSerde()))
        .process(() -> new LoggingProcessor<>());
    KafkaStreams streams = new KafkaStreams(builder.build(),
        KafkaUtils.createKStreamProperties(
            getProcessConsumer().getIdProcess() + ProcessConstants.SYSOUT_PROCESS,
            getBootstrapServer()));
    Runtime.getRuntime().addShutdownHook(new Thread(streams::close));
    streams.start();
    addStreams(streams);
}
Example #29
Source File: ITKafkaStreamsTracing.java From brave with Apache License 2.0
@Test
public void should_create_spans_from_stream_with_tracing_processor() {
    ProcessorSupplier<String, String> processorSupplier =
        kafkaStreamsTracing.processor(
            "forward-1",
            () -> new AbstractProcessor<String, String>() {
                @Override
                public void process(String key, String value) {
                    try {
                        Thread.sleep(100L);
                    } catch (InterruptedException e) {
                        e.printStackTrace();
                    }
                }
            });

    String inputTopic = testName.getMethodName() + "-input";

    StreamsBuilder builder = new StreamsBuilder();
    builder.stream(inputTopic, Consumed.with(Serdes.String(), Serdes.String()))
        .process(processorSupplier);
    Topology topology = builder.build();

    KafkaStreams streams = buildKafkaStreams(topology);

    send(new ProducerRecord<>(inputTopic, TEST_KEY, TEST_VALUE));

    waitForStreamToRun(streams);

    MutableSpan spanInput = testSpanHandler.takeRemoteSpan(CONSUMER);
    assertThat(spanInput.tags()).containsEntry("kafka.topic", inputTopic);

    MutableSpan spanProcessor = testSpanHandler.takeLocalSpan();
    assertChildOf(spanProcessor, spanInput);

    streams.close();
    streams.cleanUp();
}
Example #30
Source File: KafkaAnomalyDetectorMapper.java From adaptive-alerting with Apache License 2.0
@Override
protected Topology buildTopology() {
    val config = getConfig();
    val inputTopic = config.getInputTopic();
    val defaultOutputTopic = config.getOutputTopic();
    log.info("Initializing: inputTopic={}, defaultOutputTopic={}", inputTopic, defaultOutputTopic);

    val builder = new StreamsBuilder();

    // create store
    StoreBuilder<KeyValueStore<String, MetricData>> keyValueStoreBuilder =
        Stores.keyValueStoreBuilder(
                Stores.inMemoryKeyValueStore(STATE_STORE_NAME),
                Serdes.String(),
                new MetricDataJsonSerde())
            .withLoggingDisabled();
    // register store
    builder.addStateStore(keyValueStoreBuilder);

    // Dynamically choose kafka topic depending on the consumer id.
    final TopicNameExtractor<String, MappedMetricData> kafkaTopicNameExtractor =
        (key, mappedMetricData, recordContext) -> {
            final String consumerId = mappedMetricData.getConsumerId();
            if (DEFAULT_CONSUMER_ID.equals(consumerId)) {
                return defaultOutputTopic;
            }
            return defaultOutputTopic + "-" + consumerId;
        };

    final KStream<String, MetricData> stream = builder.stream(inputTopic);
    stream
        .filter((key, md) -> md != null)
        .transform(new MetricDataTransformerSupplier(mapper, STATE_STORE_NAME), STATE_STORE_NAME)
        .flatMap(this::metricsByDetector)
        .to(kafkaTopicNameExtractor, Produced.with(outputKeySerde, outputValueSerde));

    return builder.build();
}