org.apache.kafka.streams.state.Stores Java Examples
The following examples show how to use
org.apache.kafka.streams.state.Stores.
Each example is taken from an open-source project; the source file, project, and license are noted above it.
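As a quick orientation before the examples (this sketch is not taken from any of the projects below): the current Stores API pairs a store supplier, such as Stores.persistentKeyValueStore or Stores.inMemoryKeyValueStore, with a StoreBuilder that carries the serdes and is then registered on a StreamsBuilder or Topology. The store name "example-store" used here is an illustrative placeholder.

import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.streams.StreamsBuilder;
import org.apache.kafka.streams.state.KeyValueStore;
import org.apache.kafka.streams.state.StoreBuilder;
import org.apache.kafka.streams.state.Stores;

public class StoresSketch {

    public static void main(String[] args) {
        // Illustrative sketch only; "example-store" is a placeholder name.
        // The supplier decides the physical store (RocksDB-backed here);
        // the builder adds the key/value serdes and caching/changelog options.
        StoreBuilder<KeyValueStore<String, Long>> storeBuilder =
                Stores.keyValueStoreBuilder(
                        Stores.persistentKeyValueStore("example-store"),
                        Serdes.String(),
                        Serdes.Long());

        StreamsBuilder builder = new StreamsBuilder();
        // Register the store; processors or transformers that reference
        // "example-store" by name can then access it via their ProcessorContext.
        builder.addStateStore(storeBuilder);
    }
}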
Example #1
Source File: WordCountTopology.java From KafkaExample with Apache License 2.0 | 6 votes |
public static void main(String[] args) throws IOException {
    Properties props = new Properties();
    props.put(StreamsConfig.APPLICATION_ID_CONFIG, "streams-wordcount-processor");
    props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "kafka0:19092");
    props.put(StreamsConfig.ZOOKEEPER_CONNECT_CONFIG, "zookeeper0:12181/kafka");
    props.put(StreamsConfig.KEY_SERDE_CLASS_CONFIG, Serdes.String().getClass());
    props.put(StreamsConfig.VALUE_SERDE_CLASS_CONFIG, Serdes.Integer().getClass());
    props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");

    TopologyBuilder builder = new TopologyBuilder();
    builder.addSource("SOURCE", new StringDeserializer(), new StringDeserializer(), "words")
            .addProcessor("WordCountProcessor", WordCountProcessor::new, "SOURCE")
            .addStateStore(Stores.create("Counts").withStringKeys().withIntegerValues().inMemory().build(), "WordCountProcessor")
            // .connectProcessorAndStateStores("WordCountProcessor", "Counts")
            .addSink("SINK", "count", new StringSerializer(), new IntegerSerializer(), "WordCountProcessor");

    KafkaStreams stream = new KafkaStreams(builder, props);
    stream.start();
    System.in.read();
    stream.close();
    stream.cleanUp();
}
Example #2
Source File: SpanAggregationTopology.java From zipkin-storage-kafka with Apache License 2.0 | 5 votes |
@Override
public Topology get() {
    StreamsBuilder builder = new StreamsBuilder();
    if (aggregationEnabled) {
        // Aggregate Spans to Traces
        KStream<String, List<Span>> tracesStream =
                builder.stream(spansTopic, Consumed.with(Serdes.String(), spansSerde))
                        .groupByKey()
                        // how long to wait for another span
                        .windowedBy(SessionWindows.with(traceTimeout).grace(Duration.ZERO))
                        .aggregate(ArrayList::new, aggregateSpans(), joinAggregates(),
                                Materialized
                                        .<String, List<Span>>as(
                                                Stores.persistentSessionStore(TRACE_AGGREGATION_STORE, Duration.ofDays(1)))
                                        .withKeySerde(Serdes.String())
                                        .withValueSerde(spansSerde)
                                        .withLoggingDisabled()
                                        .withCachingEnabled())
                        // hold until a new record tells that a window is closed and we can process it further
                        .suppress(untilWindowCloses(unbounded()))
                        .toStream()
                        .selectKey((windowed, spans) -> windowed.key());
        // Downstream to traces topic
        tracesStream.to(traceTopic, Produced.with(Serdes.String(), spansSerde));
        // Map to dependency links
        tracesStream.flatMapValues(spansToDependencyLinks())
                .selectKey((key, value) -> linkKey(value))
                .to(dependencyTopic, Produced.with(Serdes.String(), dependencyLinkSerde));
    }
    return builder.build();
}
Example #3
Source File: KStreamsTopologyDescriptionParserTest.java From netbeans-mmd-plugin with Apache License 2.0 | 5 votes |
@Test
public void testKsDsl2() {
    final String storeName = "stateStore";
    final String globalStoreName = "glob-stateStore";
    final StreamsBuilder builder = new StreamsBuilder();
    final StoreBuilder<KeyValueStore<String, String>> storeBuilder = Stores.keyValueStoreBuilder(
            Stores.persistentKeyValueStore(storeName),
            Serdes.String(),
            Serdes.String());
    final StoreBuilder<KeyValueStore<String, String>> globalStoreBuilder = Stores.keyValueStoreBuilder(
            Stores.persistentKeyValueStore(globalStoreName),
            Serdes.String(),
            Serdes.String());
    builder.addGlobalStore(globalStoreBuilder,
            "some-global-topic",
            Consumed.with(Serdes.Short(), Serdes.String(), new WallclockTimestampExtractor(), Topology.AutoOffsetReset.EARLIEST),
            FakeProcessor::new);
    builder.addStateStore(storeBuilder);
    builder.<String, String>stream("input")
            .filter((k, v) -> v.endsWith("FOO"))
            .through("some-through-topic")
            .transformValues(() -> new SimpleValueTransformer(storeName), storeName)
            .to("output");
    final Topology topology = builder.build();
    final String text = topology.describe().toString();
    System.out.println(text);
    final KStreamsTopologyDescriptionParser parsed = new KStreamsTopologyDescriptionParser(text);
    assertEquals(8, parsed.size());
}
Example #4
Source File: StockSummaryStatefulProcessorDriver.java From kafka-streams with Apache License 2.0 | 5 votes |
public static void main(String[] args) {
    StreamsConfig streamingConfig = new StreamsConfig(getProperties());

    TopologyBuilder builder = new TopologyBuilder();

    JsonSerializer<StockTransactionSummary> stockTxnSummarySerializer = new JsonSerializer<>();
    JsonDeserializer<StockTransactionSummary> stockTxnSummaryDeserializer = new JsonDeserializer<>(StockTransactionSummary.class);
    JsonDeserializer<StockTransaction> stockTxnDeserializer = new JsonDeserializer<>(StockTransaction.class);
    JsonSerializer<StockTransaction> stockTxnJsonSerializer = new JsonSerializer<>();
    StringSerializer stringSerializer = new StringSerializer();
    StringDeserializer stringDeserializer = new StringDeserializer();
    Serde<StockTransactionSummary> stockTransactionSummarySerde = Serdes.serdeFrom(stockTxnSummarySerializer, stockTxnSummaryDeserializer);

    builder.addSource("stocks-source", stringDeserializer, stockTxnDeserializer, "stocks")
            .addProcessor("summary", StockSummaryProcessor::new, "stocks-source")
            .addStateStore(Stores.create("stock-transactions").withStringKeys()
                    .withValues(stockTransactionSummarySerde).inMemory().maxEntries(100).build(), "summary")
            .addSink("sink", "stocks-out", stringSerializer, stockTxnJsonSerializer, "stocks-source")
            .addSink("sink-2", "transaction-summary", stringSerializer, stockTxnSummarySerializer, "summary");

    System.out.println("Starting StockSummaryStatefulProcessor Example");
    KafkaStreams streaming = new KafkaStreams(builder, streamingConfig);
    streaming.start();
    System.out.println("StockSummaryStatefulProcessor Example now started");
}
Example #5
Source File: CommandProcessor.java From cqrs-manager-for-distributed-reactive-services with Apache License 2.0 | 5 votes |
public void start() {
    KStreamBuilder builder = new KStreamBuilder();

    Serde<UUID> keySerde = new FressianSerde();
    Serde<Map> valSerde = new FressianSerde();

    KStream<UUID, Map> commands = builder.stream(keySerde, valSerde, commandsTopic);
    KStream<UUID, Map> customerEvents = commands
            .filter((id, command) -> command.get(new Keyword("action")).equals(new Keyword("create-customer")))
            .map((id, command) -> {
                logger.debug("Command received");
                Map userEvent = new HashMap(command);
                userEvent.put(new Keyword("action"), new Keyword("customer-created"));
                userEvent.put(new Keyword("parent"), id);
                Map userValue = (Map) userEvent.get(new Keyword("data"));
                userValue.put(new Keyword("id"), UUID.randomUUID());
                return new KeyValue<>(UUID.randomUUID(), userEvent);
            }).through(keySerde, valSerde, eventsTopic);

    KStream<UUID, Map> customers = customerEvents
            .map((id, event) -> {
                Map customer = (Map) event.get(new Keyword("data"));
                UUID customerId = (UUID) customer.get(new Keyword("id"));
                return new KeyValue<UUID, Map>(customerId, customer);
            });

    customers.through(keySerde, valSerde, customersTopic);

    StateStoreSupplier store = Stores.create("Customers")
            .withKeys(keySerde)
            .withValues(valSerde)
            .persistent()
            .build();
    builder.addStateStore(store);

    customers.process(customerStore, "Customers");

    this.kafkaStreams = new KafkaStreams(builder, kafkaStreamsConfig);
    this.kafkaStreams.start();
}
Example #6
Source File: KafkaStreamsFunctionStateStoreTests.java From spring-cloud-stream-binder-kafka with Apache License 2.0 | 5 votes |
@Bean
public StoreBuilder otherStore() {
    return Stores.windowStoreBuilder(
            Stores.persistentWindowStore("other-store",
                    Duration.ofSeconds(3), Duration.ofSeconds(3), false),
            Serdes.Long(), Serdes.Long());
}
Example #7
Source File: KafkaStreamsStateStoreIntegrationTests.java From spring-cloud-stream-binder-kafka with Apache License 2.0 | 5 votes |
@Bean
public StoreBuilder mystore() {
    return Stores.windowStoreBuilder(
            Stores.persistentWindowStore("mystate", 3L, 3, 3L, false),
            Serdes.String(), Serdes.String());
}
Example #8
Source File: CPUMetricStreamHandler.java From kafka-streams-example with Apache License 2.0 | 5 votes |
private TopologyBuilder processingTopologyBuilder() {

    StateStoreSupplier machineToAvgCPUUsageStore
            = Stores.create(AVG_STORE_NAME)
                    .withStringKeys()
                    .withDoubleValues()
                    .inMemory()
                    .build();

    StateStoreSupplier machineToNumOfRecordsReadStore
            = Stores.create(NUM_RECORDS_STORE_NAME)
                    .withStringKeys()
                    .withIntegerValues()
                    .inMemory()
                    .build();

    TopologyBuilder builder = new TopologyBuilder();

    builder.addSource(SOURCE_NAME, TOPIC_NAME)
            .addProcessor(PROCESSOR_NAME, new ProcessorSupplier() {
                @Override
                public Processor get() {
                    return new CPUCumulativeAverageProcessor();
                }
            }, SOURCE_NAME)
            .addStateStore(machineToAvgCPUUsageStore, PROCESSOR_NAME)
            .addStateStore(machineToNumOfRecordsReadStore, PROCESSOR_NAME);

    LOGGER.info("Kafka streams processing topology ready");

    return builder;
}
Example #9
Source File: KafkaStreamsPipeline.java From quarkus with Apache License 2.0 | 5 votes |
@Produces
public Topology buildTopology() {
    StreamsBuilder builder = new StreamsBuilder();

    ObjectMapperSerde<Category> categorySerde = new ObjectMapperSerde<>(Category.class);
    ObjectMapperSerde<Customer> customerSerde = new ObjectMapperSerde<>(Customer.class);
    ObjectMapperSerde<EnrichedCustomer> enrichedCustomerSerde = new ObjectMapperSerde<>(EnrichedCustomer.class);

    KTable<Integer, Category> categories = builder.table(
            "streams-test-categories",
            Consumed.with(Serdes.Integer(), categorySerde));

    KStream<Integer, EnrichedCustomer> customers = builder
            .stream("streams-test-customers", Consumed.with(Serdes.Integer(), customerSerde))
            .selectKey((id, customer) -> customer.category)
            .join(
                    categories,
                    (customer, category) -> {
                        return new EnrichedCustomer(customer.id, customer.name, category);
                    },
                    Joined.with(Serdes.Integer(), customerSerde, categorySerde));

    KeyValueBytesStoreSupplier storeSupplier = Stores.inMemoryKeyValueStore("countstore");
    customers.groupByKey()
            .count(Materialized.<Integer, Long> as(storeSupplier));

    customers.selectKey((categoryId, customer) -> customer.id)
            .to("streams-test-customers-processed", Produced.with(Serdes.Integer(), enrichedCustomerSerde));

    return builder.build();
}
Example #10
Source File: TestDriverInitializer.java From simplesource with Apache License 2.0 | 5 votes |
TestDriverInitializer withStateStore(String stateStoreName, Serde<?> keySerde, Serde<?> valueSerde) {
    streamsBuilder.addStateStore(
            Stores.keyValueStoreBuilder(Stores.inMemoryKeyValueStore(stateStoreName), keySerde, valueSerde)
                    .withLoggingDisabled());
    return this;
}
Example #11
Source File: StreamsUtils.java From football-events with MIT License | 5 votes |
public static <D, E extends Event> void addStore(
        Topology topology, Class<D> domainType, String store, Class<E>... eventTypes) {
    StoreBuilder<KeyValueStore<String, D>> matchStoreBuilder = Stores.keyValueStoreBuilder(
            Stores.persistentKeyValueStore(store), Serdes.String(), new JsonPojoSerde<D>(domainType))
            .withLoggingDisabled();

    String[] processorNames = Stream.of(eventTypes)
            .map(event -> event.getSimpleName() + "Process")
            .collect(Collectors.toList()).toArray(new String[eventTypes.length]);

    topology.addStateStore(matchStoreBuilder, processorNames);
}
Example #12
Source File: FindDistinctEvents.java From kafka-tutorials with Apache License 2.0 | 5 votes |
public Topology buildTopology(Properties envProps, final SpecificAvroSerde<Click> clicksSerde) {
    final StreamsBuilder builder = new StreamsBuilder();

    final String inputTopic = envProps.getProperty("input.topic.name");
    final String outputTopic = envProps.getProperty("output.topic.name");

    // How long we "remember" an event. During this time, any incoming duplicates of the event
    // will be, well, dropped, thereby de-duplicating the input data.
    //
    // The actual value depends on your use case. To reduce memory and disk usage, you could
    // decrease the size to purge old windows more frequently at the cost of potentially missing out
    // on de-duplicating late-arriving records.
    final Duration windowSize = Duration.ofMinutes(2);

    // retention period must be at least window size -- for this use case, we don't need a longer retention period
    // and thus just use the window size as retention time
    final Duration retentionPeriod = windowSize;

    final StoreBuilder<WindowStore<String, Long>> dedupStoreBuilder = Stores.windowStoreBuilder(
            Stores.persistentWindowStore(storeName,
                    retentionPeriod,
                    windowSize,
                    false),
            Serdes.String(),
            Serdes.Long());

    builder.addStateStore(dedupStoreBuilder);

    builder
            .stream(inputTopic, Consumed.with(Serdes.String(), clicksSerde))
            .transformValues(() -> new DeduplicationTransformer<>(windowSize.toMillis(), (key, value) -> value.getIp()), storeName)
            .filter((k, v) -> v != null)
            .to(outputTopic, Produced.with(Serdes.String(), clicksSerde));

    return builder.build();
}
Example #13
Source File: StockPerformanceApplication.java From kafka-streams-in-action with Apache License 2.0 | 5 votes |
public static void main(String[] args) throws Exception {
    StreamsConfig streamsConfig = new StreamsConfig(getProperties());
    Deserializer<String> stringDeserializer = Serdes.String().deserializer();
    Serializer<String> stringSerializer = Serdes.String().serializer();
    Serde<StockPerformance> stockPerformanceSerde = StreamsSerdes.StockPerformanceSerde();
    Serializer<StockPerformance> stockPerformanceSerializer = stockPerformanceSerde.serializer();
    Serde<StockTransaction> stockTransactionSerde = StreamsSerdes.StockTransactionSerde();
    Deserializer<StockTransaction> stockTransactionDeserializer = stockTransactionSerde.deserializer();

    Topology topology = new Topology();
    String stocksStateStore = "stock-performance-store";
    double differentialThreshold = 0.02;

    KeyValueBytesStoreSupplier storeSupplier = Stores.inMemoryKeyValueStore(stocksStateStore);
    StoreBuilder<KeyValueStore<String, StockPerformance>> storeBuilder =
            Stores.keyValueStoreBuilder(storeSupplier, Serdes.String(), stockPerformanceSerde);

    topology.addSource("stocks-source", stringDeserializer, stockTransactionDeserializer, "stock-transactions")
            .addProcessor("stocks-processor", () -> new StockPerformanceProcessor(stocksStateStore, differentialThreshold), "stocks-source")
            .addStateStore(storeBuilder, "stocks-processor")
            .addSink("stocks-sink", "stock-performance", stringSerializer, stockPerformanceSerializer, "stocks-processor");

    topology.addProcessor("stocks-printer", new KStreamPrinter("StockPerformance"), "stocks-processor");

    KafkaStreams kafkaStreams = new KafkaStreams(topology, streamsConfig);
    MockDataProducer.produceStockTransactionsWithKeyFunction(50, 50, 25, StockTransaction::getSymbol);
    System.out.println("Stock Analysis App Started");
    kafkaStreams.cleanUp();
    kafkaStreams.start();
    Thread.sleep(70000);
    System.out.println("Shutting down the Stock Analysis App now");
    kafkaStreams.close();
    MockDataProducer.shutdown();
}
Example #14
Source File: KafkaAnomalyDetectorMapper.java From adaptive-alerting with Apache License 2.0 | 5 votes |
@Override
protected Topology buildTopology() {
    val config = getConfig();
    val inputTopic = config.getInputTopic();
    val defaultOutputTopic = config.getOutputTopic();
    log.info("Initializing: inputTopic={}, defaultOutputTopic={}", inputTopic, defaultOutputTopic);

    val builder = new StreamsBuilder();

    // create store
    StoreBuilder<KeyValueStore<String, MetricData>> keyValueStoreBuilder =
            Stores.keyValueStoreBuilder(Stores.inMemoryKeyValueStore(STATE_STORE_NAME),
                    Serdes.String(),
                    new MetricDataJsonSerde())
                    .withLoggingDisabled();
    // register store
    builder.addStateStore(keyValueStoreBuilder);

    // Dynamically choose kafka topic depending on the consumer id.
    final TopicNameExtractor<String, MappedMetricData> kafkaTopicNameExtractor = (key, mappedMetricData, recordContext) -> {
        final String consumerId = mappedMetricData.getConsumerId();
        if (DEFAULT_CONSUMER_ID.equals(consumerId)) {
            return defaultOutputTopic;
        }
        return defaultOutputTopic + "-" + consumerId;
    };

    final KStream<String, MetricData> stream = builder.stream(inputTopic);
    stream
            .filter((key, md) -> md != null)
            .transform(new MetricDataTransformerSupplier(mapper, STATE_STORE_NAME), STATE_STORE_NAME)
            .flatMap(this::metricsByDetector)
            .to(kafkaTopicNameExtractor, Produced.with(outputKeySerde, outputValueSerde));
    return builder.build();
}
Example #15
Source File: StockPerformanceStreamsAndProcessorApplication.java From kafka-streams-in-action with Apache License 2.0 | 5 votes |
public static void main(String[] args) throws Exception {
    StreamsConfig streamsConfig = new StreamsConfig(getProperties());
    Serde<String> stringSerde = Serdes.String();
    Serde<StockPerformance> stockPerformanceSerde = StreamsSerdes.StockPerformanceSerde();
    Serde<StockTransaction> stockTransactionSerde = StreamsSerdes.StockTransactionSerde();

    StreamsBuilder builder = new StreamsBuilder();

    String stocksStateStore = "stock-performance-store";
    double differentialThreshold = 0.02;

    KeyValueBytesStoreSupplier storeSupplier = Stores.lruMap(stocksStateStore, 100);
    StoreBuilder<KeyValueStore<String, StockPerformance>> storeBuilder =
            Stores.keyValueStoreBuilder(storeSupplier, Serdes.String(), stockPerformanceSerde);

    builder.addStateStore(storeBuilder);

    builder.stream("stock-transactions", Consumed.with(stringSerde, stockTransactionSerde))
            .transform(() -> new StockPerformanceTransformer(stocksStateStore, differentialThreshold), stocksStateStore)
            .print(Printed.<String, StockPerformance>toSysOut().withLabel("StockPerformance"));
            //Uncomment this line and comment out the line above for writing to a topic
            //.to(stringSerde, stockPerformanceSerde, "stock-performance");

    KafkaStreams kafkaStreams = new KafkaStreams(builder.build(), streamsConfig);
    MockDataProducer.produceStockTransactionsWithKeyFunction(50, 50, 25, StockTransaction::getSymbol);
    System.out.println("Stock Analysis KStream/Process API App Started");
    kafkaStreams.cleanUp();
    kafkaStreams.start();
    Thread.sleep(70000);
    System.out.println("Shutting down the Stock KStream/Process API Analysis App now");
    kafkaStreams.close();
    MockDataProducer.shutdown();
}
Example #16
Source File: StockPerformanceStreamsAndProcessorMultipleValuesApplication.java From kafka-streams-in-action with Apache License 2.0 | 5 votes |
public static void main(String[] args) throws Exception {
    StreamsConfig streamsConfig = new StreamsConfig(getProperties());
    Serde<String> stringSerde = Serdes.String();
    Serde<StockPerformance> stockPerformanceSerde = StreamsSerdes.StockPerformanceSerde();
    Serde<StockTransaction> stockTransactionSerde = StreamsSerdes.StockTransactionSerde();

    StreamsBuilder builder = new StreamsBuilder();

    String stocksStateStore = "stock-performance-store";
    double differentialThreshold = 0.05;

    TransformerSupplier<String, StockTransaction, KeyValue<String, List<KeyValue<String, StockPerformance>>>> transformerSupplier =
            () -> new StockPerformanceMultipleValuesTransformer(stocksStateStore, differentialThreshold);

    KeyValueBytesStoreSupplier storeSupplier = Stores.lruMap(stocksStateStore, 100);
    StoreBuilder<KeyValueStore<String, StockPerformance>> storeBuilder =
            Stores.keyValueStoreBuilder(storeSupplier, Serdes.String(), stockPerformanceSerde);

    builder.addStateStore(storeBuilder);

    builder.stream("stock-transactions", Consumed.with(stringSerde, stockTransactionSerde))
            .transform(transformerSupplier, stocksStateStore)
            .flatMap((dummyKey, valueList) -> valueList)
            .print(Printed.<String, StockPerformance>toSysOut().withLabel("StockPerformance"));
            //.to(stringSerde, stockPerformanceSerde, "stock-performance");

    KafkaStreams kafkaStreams = new KafkaStreams(builder.build(), streamsConfig);
    MockDataProducer.produceStockTransactionsWithKeyFunction(50, 50, 25, StockTransaction::getSymbol);
    System.out.println("Stock Analysis KStream/Process API App Started");
    kafkaStreams.cleanUp();
    kafkaStreams.start();
    Thread.sleep(70000);
    System.out.println("Shutting down the Stock KStream/Process API Analysis App now");
    kafkaStreams.close();
    MockDataProducer.shutdown();
}
Example #17
Source File: TopologyProducer.java From quarkus-quickstarts with Apache License 2.0 | 5 votes |
@Produces
public Topology buildTopology() {
    StreamsBuilder builder = new StreamsBuilder();

    JsonbSerde<WeatherStation> weatherStationSerde = new JsonbSerde<>(WeatherStation.class);
    JsonbSerde<Aggregation> aggregationSerde = new JsonbSerde<>(Aggregation.class);

    KeyValueBytesStoreSupplier storeSupplier = Stores.persistentKeyValueStore(WEATHER_STATIONS_STORE);

    GlobalKTable<Integer, WeatherStation> stations = builder.globalTable(
            WEATHER_STATIONS_TOPIC,
            Consumed.with(Serdes.Integer(), weatherStationSerde));

    builder.stream(
            TEMPERATURE_VALUES_TOPIC,
            Consumed.with(Serdes.Integer(), Serdes.String()))
            .join(
                    stations,
                    (stationId, timestampAndValue) -> stationId,
                    (timestampAndValue, station) -> {
                        String[] parts = timestampAndValue.split(";");
                        return new TemperatureMeasurement(station.id, station.name, Instant.parse(parts[0]), Double.valueOf(parts[1]));
                    })
            .groupByKey()
            .aggregate(
                    Aggregation::new,
                    (stationId, value, aggregation) -> aggregation.updateFrom(value),
                    Materialized.<Integer, Aggregation> as(storeSupplier)
                            .withKeySerde(Serdes.Integer())
                            .withValueSerde(aggregationSerde))
            .toStream()
            .to(
                    TEMPERATURES_AGGREGATED_TOPIC,
                    Produced.with(Serdes.Integer(), aggregationSerde));

    return builder.build();
}
Example #18
Source File: KafkaStreamsFunctionStateStoreTests.java From spring-cloud-stream-binder-kafka with Apache License 2.0 | 4 votes |
@Bean
public StoreBuilder myStore() {
    return Stores.keyValueStoreBuilder(
            Stores.persistentKeyValueStore("my-store"), Serdes.Long(),
            Serdes.Long());
}
Example #19
Source File: TopologyFactory.java From rya with Apache License 2.0 | 4 votes |
@Override
public TopologyBuilder build(
        final String sparqlQuery,
        final String statementsTopic,
        final String resultsTopic,
        final BNodeIdFactory bNodeIdFactory)
        throws MalformedQueryException, TopologyBuilderException {
    requireNonNull(sparqlQuery);
    requireNonNull(statementsTopic);
    requireNonNull(resultsTopic);

    final ParsedQuery parsedQuery = new SPARQLParser().parseQuery(sparqlQuery, null);
    final TopologyBuilder builder = new TopologyBuilder();

    final TupleExpr expr = parsedQuery.getTupleExpr();
    final QueryVisitor visitor = new QueryVisitor(bNodeIdFactory);
    expr.visit(visitor);

    processorEntryList = visitor.getProcessorEntryList();
    final Map<TupleExpr, String> idMap = visitor.getIDs();

    // add source node
    builder.addSource(SOURCE, new StringDeserializer(), new VisibilityStatementDeserializer(), statementsTopic);

    // processing the processor entry list in reverse order means we go from leaf
    // nodes -> parent nodes.
    // So, when the parent processing nodes get added, the upstream
    // processing node will already exist.
    ProcessorEntry entry = null;
    for (int ii = processorEntryList.size() - 1; ii >= 0; ii--) {
        entry = processorEntryList.get(ii);
        // statement patterns need to be connected to the Source.
        if (entry.getNode() instanceof StatementPattern) {
            builder.addProcessor(entry.getID(), entry.getSupplier(), SOURCE);
        } else {
            final List<TupleExpr> parents = entry.getUpstreamNodes();
            final String[] parentIDs = new String[parents.size()];
            for (int id = 0; id < parents.size(); id++) {
                parentIDs[id] = idMap.get(parents.get(id));
            }
            builder.addProcessor(entry.getID(), entry.getSupplier(), parentIDs);
        }

        // Add a state store for any node type that requires one.
        if (entry.getNode() instanceof Join || entry.getNode() instanceof LeftJoin || entry.getNode() instanceof Group) {
            // Add a state store for the join processor.
            final StateStoreSupplier joinStoreSupplier =
                    Stores.create(entry.getID())
                            .withStringKeys()
                            .withValues(new VisibilityBindingSetSerde())
                            .persistent()
                            .build();
            builder.addStateStore(joinStoreSupplier, entry.getID());
        }
    }

    if (entry == null) {
        throw new TopologyBuilderException("No valid processor entries found.");
    }

    // Add a formatter that converts the ProcessorResults into the output format.
    final SinkEntry<?, ?> sinkEntry = visitor.getSinkEntry();
    builder.addProcessor("OUTPUT_FORMATTER", sinkEntry.getFormatterSupplier(), entry.getID());

    // Add the sink.
    builder.addSink(SINK, resultsTopic, sinkEntry.getKeySerializer(), sinkEntry.getValueSerializer(), "OUTPUT_FORMATTER");

    return builder;
}
Example #20
Source File: StreamsTopologyProvider.java From apicurio-registry with Apache License 2.0 | 4 votes |
@Override
public Topology get() {
    StreamsBuilder builder = new StreamsBuilder();

    // Simple defaults
    ImmutableMap<String, String> configuration = ImmutableMap.of(
            TopicConfig.CLEANUP_POLICY_CONFIG, TopicConfig.CLEANUP_POLICY_COMPACT,
            TopicConfig.MIN_COMPACTION_LAG_MS_CONFIG, "0",
            TopicConfig.SEGMENT_BYTES_CONFIG, String.valueOf(64 * 1024 * 1024)
    );

    // Input topic -- storage topic
    // This is where we handle "http" requests
    // Key is artifactId -- which is also used for KeyValue store key
    KStream<String, Str.StorageValue> storageRequest = builder.stream(
            properties.getStorageTopic(),
            Consumed.with(Serdes.String(), ProtoSerde.parsedWith(Str.StorageValue.parser()))
    );

    // Data structure holds all artifact information
    // Global rules are Data as well, with constant artifactId (GLOBAL_RULES variable)
    String storageStoreName = properties.getStorageStoreName();
    StoreBuilder<KeyValueStore<String /* artifactId */, Str.Data>> storageStoreBuilder =
            Stores
                    .keyValueStoreBuilder(
                            Stores.inMemoryKeyValueStore(storageStoreName),
                            Serdes.String(), ProtoSerde.parsedWith(Str.Data.parser())
                    )
                    .withCachingEnabled()
                    .withLoggingEnabled(configuration);

    builder.addStateStore(storageStoreBuilder);

    // We transform <artifactId, Data> into simple mapping <globalId, <artifactId, version>>
    KStream<Long, Str.TupleValue> globalRequest = storageRequest.transform(
            () -> new StorageTransformer(properties, dataDispatcher, factory),
            storageStoreName
    ).through(
            properties.getGlobalIdTopic(),
            Produced.with(Serdes.Long(), ProtoSerde.parsedWith(Str.TupleValue.parser()))
    );

    String globalIdStoreName = properties.getGlobalIdStoreName();
    StoreBuilder<KeyValueStore<Long /* globalId */, Str.TupleValue>> globalIdStoreBuilder =
            Stores
                    .keyValueStoreBuilder(
                            Stores.inMemoryKeyValueStore(globalIdStoreName),
                            Serdes.Long(), ProtoSerde.parsedWith(Str.TupleValue.parser())
                    )
                    .withCachingEnabled()
                    .withLoggingEnabled(configuration);

    builder.addStateStore(globalIdStoreBuilder);

    // Just handle globalId mapping -- put or delete
    globalRequest.process(() -> new GlobalIdProcessor(globalIdStoreName), globalIdStoreName);

    return builder.build(properties.getProperties());
}
Example #21
Source File: ProcessorKafkaStreamInstrumented.java From kafka-streams-ex with MIT License | 4 votes |
/** Runs the streams program, writing to the "fast-avgs-instrumented",
 * "medium-avgs-instrumented", and "slow-avgs-instrumented" topics.
 *
 * @param args Not used.
 */
public static void main(String[] args) throws Exception {

    // Configuration for Kafka Streams.
    Properties config = new Properties();

    config.put(StreamsConfig.APPLICATION_ID_CONFIG, "processor-kafka-streams-instrumented");
    config.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
    config.put(StreamsConfig.ZOOKEEPER_CONNECT_CONFIG, "localhost:2181");
    config.put(StreamsConfig.KEY_SERDE_CLASS_CONFIG, Serdes.String().getClass().getName());
    config.put(StreamsConfig.VALUE_SERDE_CLASS_CONFIG, Serdes.Double().getClass().getName());

    // Start at latest message.
    config.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "latest");

    // Create the state stores. We need one for each of the
    // MessageProcessor's in the topology.
    StateStoreSupplier fastStore = Stores.create("FAST-store")
            .withStringKeys()
            .withDoubleValues()
            .inMemory()
            .build();

    // Build the topology.
    TopologyBuilder builder = new TopologyBuilder();

    builder.addSource("messages-source",
                      Serdes.String().deserializer(),
                      Serdes.Double().deserializer(),
                      "messages-instrumented")
           .addProcessor("FAST-processor",
                         () -> new MovingAverageProcessor(0.1),
                         "messages-source")
           .addStateStore(fastStore, "FAST-processor")
           .addSink("FAST-sink",
                    "fast-avgs-instrumented",
                    Serdes.String().serializer(),
                    Serdes.Double().serializer(),
                    "FAST-processor");

    KafkaStreams streams = new KafkaStreams(builder, config);
    streams.start();
}
Example #22
Source File: TopolologyTestDriverKafkaStreamsInventoryCountTests.java From spring-cloud-stream-samples with Apache License 2.0 | 4 votes |
@BeforeEach
void setup() {
    configureDeserializer(countEventSerde.deserializer(), ProductKey.class, InventoryCountEvent.class, false);
    configureDeserializer(keySerde.deserializer(), ProductKey.class, null, true);

    final StreamsBuilder builder = new StreamsBuilder();
    KStream<ProductKey, InventoryUpdateEvent> input = builder.stream(INPUT_TOPIC, Consumed.with(keySerde, updateEventSerde));

    KafkaStreamsInventoryAggregator inventoryAggregator =
            new KafkaStreamsInventoryAggregator(Stores.inMemoryKeyValueStore(STORE_NAME));

    KStream<ProductKey, InventoryCountEvent> output = inventoryAggregator.process().apply(input);

    output.to(OUTPUT_TOPIC);

    Topology topology = builder.build();
    testDriver = new TopologyTestDriver(topology, getStreamsConfiguration());
    logger.debug(topology.describe().toString());

    setEventGenerator(new TopologyTestDriverUpdateEventGenerator(testDriver, INPUT_TOPIC,
            keySerde.serializer(), updateEventSerde.serializer()));
}
Example #23
Source File: KafkaStreamsInventoryCountApplication.java From spring-cloud-stream-samples with Apache License 2.0 | 4 votes |
@Bean
public KeyValueBytesStoreSupplier storeSupplier() {
    return Stores.inMemoryKeyValueStore(STORE_NAME);
}
Example #24
Source File: StockPerformanceStreamsProcessorTopology.java From kafka-streams-in-action with Apache License 2.0 | 4 votes |
public static Topology build() {
    Serde<String> stringSerde = Serdes.String();
    Serde<StockPerformance> stockPerformanceSerde = StreamsSerdes.StockPerformanceSerde();
    Serde<StockTransaction> stockTransactionSerde = StreamsSerdes.StockTransactionSerde();

    StreamsBuilder builder = new StreamsBuilder();

    String stocksStateStore = "stock-performance-store";
    double differentialThreshold = 0.02;

    KeyValueBytesStoreSupplier storeSupplier = Stores.lruMap(stocksStateStore, 100);
    StoreBuilder<KeyValueStore<String, StockPerformance>> storeBuilder =
            Stores.keyValueStoreBuilder(storeSupplier, Serdes.String(), stockPerformanceSerde);

    builder.addStateStore(storeBuilder);

    builder.stream("stock-transactions", Consumed.with(stringSerde, stockTransactionSerde))
            .transform(() -> new StockPerformanceTransformer(stocksStateStore, differentialThreshold), stocksStateStore)
            .to("stock-performance", Produced.with(stringSerde, stockPerformanceSerde));

    return builder.build();
}
Example #25
Source File: KafkaStreamsStreamListenerSetupMethodOrchestrator.java From spring-cloud-stream-binder-kafka with Apache License 2.0 | 4 votes |
private StoreBuilder buildStateStore(KafkaStreamsStateStoreProperties spec) {
    try {
        Serde<?> keySerde = this.keyValueSerdeResolver
                .getStateStoreKeySerde(spec.getKeySerdeString());
        Serde<?> valueSerde = this.keyValueSerdeResolver
                .getStateStoreValueSerde(spec.getValueSerdeString());
        StoreBuilder builder;
        switch (spec.getType()) {
        case KEYVALUE:
            builder = Stores.keyValueStoreBuilder(
                    Stores.persistentKeyValueStore(spec.getName()), keySerde,
                    valueSerde);
            break;
        case WINDOW:
            builder = Stores
                    .windowStoreBuilder(
                            Stores.persistentWindowStore(spec.getName(),
                                    spec.getRetention(), 3, spec.getLength(), false),
                            keySerde, valueSerde);
            break;
        case SESSION:
            builder = Stores.sessionStoreBuilder(Stores.persistentSessionStore(
                    spec.getName(), spec.getRetention()), keySerde, valueSerde);
            break;
        default:
            throw new UnsupportedOperationException(
                    "state store type (" + spec.getType() + ") is not supported!");
        }
        if (spec.isCacheEnabled()) {
            builder = builder.withCachingEnabled();
        }
        if (spec.isLoggingDisabled()) {
            builder = builder.withLoggingDisabled();
        }
        return builder;
    }
    catch (Exception ex) {
        LOG.error("failed to build state store exception : " + ex);
        throw ex;
    }
}
Example #26
Source File: CoGroupingApplication.java From kafka-streams-in-action with Apache License 2.0 | 4 votes |
public static void main(String[] args) throws Exception {

    StreamsConfig streamsConfig = new StreamsConfig(getProperties());
    Deserializer<String> stringDeserializer = Serdes.String().deserializer();
    Serializer<String> stringSerializer = Serdes.String().serializer();
    Serde<Tuple<List<ClickEvent>, List<StockTransaction>>> eventPerformanceTuple = StreamsSerdes.EventTransactionTupleSerde();
    Serializer<Tuple<List<ClickEvent>, List<StockTransaction>>> tupleSerializer = eventPerformanceTuple.serializer();
    Serde<StockTransaction> stockTransactionSerde = StreamsSerdes.StockTransactionSerde();
    Deserializer<StockTransaction> stockTransactionDeserializer = stockTransactionSerde.deserializer();

    Serde<ClickEvent> clickEventSerde = StreamsSerdes.ClickEventSerde();
    Deserializer<ClickEvent> clickEventDeserializer = clickEventSerde.deserializer();

    Topology topology = new Topology();
    Map<String, String> changeLogConfigs = new HashMap<>();
    changeLogConfigs.put("retention.ms", "120000");
    changeLogConfigs.put("cleanup.policy", "compact,delete");

    KeyValueBytesStoreSupplier storeSupplier = Stores.persistentKeyValueStore(TUPLE_STORE_NAME);
    StoreBuilder<KeyValueStore<String, Tuple<List<ClickEvent>, List<StockTransaction>>>> storeBuilder =
            Stores.keyValueStoreBuilder(storeSupplier,
                    Serdes.String(),
                    eventPerformanceTuple).withLoggingEnabled(changeLogConfigs);

    topology.addSource("Txn-Source", stringDeserializer, stockTransactionDeserializer, "stock-transactions")
            .addSource("Events-Source", stringDeserializer, clickEventDeserializer, "events")
            .addProcessor("Txn-Processor", StockTransactionProcessor::new, "Txn-Source")
            .addProcessor("Events-Processor", ClickEventProcessor::new, "Events-Source")
            .addProcessor("CoGrouping-Processor", CogroupingProcessor::new, "Txn-Processor", "Events-Processor")
            .addStateStore(storeBuilder, "CoGrouping-Processor")
            .addSink("Tuple-Sink", "cogrouped-results", stringSerializer, tupleSerializer, "CoGrouping-Processor");

    topology.addProcessor("Print", new KStreamPrinter("Co-Grouping"), "CoGrouping-Processor");

    MockDataProducer.produceStockTransactionsAndDayTradingClickEvents(50, 100, 100, StockTransaction::getSymbol);

    KafkaStreams kafkaStreams = new KafkaStreams(topology, streamsConfig);
    System.out.println("Co-Grouping App Started");
    kafkaStreams.cleanUp();
    kafkaStreams.start();
    Thread.sleep(70000);
    System.out.println("Shutting down the Co-Grouping App now");
    kafkaStreams.close();
    MockDataProducer.shutdown();
}
Example #27
Source File: CoGroupingListeningExampleApplication.java From kafka-streams-in-action with Apache License 2.0 | 4 votes |
public static void main(String[] args) throws Exception {

    StreamsConfig streamsConfig = new StreamsConfig(getProperties());
    Deserializer<String> stringDeserializer = Serdes.String().deserializer();
    Serializer<String> stringSerializer = Serdes.String().serializer();
    Serde<Tuple<List<ClickEvent>, List<StockTransaction>>> eventPerformanceTuple = StreamsSerdes.EventTransactionTupleSerde();
    Serializer<Tuple<List<ClickEvent>, List<StockTransaction>>> tupleSerializer = eventPerformanceTuple.serializer();
    Serde<StockTransaction> stockTransactionSerde = StreamsSerdes.StockTransactionSerde();
    Deserializer<StockTransaction> stockTransactionDeserializer = stockTransactionSerde.deserializer();

    Serde<ClickEvent> clickEventSerde = StreamsSerdes.ClickEventSerde();
    Deserializer<ClickEvent> clickEventDeserializer = clickEventSerde.deserializer();

    Topology topology = new Topology();
    Map<String, String> changeLogConfigs = new HashMap<>();
    changeLogConfigs.put("retention.ms", "120000");
    changeLogConfigs.put("cleanup.policy", "compact,delete");

    KeyValueBytesStoreSupplier storeSupplier = Stores.persistentKeyValueStore(TUPLE_STORE_NAME);
    StoreBuilder<KeyValueStore<String, Tuple<List<ClickEvent>, List<StockTransaction>>>> builder =
            Stores.keyValueStoreBuilder(storeSupplier, Serdes.String(), eventPerformanceTuple);

    topology.addSource("Txn-Source", stringDeserializer, stockTransactionDeserializer, "stock-transactions")
            .addSource("Events-Source", stringDeserializer, clickEventDeserializer, "events")
            .addProcessor("Txn-Processor", StockTransactionProcessor::new, "Txn-Source")
            .addProcessor("Events-Processor", ClickEventProcessor::new, "Events-Source")
            .addProcessor("CoGrouping-Processor", CogroupingProcessor::new, "Txn-Processor", "Events-Processor")
            .addStateStore(builder.withLoggingEnabled(changeLogConfigs), "CoGrouping-Processor")
            .addSink("Tuple-Sink", "cogrouped-results", stringSerializer, tupleSerializer, "CoGrouping-Processor");

    topology.addProcessor("Print", new KStreamPrinter("Co-Grouping"), "CoGrouping-Processor");

    MockDataProducer.produceStockTransactionsAndDayTradingClickEvents(50, 100, 100, StockTransaction::getSymbol);

    KafkaStreams kafkaStreams = new KafkaStreams(topology, streamsConfig);
    kafkaStreams.setGlobalStateRestoreListener(new LoggingStateRestoreListener());

    kafkaStreams.setUncaughtExceptionHandler((thread, exception) ->
            LOG.error("Thread [{}] encountered [{}]", thread.getName(), exception.getMessage())
    );

    kafkaStreams.setStateListener((newState, oldState) -> {
        if (oldState == KafkaStreams.State.REBALANCING && newState == KafkaStreams.State.RUNNING) {
            LOG.info("Topology Layout {}", topology.describe());
            LOG.info("Thread metadata {}", kafkaStreams.localThreadsMetadata());
        }
    });

    LOG.info("Co-Grouping App Started");
    kafkaStreams.cleanUp();
    kafkaStreams.start();
    Thread.sleep(70000);
    LOG.info("Shutting down the Co-Grouping metrics App now");
    kafkaStreams.close();
    MockDataProducer.shutdown();
}
Example #28
Source File: PregelComputation.java From kafka-graphs with Apache License 2.0 | 4 votes |
public void prepare(StreamsBuilder builder, Properties streamsConfig) {
    Properties producerConfig = ClientUtils.producerConfig(
        bootstrapServers, serialized.keySerde().serializer().getClass(), KryoSerializer.class,
        streamsConfig != null ? streamsConfig : new Properties()
    );
    producerConfig.setProperty(ProducerConfig.CLIENT_ID_CONFIG, applicationId + "-producer");
    this.producer = new KafkaProducer<>(producerConfig);

    final StoreBuilder<KeyValueStore<Integer, Map<K, Map<K, List<Message>>>>> workSetStoreBuilder =
        Stores.keyValueStoreBuilder(Stores.persistentKeyValueStore(localworkSetStoreName),
            Serdes.Integer(), new KryoSerde<>()
        );
    builder.addStateStore(workSetStoreBuilder);

    final StoreBuilder<KeyValueStore<K, Tuple4<Integer, VV, Integer, VV>>> solutionSetStoreBuilder =
        Stores.keyValueStoreBuilder(Stores.persistentKeyValueStore(localSolutionSetStoreName),
            serialized.keySerde(), new KryoSerde<>()
        );
    builder.addStateStore(solutionSetStoreBuilder);

    this.vertices = builder
        .table(
            verticesTopic,
            Materialized.<K, VV, KeyValueStore<Bytes, byte[]>>as(verticesStoreName)
                .withKeySerde(serialized.keySerde()).withValueSerde(serialized.vertexValueSerde())
        );

    this.edgesGroupedBySource = builder
        .table(
            edgesGroupedBySourceTopic,
            Materialized.<K, Map<K, EV>, KeyValueStore<Bytes, byte[]>>as(edgesStoreName)
                .withKeySerde(serialized.keySerde()).withValueSerde(new KryoSerde<>())
        );

    this.solutionSet = builder
        .table(solutionSetTopic, Consumed.<K, Tuple4<Integer, VV, Integer, VV>>with(serialized.keySerde(), new KryoSerde<>()))
        .mapValues(v -> v._4, Materialized.as(solutionSetStore));

    // Initialize solution set
    this.vertices
        .toStream()
        .mapValues(v -> new Tuple4<>(-1, v, 0, v))
        .to(solutionSetTopic, Produced.with(serialized.keySerde(), new KryoSerde<>()));

    // Initialize workset
    this.vertices
        .toStream()
        .peek((k, v) -> {
            try {
                int partition = PregelComputation.vertexToPartition(k, serialized.keySerde().serializer(), numPartitions);
                ZKUtils.addChild(curator, applicationId, new PregelState(State.CREATED, 0, Stage.SEND), childPath(partition));
            } catch (Exception e) {
                throw toRuntimeException(e);
            }
        })
        .mapValues((k, v) -> new Tuple3<>(0, k, initialMessage.map(Collections::singletonList).orElse(Collections.emptyList())))
        .peek((k, v) -> log.trace("workset 0 before topic: (" + k + ", " + v + ")"))
        .<K, Tuple3<Integer, K, List<Message>>>to(workSetTopic, Produced.with(serialized.keySerde(), new KryoSerde<>()));

    this.workSet = builder
        .stream(workSetTopic, Consumed.with(serialized.keySerde(), new KryoSerde<Tuple3<Integer, K, List<Message>>>()))
        .peek((k, v) -> log.trace("workset 1 after topic: (" + k + ", " + v + ")"));

    KStream<K, Tuple2<Integer, Map<K, List<Message>>>> syncedWorkSet = workSet
        .transform(BarrierSync::new, localworkSetStoreName)
        .peek((k, v) -> log.trace("workset 2 after join: (" + k + ", " + v + ")"));

    KStream<K, Tuple3<Integer, Tuple4<Integer, VV, Integer, VV>, Map<K, List<Message>>>> superstepComputation =
        syncedWorkSet
            .transformValues(VertexComputeUdf::new, localSolutionSetStoreName,
                vertices.queryableStoreName(), edgesGroupedBySource.queryableStoreName());

    // Compute the solution set delta
    KStream<K, Tuple4<Integer, VV, Integer, VV>> solutionSetDelta = superstepComputation
        .flatMapValues(v -> v._2 != null ? Collections.singletonList(v._2) : Collections.emptyList())
        .peek((k, v) -> log.trace("solution set: (" + k + ", " + v + ")"));

    solutionSetDelta
        .to(solutionSetTopic, Produced.with(serialized.keySerde(), new KryoSerde<>()));

    // Compute the inbox of each vertex for the next step (new workset)
    KStream<K, Tuple2<Integer, Map<K, List<Message>>>> newworkSet = superstepComputation
        .mapValues(v -> new Tuple2<>(v._1, v._3))
        .peek((k, v) -> log.trace("workset new: (" + k + ", " + v + ")"));

    newworkSet.process(() -> new SendMessages(producer));
}
Example #29
Source File: RewardsApp.java From Kafka-Streams-Real-time-Stream-Processing with The Unlicense | 4 votes |
public static void main(String[] args) {
    Properties props = new Properties();
    props.put(StreamsConfig.APPLICATION_ID_CONFIG, AppConfigs.applicationID);
    props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, AppConfigs.bootstrapServers);

    StreamsBuilder streamsBuilder = new StreamsBuilder();
    KStream<String, PosInvoice> KS0 = streamsBuilder.stream(
            AppConfigs.posTopicName,
            Consumed.with(PosSerdes.String(), PosSerdes.PosInvoice()));

    KStream<String, PosInvoice> KS1 = KS0.filter((key, value) ->
            value.getCustomerType().equalsIgnoreCase(AppConfigs.CUSTOMER_TYPE_PRIME));

    KStream<String, PosInvoice> KS2 = KS1.through("rewards-intermediate",
            Produced.with(PosSerdes.String(), PosSerdes.PosInvoice(), new RewardsPartitioner()));

    StoreBuilder kvStoreBuilder = Stores.keyValueStoreBuilder(
            Stores.inMemoryKeyValueStore(AppConfigs.REWARDS_STORE_NAME),
            Serdes.String(), Serdes.Double()
    );
    streamsBuilder.addStateStore(kvStoreBuilder);

    KStream<String, Notification> KS3 = KS2.transformValues(
            RewardsTransformer::new, AppConfigs.REWARDS_STORE_NAME);

    KS3.to(AppConfigs.notificationTopic,
            Produced.with(PosSerdes.String(), PosSerdes.Notification()));

    logger.info("Starting Kafka Streams");
    KafkaStreams myStream = new KafkaStreams(streamsBuilder.build(), props);
    myStream.start();

    Runtime.getRuntime().addShutdownHook(new Thread(() -> {
        logger.info("Stopping Stream");
        myStream.close();
    }));
}
Example #30
Source File: StockPerformanceStreamsAndProcessorMetricsApplication.java From kafka-streams-in-action with Apache License 2.0 | 2 votes |
public static void main(String[] args) throws Exception {
    StreamsConfig streamsConfig = new StreamsConfig(getProperties());
    Serde<String> stringSerde = Serdes.String();
    Serde<StockPerformance> stockPerformanceSerde = StreamsSerdes.StockPerformanceSerde();
    Serde<StockTransaction> stockTransactionSerde = StreamsSerdes.StockTransactionSerde();

    StreamsBuilder builder = new StreamsBuilder();

    String stocksStateStore = "stock-performance-store";
    double differentialThreshold = 0.05;

    KeyValueBytesStoreSupplier storeSupplier = Stores.lruMap(stocksStateStore, 100);
    StoreBuilder<KeyValueStore<String, StockPerformance>> storeBuilder =
            Stores.keyValueStoreBuilder(storeSupplier, Serdes.String(), stockPerformanceSerde);

    builder.addStateStore(storeBuilder);

    builder.stream("stock-transactions", Consumed.with(stringSerde, stockTransactionSerde))
            .transform(() -> new StockPerformanceMetricsTransformer(stocksStateStore, differentialThreshold), stocksStateStore)
            .peek((k, v) -> LOG.info("[stock-performance] key: {} value: {}", k, v))
            .to("stock-performance", Produced.with(stringSerde, stockPerformanceSerde));

    KafkaStreams kafkaStreams = new KafkaStreams(builder.build(), streamsConfig);
    MockDataProducer.produceStockTransactionsWithKeyFunction(50, 50, 25, StockTransaction::getSymbol);
    LOG.info("Stock Analysis KStream/Process API Metrics App Started");
    kafkaStreams.cleanUp();
    kafkaStreams.start();
    Thread.sleep(70000);
    LOG.info("Shutting down the Stock KStream/Process API Analysis Metrics App now");

    for (Map.Entry<MetricName, ? extends Metric> metricNameEntry : kafkaStreams.metrics().entrySet()) {
        Metric metric = metricNameEntry.getValue();
        MetricName metricName = metricNameEntry.getKey();
        if (!metric.metricValue().equals(0.0) && !metric.metricValue().equals(Double.NEGATIVE_INFINITY)) {
            LOG.info("MetricName {}", metricName.name());
            LOG.info(" = {}", metric.metricValue());
        }
    }

    kafkaStreams.close();
    MockDataProducer.shutdown();
}