Java Code Examples for org.apache.kafka.common.serialization.Serde#serializer()
The following examples show how to use org.apache.kafka.common.serialization.Serde#serializer(). Each example notes its original project, source file, and license.
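Before the project examples, here is a minimal, self-contained sketch of the API itself: a Serde<T> bundles a Serializer<T> and a Deserializer<T>, and serializer() hands back the serializer half. It uses only the built-in String serde (the topic name "demo-topic" is arbitrary), so it should run as written:

import org.apache.kafka.common.serialization.Deserializer;
import org.apache.kafka.common.serialization.Serde;
import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.common.serialization.Serializer;

public class SerdeSerializerSketch {
    public static void main(String[] args) {
        Serde<String> serde = Serdes.String();
        Serializer<String> serializer = serde.serializer();       // the method under discussion
        Deserializer<String> deserializer = serde.deserializer();

        // Serializers take the topic name as context; schema-aware serdes may use it.
        byte[] bytes = serializer.serialize("demo-topic", "hello");
        String roundTripped = deserializer.deserialize("demo-topic", bytes);
        System.out.println(roundTripped); // prints "hello"
    }
}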
Example 1
Source File: TestInput.java From fluent-kafka-streams-tests with MIT License
/**
 * <p>Constructor for the test input topic.</p>
 *
 * @param testDriver Kafka's {@link TopologyTestDriver} used in this test.
 * @param topic      Name of input topic.
 * @param keySerde   Serde for key type in topic.
 * @param valueSerde Serde for value type in topic.
 */
protected TestInput(final TopologyTestDriver testDriver, final String topic,
                    final Serde<K> keySerde, final Serde<V> valueSerde) {
    this.testDriver = testDriver;
    this.topic = topic;
    this.keySerde = keySerde;
    this.valueSerde = valueSerde;

    this.consumerFactory = new ConsumerRecordFactory<>(topic,
            keySerde == null ? new UnspecifiedSerializer<K>() : keySerde.serializer(),
            valueSerde == null ? new UnspecifiedSerializer<V>() : valueSerde.serializer()) {
        @Override
        public ConsumerRecord<byte[], byte[]> create(final String topicName, final K key, final V value,
                                                     final Headers headers, final long timestampMs) {
            final ConsumerRecord<byte[], byte[]> record =
                    super.create(topicName, key, value, headers, timestampMs);
            testDriver.pipeInput(record);
            return record;
        }
    };
}
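The UnspecifiedSerializer used above is a project-specific fallback whose implementation is not shown here. A minimal stand-in with the same role might look like the following (an assumption about its behavior, not the project's actual code):

import java.util.Map;
import org.apache.kafka.common.serialization.Serializer;

// Hypothetical stand-in: used when no Serde was configured, so any
// attempt to actually serialize should fail loudly.
public class UnspecifiedSerializer<T> implements Serializer<T> {
    @Override
    public void configure(final Map<String, ?> configs, final boolean isKey) { /* nothing to configure */ }

    @Override
    public byte[] serialize(final String topic, final T data) {
        throw new UnsupportedOperationException("No serde was specified for this input");
    }

    @Override
    public void close() { /* nothing to close */ }
}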
Example 2
Source File: StreamUtils.java From kafka-graphs with Apache License 2.0
public static <K, V> KStream<K, V> streamFromCollection(
        StreamsBuilder builder,
        Properties props,
        String topic,
        int numPartitions,
        short replicationFactor,
        Serde<K> keySerde,
        Serde<V> valueSerde,
        Collection<KeyValue<K, V>> values) {

    ClientUtils.createTopic(topic, numPartitions, replicationFactor, props);
    try (Producer<K, V> producer = new KafkaProducer<>(props, keySerde.serializer(), valueSerde.serializer())) {
        for (KeyValue<K, V> value : values) {
            ProducerRecord<K, V> producerRecord = new ProducerRecord<>(topic, value.key, value.value);
            producer.send(producerRecord);
        }
        producer.flush();
    }
    return builder.stream(topic, Consumed.with(keySerde, valueSerde));
}
Example 3
Source File: StreamUtils.java From kafka-graphs with Apache License 2.0
public static <K, V> KTable<K, V> tableFromCollection(
        StreamsBuilder builder,
        Properties props,
        String topic,
        int numPartitions,
        short replicationFactor,
        Serde<K> keySerde,
        Serde<V> valueSerde,
        Collection<KeyValue<K, V>> values) {

    ClientUtils.createTopic(topic, numPartitions, replicationFactor, props);
    try (Producer<K, V> producer = new KafkaProducer<>(props, keySerde.serializer(), valueSerde.serializer())) {
        for (KeyValue<K, V> value : values) {
            ProducerRecord<K, V> producerRecord = new ProducerRecord<>(topic, value.key, value.value);
            producer.send(producerRecord);
        }
        producer.flush();
    }
    return builder.table(topic, Consumed.with(keySerde, valueSerde), Materialized.with(keySerde, valueSerde));
}
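Both helpers share the same pattern: create the topic, produce the seed records with the serializers drawn from the serdes, then hand the topic back to the StreamsBuilder. A hypothetical call site, assuming a StreamsBuilder and producer Properties configured elsewhere, might look like:

// Hypothetical usage of tableFromCollection (signature shown above);
// builder and props are assumed to be configured elsewhere.
KTable<String, Long> table = StreamUtils.tableFromCollection(
        builder, props, "seed-topic", 1, (short) 1,
        Serdes.String(), Serdes.Long(),
        Arrays.asList(new KeyValue<>("a", 1L), new KeyValue<>("b", 2L)));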
Example 4
Source File: KafkaRequestAPI.java From simplesource with Apache License 2.0
private static <K, V> RequestPublisher<K, V> kafkaProducerSender(
        KafkaConfig kafkaConfig,
        String topicName,
        Serde<K> keySerde,
        Serde<V> valueSerde) {
    KafkaProducer<K, V> producer = new KafkaProducer<>(
            kafkaConfig.producerConfig(),
            keySerde.serializer(),
            valueSerde.serializer());
    return (key, value) -> {
        final ProducerRecord<K, V> record = new ProducerRecord<>(topicName, key, value);
        return FutureResult.ofFuture(producer.send(record), e -> {
                    logger.error("Error returned from future", e);
                    return e;
                })
                .map(meta -> new RequestPublisher.PublishResult(meta.timestamp()));
    };
}
Example 5
Source File: ZMartProcessorApp.java From kafka-streams-in-action with Apache License 2.0
public static void main(String[] args) throws Exception {
    MockDataProducer.producePurchaseData();

    StreamsConfig streamsConfig = new StreamsConfig(getProperties());
    Deserializer<String> stringDeserializer = Serdes.String().deserializer();
    Serializer<String> stringSerializer = Serdes.String().serializer();
    Serde<Purchase> purchaseSerde = StreamsSerdes.PurchaseSerde();
    Deserializer<Purchase> purchaseDeserializer = purchaseSerde.deserializer();
    Serializer<Purchase> purchaseSerializer = purchaseSerde.serializer();
    Serializer<PurchasePattern> patternSerializer = StreamsSerdes.PurchasePatternSerde().serializer();
    Serializer<RewardAccumulator> rewardsSerializer = StreamsSerdes.RewardAccumulatorSerde().serializer();

    Topology topology = new Topology();

    topology.addSource("txn-source", stringDeserializer, purchaseDeserializer, "transactions")
            .addProcessor("masking-processor",
                    () -> new MapValueProcessor<String, Purchase, Purchase>(p -> Purchase.builder(p).maskCreditCard().build()), "txn-source")
            .addProcessor("rewards-processor",
                    () -> new MapValueProcessor<String, Purchase, RewardAccumulator>(purchase -> RewardAccumulator.builder(purchase).build()), "txn-source")
            .addProcessor("patterns-processor",
                    () -> new MapValueProcessor<String, Purchase, PurchasePattern>(purchase -> PurchasePattern.builder(purchase).build()), "txn-source")
            .addSink("purchase-sink", "purchases", stringSerializer, purchaseSerializer, "masking-processor")
            .addSink("rewards-sink", "rewards", stringSerializer, rewardsSerializer, "rewards-processor")
            .addSink("patterns-sink", "patterns", stringSerializer, patternSerializer, "patterns-processor");

    topology.addProcessor("purchase-printer", new KStreamPrinter("purchase"), "masking-processor")
            .addProcessor("rewards-printer", new KStreamPrinter("rewards"), "rewards-processor")
            .addProcessor("patterns-printer", new KStreamPrinter("patterns"), "patterns-processor");

    KafkaStreams kafkaStreams = new KafkaStreams(topology, streamsConfig);
    System.out.println("ZMart Processor App Started");
    kafkaStreams.start();
    Thread.sleep(35000);
    System.out.println("Shutting down the ZMart Processor App now");
    kafkaStreams.close();
    MockDataProducer.shutdown();
}
Example 6
Source File: StockPerformanceApplication.java From kafka-streams-in-action with Apache License 2.0
public static void main(String[] args) throws Exception {
    StreamsConfig streamsConfig = new StreamsConfig(getProperties());
    Deserializer<String> stringDeserializer = Serdes.String().deserializer();
    Serializer<String> stringSerializer = Serdes.String().serializer();
    Serde<StockPerformance> stockPerformanceSerde = StreamsSerdes.StockPerformanceSerde();
    Serializer<StockPerformance> stockPerformanceSerializer = stockPerformanceSerde.serializer();
    Serde<StockTransaction> stockTransactionSerde = StreamsSerdes.StockTransactionSerde();
    Deserializer<StockTransaction> stockTransactionDeserializer = stockTransactionSerde.deserializer();

    Topology topology = new Topology();
    String stocksStateStore = "stock-performance-store";
    double differentialThreshold = 0.02;

    KeyValueBytesStoreSupplier storeSupplier = Stores.inMemoryKeyValueStore(stocksStateStore);
    StoreBuilder<KeyValueStore<String, StockPerformance>> storeBuilder =
            Stores.keyValueStoreBuilder(storeSupplier, Serdes.String(), stockPerformanceSerde);

    topology.addSource("stocks-source", stringDeserializer, stockTransactionDeserializer, "stock-transactions")
            .addProcessor("stocks-processor", () -> new StockPerformanceProcessor(stocksStateStore, differentialThreshold), "stocks-source")
            .addStateStore(storeBuilder, "stocks-processor")
            .addSink("stocks-sink", "stock-performance", stringSerializer, stockPerformanceSerializer, "stocks-processor");

    topology.addProcessor("stocks-printer", new KStreamPrinter("StockPerformance"), "stocks-processor");

    KafkaStreams kafkaStreams = new KafkaStreams(topology, streamsConfig);
    MockDataProducer.produceStockTransactionsWithKeyFunction(50, 50, 25, StockTransaction::getSymbol);
    System.out.println("Stock Analysis App Started");
    kafkaStreams.cleanUp();
    kafkaStreams.start();
    Thread.sleep(70000);
    System.out.println("Shutting down the Stock Analysis App now");
    kafkaStreams.close();
    MockDataProducer.shutdown();
}
Example 7
Source File: CodecsTest.java From vertx-kafka-client with Apache License 2.0
private <T> void testSerializer(Class<T> type, T val) {
    final Serde<T> serde = VertxSerdes.serdeFrom(type);
    final Deserializer<T> deserializer = serde.deserializer();
    final Serializer<T> serializer = serde.serializer();

    assertEquals("Should get the original value after serialization and deserialization",
            val, deserializer.deserialize(topic, serializer.serialize(topic, val)));
    assertEquals("Should support null in serialization and deserialization",
            null, deserializer.deserialize(topic, serializer.serialize(topic, null)));
}
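The helper is then driven with concrete types. Hypothetical invocations, assuming VertxSerdes.serdeFrom supports the requested type, might look like:

// Hypothetical calls; Buffer is io.vertx.core.buffer.Buffer.
testSerializer(String.class, "hello");
testSerializer(Buffer.class, Buffer.buffer("hello"));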
Example 8
Source File: CogroupingStreamsTest.java From kafka-tutorials with Apache License 2.0
@Test
public void cogroupingTest() throws IOException {
    final CogroupingStreams instance = new CogroupingStreams();
    final Properties envProps = instance.loadEnvProperties(TEST_CONFIG_FILE);

    final Properties streamProps = instance.buildStreamsProperties(envProps);
    final String appOneInputTopicName = envProps.getProperty("app-one.topic.name");
    final String appTwoInputTopicName = envProps.getProperty("app-two.topic.name");
    final String appThreeInputTopicName = envProps.getProperty("app-three.topic.name");
    final String totalResultOutputTopicName = envProps.getProperty("output.topic.name");

    final Topology topology = instance.buildTopology(envProps);
    try (final TopologyTestDriver testDriver = new TopologyTestDriver(topology, streamProps)) {
        final Serde<String> stringAvroSerde = CogroupingStreams.getPrimitiveAvroSerde(envProps, true);
        final SpecificAvroSerde<LoginEvent> loginEventSerde = CogroupingStreams.getSpecificAvroSerde(envProps);
        final SpecificAvroSerde<LoginRollup> rollupSerde = CogroupingStreams.getSpecificAvroSerde(envProps);

        final Serializer<String> keySerializer = stringAvroSerde.serializer();
        final Deserializer<String> keyDeserializer = stringAvroSerde.deserializer();
        final Serializer<LoginEvent> loginEventSerializer = loginEventSerde.serializer();

        final TestInputTopic<String, LoginEvent> appOneInputTopic =
                testDriver.createInputTopic(appOneInputTopicName, keySerializer, loginEventSerializer);
        final TestInputTopic<String, LoginEvent> appTwoInputTopic =
                testDriver.createInputTopic(appTwoInputTopicName, keySerializer, loginEventSerializer);
        final TestInputTopic<String, LoginEvent> appThreeInputTopic =
                testDriver.createInputTopic(appThreeInputTopicName, keySerializer, loginEventSerializer);
        final TestOutputTopic<String, LoginRollup> outputTopic =
                testDriver.createOutputTopic(totalResultOutputTopicName, keyDeserializer, rollupSerde.deserializer());

        final List<LoginEvent> appOneEvents = new ArrayList<>();
        appOneEvents.add(LoginEvent.newBuilder().setAppId("one").setUserId("foo").setTime(5L).build());
        appOneEvents.add(LoginEvent.newBuilder().setAppId("one").setUserId("bar").setTime(6L).build());
        appOneEvents.add(LoginEvent.newBuilder().setAppId("one").setUserId("bar").setTime(7L).build());

        final List<LoginEvent> appTwoEvents = new ArrayList<>();
        appTwoEvents.add(LoginEvent.newBuilder().setAppId("two").setUserId("foo").setTime(5L).build());
        appTwoEvents.add(LoginEvent.newBuilder().setAppId("two").setUserId("foo").setTime(6L).build());
        appTwoEvents.add(LoginEvent.newBuilder().setAppId("two").setUserId("bar").setTime(7L).build());

        final List<LoginEvent> appThreeEvents = new ArrayList<>();
        appThreeEvents.add(LoginEvent.newBuilder().setAppId("three").setUserId("foo").setTime(5L).build());
        appThreeEvents.add(LoginEvent.newBuilder().setAppId("three").setUserId("foo").setTime(6L).build());
        appThreeEvents.add(LoginEvent.newBuilder().setAppId("three").setUserId("bar").setTime(7L).build());
        appThreeEvents.add(LoginEvent.newBuilder().setAppId("three").setUserId("bar").setTime(9L).build());

        final Map<String, Map<String, Long>> expectedEventRollups = new TreeMap<>();
        final Map<String, Long> expectedAppOneRollup = new HashMap<>();
        final LoginRollup expectedLoginRollup = new LoginRollup(expectedEventRollups);
        expectedAppOneRollup.put("foo", 1L);
        expectedAppOneRollup.put("bar", 2L);
        expectedEventRollups.put("one", expectedAppOneRollup);

        final Map<String, Long> expectedAppTwoRollup = new HashMap<>();
        expectedAppTwoRollup.put("foo", 2L);
        expectedAppTwoRollup.put("bar", 1L);
        expectedEventRollups.put("two", expectedAppTwoRollup);

        final Map<String, Long> expectedAppThreeRollup = new HashMap<>();
        expectedAppThreeRollup.put("foo", 2L);
        expectedAppThreeRollup.put("bar", 2L);
        expectedEventRollups.put("three", expectedAppThreeRollup);

        sendEvents(appOneEvents, appOneInputTopic);
        sendEvents(appTwoEvents, appTwoInputTopic);
        sendEvents(appThreeEvents, appThreeInputTopic);

        final List<LoginRollup> actualLoginEventResults = outputTopic.readValuesToList();
        final Map<String, Map<String, Long>> actualRollupMap = new HashMap<>();
        for (LoginRollup actualLoginEventResult : actualLoginEventResults) {
            actualRollupMap.putAll(actualLoginEventResult.getLoginByAppAndUser());
        }
        final LoginRollup actualLoginRollup = new LoginRollup(actualRollupMap);

        assertEquals(expectedLoginRollup, actualLoginRollup);
    }
}
Example 9
Source File: StockPerformanceInteractiveQueryApplication.java From kafka-streams-in-action with Apache License 2.0
public static void main(String[] args) {
    if (args.length < 2) {
        LOG.error("Need to specify host, port");
        System.exit(1);
    }
    String host = args[0];
    int port = Integer.parseInt(args[1]);
    final HostInfo hostInfo = new HostInfo(host, port);

    Properties properties = getProperties();
    properties.put(StreamsConfig.APPLICATION_SERVER_CONFIG, host + ":" + port);
    StreamsConfig streamsConfig = new StreamsConfig(properties);

    Serde<String> stringSerde = Serdes.String();
    Serde<Long> longSerde = Serdes.Long();
    Serde<StockTransaction> stockTransactionSerde = StreamsSerdes.StockTransactionSerde();
    WindowedSerializer<String> windowedSerializer = new WindowedSerializer<>(stringSerde.serializer());
    WindowedDeserializer<String> windowedDeserializer = new WindowedDeserializer<>(stringSerde.deserializer());
    Serde<Windowed<String>> windowedSerde = Serdes.serdeFrom(windowedSerializer, windowedDeserializer);
    Serde<CustomerTransactions> customerTransactionsSerde = StreamsSerdes.CustomerTransactionsSerde();

    Aggregator<String, StockTransaction, Integer> sharesAggregator = (k, v, i) -> v.getShares() + i;

    StreamsBuilder builder = new StreamsBuilder();

    // data is already coming in keyed
    KStream<String, StockTransaction> stockTransactionKStream =
            builder.stream(MockDataProducer.STOCK_TRANSACTIONS_TOPIC,
                    Consumed.with(stringSerde, stockTransactionSerde)
                            .withOffsetResetPolicy(Topology.AutoOffsetReset.LATEST));

    stockTransactionKStream.map((k, v) -> KeyValue.pair(v.getSector(), v))
            .groupByKey(Serialized.with(stringSerde, stockTransactionSerde))
            .count(Materialized.as("TransactionsBySector"))
            .toStream()
            .peek((k, v) -> LOG.info("Transaction count for {} {}", k, v))
            .to("sector-transaction-counts", Produced.with(stringSerde, longSerde));

    stockTransactionKStream.map((k, v) -> KeyValue.pair(v.getCustomerId(), v))
            .groupByKey(Serialized.with(stringSerde, stockTransactionSerde))
            .windowedBy(SessionWindows.with(TimeUnit.MINUTES.toMillis(60)).until(TimeUnit.MINUTES.toMillis(120)))
            .aggregate(CustomerTransactions::new,
                    (k, v, ct) -> ct.update(v),
                    (k, ct, other) -> ct.merge(other),
                    Materialized.<String, CustomerTransactions, SessionStore<Bytes, byte[]>>as("CustomerPurchaseSessions")
                            .withKeySerde(stringSerde).withValueSerde(customerTransactionsSerde))
            .toStream()
            .peek((k, v) -> LOG.info("Session info for {} {}", k, v))
            .to("session-transactions", Produced.with(windowedSerde, customerTransactionsSerde));

    stockTransactionKStream.groupByKey(Serialized.with(stringSerde, stockTransactionSerde))
            .windowedBy(TimeWindows.of(10000))
            .aggregate(() -> 0, sharesAggregator,
                    Materialized.<String, Integer, WindowStore<Bytes, byte[]>>as("NumberSharesPerPeriod")
                            .withKeySerde(stringSerde)
                            .withValueSerde(Serdes.Integer()))
            .toStream()
            .peek((k, v) -> LOG.info("key is {} value is {}", k, v))
            .to("transaction-count", Produced.with(windowedSerde, Serdes.Integer()));

    KafkaStreams kafkaStreams = new KafkaStreams(builder.build(), streamsConfig);
    InteractiveQueryServer queryServer = new InteractiveQueryServer(kafkaStreams, hostInfo);
    StateRestoreHttpReporter restoreReporter = new StateRestoreHttpReporter(queryServer);
    queryServer.init();

    kafkaStreams.setGlobalStateRestoreListener(restoreReporter);

    kafkaStreams.setStateListener(((newState, oldState) -> {
        if (newState == KafkaStreams.State.RUNNING && oldState == KafkaStreams.State.REBALANCING) {
            LOG.info("Setting the query server to ready");
            queryServer.setReady(true);
        } else if (newState != KafkaStreams.State.RUNNING) {
            LOG.info("State not RUNNING, disabling the query server");
            queryServer.setReady(false);
        }
    }));

    kafkaStreams.setUncaughtExceptionHandler((t, e) -> {
        LOG.error("Thread {} had a fatal error {}", t, e, e);
        shutdown(kafkaStreams, queryServer);
    });

    Runtime.getRuntime().addShutdownHook(new Thread(() -> shutdown(kafkaStreams, queryServer)));

    LOG.info("Stock Analysis KStream Interactive Query App Started");
    kafkaStreams.cleanUp();
    kafkaStreams.start();
}
Example 10
Source File: PopsHopsApplication.java From kafka-streams-in-action with Apache License 2.0
public static void main(String[] args) throws Exception {
    StreamsConfig streamsConfig = new StreamsConfig(getProperties());
    Deserializer<BeerPurchase> beerPurchaseDeserializer = new JsonDeserializer<>(BeerPurchase.class);
    Serde<String> stringSerde = Serdes.String();
    Deserializer<String> stringDeserializer = stringSerde.deserializer();
    Serializer<String> stringSerializer = stringSerde.serializer();
    Serializer<BeerPurchase> beerPurchaseSerializer = new JsonSerializer<>();

    Topology topology = new Topology();
    String domesticSalesSink = "domestic-beer-sales";
    String internationalSalesSink = "international-beer-sales";
    String purchaseSourceNodeName = "beer-purchase-source";
    String purchaseProcessor = "purchase-processor";

    BeerPurchaseProcessor beerProcessor = new BeerPurchaseProcessor(domesticSalesSink, internationalSalesSink);

    topology.addSource(LATEST,
                    purchaseSourceNodeName,
                    new UsePreviousTimeOnInvalidTimestamp(),
                    stringDeserializer,
                    beerPurchaseDeserializer,
                    Topics.POPS_HOPS_PURCHASES.topicName())
            .addProcessor(purchaseProcessor, () -> beerProcessor, purchaseSourceNodeName);

    // Uncomment these two lines and comment out the printer lines for writing to topics
    // .addSink(internationalSalesSink, "international-sales", stringSerializer, beerPurchaseSerializer, purchaseProcessor)
    // .addSink(domesticSalesSink, "domestic-sales", stringSerializer, beerPurchaseSerializer, purchaseProcessor);

    // You'll have to comment these lines out if you want to write to topics as they have the same node names
    topology.addProcessor(domesticSalesSink, new KStreamPrinter("domestic-sales"), purchaseProcessor);
    topology.addProcessor(internationalSalesSink, new KStreamPrinter("international-sales"), purchaseProcessor);

    KafkaStreams kafkaStreams = new KafkaStreams(topology, streamsConfig);
    MockDataProducer.produceBeerPurchases(5);
    System.out.println("Starting Pops-Hops Application now");
    kafkaStreams.cleanUp();
    kafkaStreams.start();
    Thread.sleep(70000);
    System.out.println("Shutting down Pops-Hops Application now");
    kafkaStreams.close();
    MockDataProducer.shutdown();
}
Example 11
Source File: CoGroupingApplication.java From kafka-streams-in-action with Apache License 2.0
public static void main(String[] args) throws Exception {
    StreamsConfig streamsConfig = new StreamsConfig(getProperties());
    Deserializer<String> stringDeserializer = Serdes.String().deserializer();
    Serializer<String> stringSerializer = Serdes.String().serializer();
    Serde<Tuple<List<ClickEvent>, List<StockTransaction>>> eventPerformanceTuple = StreamsSerdes.EventTransactionTupleSerde();
    Serializer<Tuple<List<ClickEvent>, List<StockTransaction>>> tupleSerializer = eventPerformanceTuple.serializer();
    Serde<StockTransaction> stockTransactionSerde = StreamsSerdes.StockTransactionSerde();
    Deserializer<StockTransaction> stockTransactionDeserializer = stockTransactionSerde.deserializer();
    Serde<ClickEvent> clickEventSerde = StreamsSerdes.ClickEventSerde();
    Deserializer<ClickEvent> clickEventDeserializer = clickEventSerde.deserializer();

    Topology topology = new Topology();
    Map<String, String> changeLogConfigs = new HashMap<>();
    changeLogConfigs.put("retention.ms", "120000");
    changeLogConfigs.put("cleanup.policy", "compact,delete");

    KeyValueBytesStoreSupplier storeSupplier = Stores.persistentKeyValueStore(TUPLE_STORE_NAME);
    StoreBuilder<KeyValueStore<String, Tuple<List<ClickEvent>, List<StockTransaction>>>> storeBuilder =
            Stores.keyValueStoreBuilder(storeSupplier, Serdes.String(), eventPerformanceTuple)
                    .withLoggingEnabled(changeLogConfigs);

    topology.addSource("Txn-Source", stringDeserializer, stockTransactionDeserializer, "stock-transactions")
            .addSource("Events-Source", stringDeserializer, clickEventDeserializer, "events")
            .addProcessor("Txn-Processor", StockTransactionProcessor::new, "Txn-Source")
            .addProcessor("Events-Processor", ClickEventProcessor::new, "Events-Source")
            .addProcessor("CoGrouping-Processor", CogroupingProcessor::new, "Txn-Processor", "Events-Processor")
            .addStateStore(storeBuilder, "CoGrouping-Processor")
            .addSink("Tuple-Sink", "cogrouped-results", stringSerializer, tupleSerializer, "CoGrouping-Processor");

    topology.addProcessor("Print", new KStreamPrinter("Co-Grouping"), "CoGrouping-Processor");

    MockDataProducer.produceStockTransactionsAndDayTradingClickEvents(50, 100, 100, StockTransaction::getSymbol);

    KafkaStreams kafkaStreams = new KafkaStreams(topology, streamsConfig);
    System.out.println("Co-Grouping App Started");
    kafkaStreams.cleanUp();
    kafkaStreams.start();
    Thread.sleep(70000);
    System.out.println("Shutting down the Co-Grouping App now");
    kafkaStreams.close();
    MockDataProducer.shutdown();
}
Example 12
Source File: CoGroupingListeningExampleApplication.java From kafka-streams-in-action with Apache License 2.0
public static void main(String[] args) throws Exception {
    StreamsConfig streamsConfig = new StreamsConfig(getProperties());
    Deserializer<String> stringDeserializer = Serdes.String().deserializer();
    Serializer<String> stringSerializer = Serdes.String().serializer();
    Serde<Tuple<List<ClickEvent>, List<StockTransaction>>> eventPerformanceTuple = StreamsSerdes.EventTransactionTupleSerde();
    Serializer<Tuple<List<ClickEvent>, List<StockTransaction>>> tupleSerializer = eventPerformanceTuple.serializer();
    Serde<StockTransaction> stockTransactionSerde = StreamsSerdes.StockTransactionSerde();
    Deserializer<StockTransaction> stockTransactionDeserializer = stockTransactionSerde.deserializer();
    Serde<ClickEvent> clickEventSerde = StreamsSerdes.ClickEventSerde();
    Deserializer<ClickEvent> clickEventDeserializer = clickEventSerde.deserializer();

    Topology topology = new Topology();
    Map<String, String> changeLogConfigs = new HashMap<>();
    changeLogConfigs.put("retention.ms", "120000");
    changeLogConfigs.put("cleanup.policy", "compact,delete");

    KeyValueBytesStoreSupplier storeSupplier = Stores.persistentKeyValueStore(TUPLE_STORE_NAME);
    StoreBuilder<KeyValueStore<String, Tuple<List<ClickEvent>, List<StockTransaction>>>> builder =
            Stores.keyValueStoreBuilder(storeSupplier, Serdes.String(), eventPerformanceTuple);

    topology.addSource("Txn-Source", stringDeserializer, stockTransactionDeserializer, "stock-transactions")
            .addSource("Events-Source", stringDeserializer, clickEventDeserializer, "events")
            .addProcessor("Txn-Processor", StockTransactionProcessor::new, "Txn-Source")
            .addProcessor("Events-Processor", ClickEventProcessor::new, "Events-Source")
            .addProcessor("CoGrouping-Processor", CogroupingProcessor::new, "Txn-Processor", "Events-Processor")
            .addStateStore(builder.withLoggingEnabled(changeLogConfigs), "CoGrouping-Processor")
            .addSink("Tuple-Sink", "cogrouped-results", stringSerializer, tupleSerializer, "CoGrouping-Processor");

    topology.addProcessor("Print", new KStreamPrinter("Co-Grouping"), "CoGrouping-Processor");

    MockDataProducer.produceStockTransactionsAndDayTradingClickEvents(50, 100, 100, StockTransaction::getSymbol);

    KafkaStreams kafkaStreams = new KafkaStreams(topology, streamsConfig);
    kafkaStreams.setGlobalStateRestoreListener(new LoggingStateRestoreListener());

    kafkaStreams.setUncaughtExceptionHandler((thread, exception) ->
            LOG.error("Thread [{}] encountered [{}]", thread.getName(), exception.getMessage()));

    kafkaStreams.setStateListener((newState, oldState) -> {
        if (oldState == KafkaStreams.State.REBALANCING && newState == KafkaStreams.State.RUNNING) {
            LOG.info("Topology Layout {}", topology.describe());
            LOG.info("Thread metadata {}", kafkaStreams.localThreadsMetadata());
        }
    });

    LOG.info("Co-Grouping App Started");
    kafkaStreams.cleanUp();
    kafkaStreams.start();
    Thread.sleep(70000);
    LOG.info("Shutting down the Co-Grouping metrics App now");
    kafkaStreams.close();
    MockDataProducer.shutdown();
}
Example 13
Source File: TestPublisher.java From simplesource with Apache License 2.0
TestPublisher(TopologyTestDriver driver, final Serde<K> keySerde, final Serde<V> valueSerde, String topicName) {
    this.driver = driver;
    this.topicName = topicName;
    factory = new ConsumerRecordFactory<>(keySerde.serializer(), valueSerde.serializer());
}
Example 14
Source File: TestDriverPublisher.java From simplesource with Apache License 2.0
TestDriverPublisher(final TopologyTestDriver driver, final Serde<K> keySerde, final Serde<V> valueSerde) {
    this.driver = driver;
    factory = new ConsumerRecordFactory<>(keySerde.serializer(), valueSerde.serializer());
}
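Both publishers capture the serializers from the given serdes in a ConsumerRecordFactory. A hedged sketch of how a publish method might then use TestPublisher's fields (an assumption based on the constructors above, not simplesource's actual code):

// Hypothetical publish method: build a raw ConsumerRecord<byte[], byte[]>
// with the factory's captured serializers, then feed it to the driver under test.
void publish(final K key, final V value) {
    final ConsumerRecord<byte[], byte[]> record = factory.create(topicName, key, value);
    driver.pipeInput(record);
}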