org.apache.kafka.common.serialization.Serdes Java Examples
The following examples show how to use org.apache.kafka.common.serialization.Serdes. Each example notes the project, source file, and license it was taken from.
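Before the project examples, here is a minimal, self-contained sketch of the Serdes API surface the snippets below rely on: the built-in serdes (Serdes.String(), Serdes.Long(), and so on) and Serdes.serdeFrom(serializer, deserializer) for wrapping a custom serializer/deserializer pair. It assumes a recent kafka-clients version where Serializer and Deserializer have default configure()/close() methods; the UpperCaseSerializer and SimpleStringDeserializer classes are hypothetical, written only to illustrate the contracts.

import java.nio.charset.StandardCharsets;

import org.apache.kafka.common.serialization.Deserializer;
import org.apache.kafka.common.serialization.Serde;
import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.common.serialization.Serializer;

public class SerdesBasics {

    public static void main(String[] args) {
        // Built-in serdes for common types; each bundles a matching serializer/deserializer pair.
        Serde<String> stringSerde = Serdes.String();
        Serde<Long> longSerde = Serdes.Long();

        byte[] bytes = stringSerde.serializer().serialize("some-topic", "hello");
        String roundTripped = stringSerde.deserializer().deserialize("some-topic", bytes);
        System.out.println(roundTripped); // prints "hello"

        // Serdes.serdeFrom wraps an existing serializer/deserializer pair into a Serde;
        // this is how most of the custom serdes in the examples below are built.
        Serde<String> customSerde = Serdes.serdeFrom(new UpperCaseSerializer(), new SimpleStringDeserializer());
        byte[] upper = customSerde.serializer().serialize("some-topic", "hello");
        System.out.println(customSerde.deserializer().deserialize("some-topic", upper)); // prints "HELLO"
    }

    // Hypothetical serializer, for illustration only: upper-cases the value while encoding it.
    static class UpperCaseSerializer implements Serializer<String> {
        @Override
        public byte[] serialize(String topic, String data) {
            return data == null ? null : data.toUpperCase().getBytes(StandardCharsets.UTF_8);
        }
    }

    // Hypothetical deserializer, for illustration only: decodes the bytes back to a String.
    static class SimpleStringDeserializer implements Deserializer<String> {
        @Override
        public String deserialize(String topic, byte[] data) {
            return data == null ? null : new String(data, StandardCharsets.UTF_8);
        }
    }
}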
Example #1
Source File: ScsApplication.java From spring_io_2019 with Apache License 2.0
@StreamListener
@SendTo(Bindings.RATED_MOVIES)
KStream<Long, RatedMovie> rateMoviesFor(@Input(Bindings.AVG_TABLE) KTable<Long, Double> ratings,
                                        @Input(Bindings.MOVIES) KTable<Long, Movie> movies) {
    ValueJoiner<Movie, Double, RatedMovie> joiner = (movie, rating) ->
        new RatedMovie(movie.getMovieId(), movie.getReleaseYear(), movie.getTitle(), rating);
    // Join movies with their average ratings, materializing the result as a queryable
    // state store, and emit the rated movies as a stream.
    return movies
        .join(ratings, joiner, Materialized
            .<Long, RatedMovie, KeyValueStore<Bytes, byte[]>>as(Bindings.RATED_MOVIES_STORE)
            .withKeySerde(Serdes.Long())
            .withValueSerde(new JsonSerde<>(RatedMovie.class)))
        .toStream();
}
Example #2
Source File: NameJoinGlobalKTable.java From fluent-kafka-streams-tests with MIT License
public Topology getTopologyWithIntermediateTopic() {
    final StreamsBuilder builder = new StreamsBuilder();
    final KStream<Long, Long> inputStream =
        builder.stream(INPUT_TOPIC, Consumed.with(Serdes.Long(), Serdes.Long()));

    builder.stream(NAME_INPUT, Consumed.with(Serdes.Long(), Serdes.String()))
        .mapValues(name -> name.toUpperCase())
        .to(INTERMEDIATE_TOPIC);
    final GlobalKTable<Long, String> joinTable = builder.globalTable(INTERMEDIATE_TOPIC);

    inputStream
        .join(joinTable, (id, valueId) -> valueId, (id, name) -> name)
        .to(OUTPUT_TOPIC, Produced.with(Serdes.Long(), Serdes.String()));

    return builder.build();
}
Example #3
Source File: KeyValueSerdeResolver.java From spring-cloud-stream-binder-kafka with Apache License 2.0
private boolean isSerdeFromStandardDefaults(Serde<?> serde) {
    if (serde != null) {
        if (Number.class.isAssignableFrom(serde.getClass())) {
            return true;
        }
        else if (Serdes.ByteArray().getClass().isAssignableFrom(serde.getClass())) {
            return true;
        }
        else if (Serdes.String().getClass().isAssignableFrom(serde.getClass())) {
            return true;
        }
        else if (Serdes.UUID().getClass().isAssignableFrom(serde.getClass())) {
            return true;
        }
    }
    return false;
}
Example #4
Source File: StreamUtilsTest.java From kafka-graphs with Apache License 2.0
@Test
public void testCollectionToStream() throws Exception {
    Collection<KeyValue<Integer, Integer>> input = new ArrayList<>();
    for (Integer i : LEFT_INPUT) {
        input.add(new KeyValue<>(i, i));
    }
    StreamsBuilder builder = new StreamsBuilder();
    KStream<Integer, Integer> stream = StreamUtils.streamFromCollection(
        builder, PRODUCER_CONFIG, LEFT_INPUT_TOPIC, 50, (short) 1,
        Serdes.Integer(), Serdes.Integer(), input);
    stream.to(OUTPUT_TOPIC);
    startStreams(builder, Serdes.Integer(), Serdes.Integer());

    Thread.sleep(1000);

    List<KeyValue<Integer, Integer>> records = consumeData(
        OUTPUT_TOPIC, IntegerDeserializer.class, IntegerDeserializer.class, 26, 10000L);
    for (KeyValue<Integer, Integer> record : records) {
        assertEquals(record.key, record.value);
    }

    streams.close();
}
Example #5
Source File: Kafka_Streams_MachineLearning_H2O_Application.java From kafka-streams-machine-learning-examples with Apache License 2.0
static Properties getStreamConfiguration(String bootstrapServers, String applicationId) {
    final Properties streamsConfiguration = new Properties();
    // Give the Streams application a unique name. The name must be unique
    // in the Kafka cluster against which the application is run.
    streamsConfiguration.put(StreamsConfig.APPLICATION_ID_CONFIG, applicationId);
    // Where to find Kafka broker(s).
    streamsConfiguration.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
    // Specify default (de)serializers for record keys and for record values.
    streamsConfiguration.put(StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG, Serdes.String().getClass().getName());
    streamsConfiguration.put(StreamsConfig.DEFAULT_VALUE_SERDE_CLASS_CONFIG, Serdes.String().getClass().getName());
    // For illustrative purposes we disable record caches.
    streamsConfiguration.put(StreamsConfig.CACHE_MAX_BYTES_BUFFERING_CONFIG, 0);
    return streamsConfiguration;
}
Example #6
Source File: SchemaKStreamTest.java From ksql-fork-with-deep-learning-function with Apache License 2.0
@Test
public void testGroupByKey() {
    String selectQuery = "SELECT col0, col1 FROM test1 WHERE col0 > 100;";
    PlanNode logicalPlan = planBuilder.buildLogicalPlan(selectQuery);
    initialSchemaKStream = new SchemaKStream(logicalPlan.getTheSourceNode().getSchema(), kStream,
        ksqlStream.getKeyField(), new ArrayList<>(),
        SchemaKStream.Type.SOURCE, functionRegistry, new MockSchemaRegistryClient());

    Expression keyExpression = new DereferenceExpression(
        new QualifiedNameReference(QualifiedName.of("TEST1")), "COL0");
    KsqlTopicSerDe ksqlTopicSerDe = new KsqlJsonTopicSerDe();
    Serde<GenericRow> rowSerde = ksqlTopicSerDe.getGenericRowSerde(
        initialSchemaKStream.getSchema(), null, false, null);
    List<Expression> groupByExpressions = Arrays.asList(keyExpression);
    SchemaKGroupedStream groupedSchemaKStream = initialSchemaKStream.groupBy(
        Serdes.String(), rowSerde, groupByExpressions);

    Assert.assertEquals(groupedSchemaKStream.getKeyField().name(), "COL0");
}
Example #7
Source File: AggregationsAndReducingExample.java From kafka-streams-in-action with Apache License 2.0
private static Properties getProperties() {
    Properties props = new Properties();
    props.put(StreamsConfig.APPLICATION_ID_CONFIG, "KTable-aggregations");
    props.put(ConsumerConfig.GROUP_ID_CONFIG, "KTable-aggregations-id");
    props.put(ConsumerConfig.CLIENT_ID_CONFIG, "KTable-aggregations-client");
    props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "latest");
    props.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, "30000");
    props.put(StreamsConfig.COMMIT_INTERVAL_MS_CONFIG, "10000");
    props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
    props.put(StreamsConfig.NUM_STREAM_THREADS_CONFIG, "1");
    props.put(ConsumerConfig.METADATA_MAX_AGE_CONFIG, "10000");
    props.put(StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG, Serdes.String().getClass().getName());
    props.put(StreamsConfig.DEFAULT_VALUE_SERDE_CLASS_CONFIG, Serdes.String().getClass().getName());
    props.put(StreamsConfig.REPLICATION_FACTOR_CONFIG, 1);
    props.put(StreamsConfig.DEFAULT_TIMESTAMP_EXTRACTOR_CLASS_CONFIG, WallclockTimestampExtractor.class);
    return props;
}
Example #8
Source File: PlayerCommandConnector.java From football-events with MIT License
public void build(StreamsBuilder builder) {
    KStream<byte[], JsonNode> playerSourceStream = builder.stream(
            CONNECT_PLAYERS_TOPIC, Consumed.with(Serdes.ByteArray(), new JsonNodeSerde()))
        .filter((id, json) -> creationOrSnapshot(json));

    playerSourceStream.foreach(this::debug);

    KStream<String, PlayerStartedCareer> playerReadyStream = playerSourceStream
        .map((id, json) -> {
            PlayerStartedCareer event = createEvent(json);
            return KeyValue.pair(event.getAggId(), event);
        });

    playerReadyStream.to(PLAYER_STARTED_CAREER_TOPIC, Produced.with(
        Serdes.String(), new JsonPojoSerde<>(PlayerStartedCareer.class)));
}
Example #9
Source File: MainVerticle.java From kiqr with Apache License 2.0
@Override
public void start(Future<Void> startFuture) throws Exception {
    Properties props = new Properties();
    props.put(StreamsConfig.APPLICATION_ID_CONFIG, "kiqr");
    props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
    props.put(StreamsConfig.KEY_SERDE_CLASS_CONFIG, Serdes.StringSerde.class);
    props.put(StreamsConfig.VALUE_SERDE_CLASS_CONFIG, Serdes.LongSerde.class);
    props.put(StreamsConfig.CACHE_MAX_BYTES_BUFFERING_CONFIG, "0");

    KStreamBuilder builder = new KStreamBuilder();
    KTable<String, Long> table = builder.table(Serdes.String(), Serdes.Long(), "visits", "visitStore");
    KTable<Windowed<String>, Long> windowedCount =
        table.toStream().groupByKey().count(TimeWindows.of(60), "visitCount");

    vertx.deployVerticle(
        RestKiqrServerVerticle.Builder.serverBuilder(builder, props).withPort(2901).build(),
        res -> {
            if (res.succeeded()) {
                startFuture.complete();
            } else {
                startFuture.fail(res.cause());
            }
        });
}
Example #10
Source File: WordCount.java From fluent-kafka-streams-tests with MIT License
public Topology getTopology() {
    final Serde<String> stringSerde = Serdes.String();
    final Serde<Long> longSerde = Serdes.Long();

    final StreamsBuilder builder = new StreamsBuilder();
    final KStream<String, String> textLines = builder.stream(this.inputTopic);

    final Pattern pattern = Pattern.compile("\\W+", Pattern.UNICODE_CHARACTER_CLASS);
    final KTable<String, Long> wordCounts = textLines
        .flatMapValues(value -> Arrays.asList(pattern.split(value.toLowerCase())))
        .groupBy((key, word) -> word)
        .count(Materialized.as("count"));

    wordCounts.toStream().to(this.outputTopic, Produced.with(stringSerde, longSerde));
    return builder.build();
}
Example #11
Source File: ProcessStreamService.java From SkaETL with Apache License 2.0
public void createStreamEmail(String inputTopic, ParameterOutput parameterOutput) {
    String email = parameterOutput.getEmail();
    if (email != null) {
        String template = parameterOutput.getTemplate();
        StreamsBuilder builder = new StreamsBuilder();
        if (template != null) {
            builder.stream(inputTopic, Consumed.with(Serdes.String(), GenericSerdes.jsonNodeSerde()))
                .process(() -> new JsonNodeEmailProcessor(email, template, emailService));
        } else {
            builder.stream(inputTopic, Consumed.with(Serdes.String(), GenericSerdes.jsonNodeSerde()))
                .process(() -> new JsonNodeEmailProcessor(email, emailService));
        }

        KafkaStreams streams = new KafkaStreams(builder.build(),
            KafkaUtils.createKStreamProperties(
                getProcessConsumer().getIdProcess() + ProcessConstants.EMAIL_PROCESS,
                getBootstrapServer()));
        Runtime.getRuntime().addShutdownHook(new Thread(streams::close));
        streams.start();
        addStreams(streams);
    } else {
        log.error("destinationEmail is null and it's not normal");
    }
}
Example #12
Source File: AggregatingCount.java From kafka-tutorials with Apache License 2.0
public Topology buildTopology(Properties envProps, final SpecificAvroSerde<TicketSale> ticketSaleSerde) {
    final StreamsBuilder builder = new StreamsBuilder();

    final String inputTopic = envProps.getProperty("input.topic.name");
    final String outputTopic = envProps.getProperty("output.topic.name");

    builder.stream(inputTopic, Consumed.with(Serdes.String(), ticketSaleSerde))
        // Set key to title and value to ticket value
        .map((k, v) -> new KeyValue<>((String) v.getTitle(), (Integer) v.getTicketTotalValue()))
        // Group by title
        .groupByKey(Grouped.with(Serdes.String(), Serdes.Integer()))
        // Apply COUNT method
        .count()
        // Write to stream specified by outputTopic
        .toStream().to(outputTopic, Produced.with(Serdes.String(), Serdes.Long()));

    return builder.build();
}
Example #13
Source File: StreamsRegistryConfiguration.java From apicurio-registry with Apache License 2.0
@Produces
@ApplicationScoped
public ReadOnlyKeyValueStore<Long, Str.TupleValue> globalIdKeyValueStore(
    KafkaStreams streams,
    HostInfo storageLocalHost,
    StreamsProperties properties
) {
    return new DistributedReadOnlyKeyValueStore<>(
        streams,
        storageLocalHost,
        properties.getGlobalIdStoreName(),
        Serdes.Long(), ProtoSerde.parsedWith(Str.TupleValue.parser()),
        new DefaultGrpcChannelProvider(),
        true,
        (filter, over, id, tuple) -> true
    );
}
Example #14
Source File: VehicleStatusCountProcessor.java From microservice-patterns with Apache License 2.0
@Bean
public KStream<String, Long> statusCountStreamProcessor(StreamsBuilder streamsBuilder) {
    KStream<Integer, VehicleLocation> stream = streamsBuilder.stream("gpslocation",  // Read from topic
        Consumed.with(Serdes.Integer(), new JsonSerde<>(VehicleLocation.class)));    // using Integer and JSON serdes
    return stream.map((k, v) -> {
            // Transform the key to Online/Offline based on status
            String online = v.isOnline() ? "Online" : "Offline";
            return new KeyValue<>(online, v);
        })
        .groupByKey(Serialized.with(           // Group by the newly mapped key from the previous step
            Serdes.String(),
            new JsonSerde<>(VehicleLocation.class)))
        .count(Materialized.as("statusCount")) // Materialize this value to a state store
        .toStream();
}
Example #15
Source File: TestContextBuilder.java From simplesource with Apache License 2.0
private static AggregateSerdes<String, TestCommand, TestEvent, Optional<TestAggregate>> aggregateSerdes() {
    return new AggregateSerdes<String, TestCommand, TestEvent, Optional<TestAggregate>>() {
        @Override
        public Serde<String> aggregateKey() {
            return Serdes.String();
        }

        @Override
        public Serde<CommandRequest<String, TestCommand>> commandRequest() {
            return new MockInMemorySerde<>();
        }

        @Override
        public Serde<CommandId> commandId() {
            return new MockInMemorySerde<>();
        }

        @Override
        public Serde<ValueWithSequence<TestEvent>> valueWithSequence() {
            return new MockInMemorySerde<>();
        }

        @Override
        public Serde<AggregateUpdate<Optional<TestAggregate>>> aggregateUpdate() {
            return new MockInMemorySerde<>();
        }

        @Override
        public Serde<CommandResponse<String>> commandResponse() {
            return new MockInMemorySerde<>();
        }
    };
}
Example #16
Source File: UserClicksPerMinuteTest.java From fluent-kafka-streams-tests with MIT License
@Test
void shouldWorkWithExplicitKeySerdes() {
    final long time = TimeUnit.MINUTES.toMillis(1);
    this.testTopology.input().withKeySerde(Serdes.Integer())
        .at(time).add(USER, new ClickEvent(USER));

    this.testTopology.streamOutput()
        .withKeySerde(Serdes.Integer())
        .withValueSerde(new JsonSerde<>(ClickOutput.class))
        .expectNextRecord().hasKey(USER).hasValue(new ClickOutput(USER, 1, time));
}
Example #17
Source File: WordCountTest.java From fluent-kafka-streams-tests with MIT License
@Test
public void shouldReturnCorrectIteratorExplicitStream() {
    this.testTopology.input().add("bla")
        .add("blub")
        .add("bla")
        .add("foo");
    final List<String> expected = List.of("bla", "blub", "bla", "foo");

    assertThat(this.testTopology.streamOutput().withSerde(Serdes.String(), Serdes.Long()).iterator())
        .extracting(ProducerRecord::key)
        .containsAll(expected);
}
Example #18
Source File: TwitterStreamsAnalyzer.java From kafka-streams with Apache License 2.0
public void run() {
    StreamsConfig streamsConfig = new StreamsConfig(getProperties());

    JsonSerializer<Tweet> tweetJsonSerializer = new JsonSerializer<>();
    JsonDeserializer<Tweet> tweetJsonDeserializer = new JsonDeserializer<>(Tweet.class);
    Serde<Tweet> tweetSerde = Serdes.serdeFrom(tweetJsonSerializer, tweetJsonDeserializer);

    KStreamBuilder kStreamBuilder = new KStreamBuilder();

    Classifier classifier = new Classifier();
    classifier.train(new File("src/main/resources/kafkaStreamsTwitterTrainingData_clean.csv"));

    KeyValueMapper<String, Tweet, String> languageToKey = (k, v) ->
        StringUtils.isNotBlank(v.getText()) ? classifier.classify(v.getText()) : "unknown";

    Predicate<String, Tweet> isEnglish = (k, v) -> k.equals("english");
    Predicate<String, Tweet> isFrench = (k, v) -> k.equals("french");
    Predicate<String, Tweet> isSpanish = (k, v) -> k.equals("spanish");

    KStream<String, Tweet> tweetKStream = kStreamBuilder.stream(Serdes.String(), tweetSerde, "twitterData");
    KStream<String, Tweet>[] filteredStreams =
        tweetKStream.selectKey(languageToKey).branch(isEnglish, isFrench, isSpanish);

    filteredStreams[0].to(Serdes.String(), tweetSerde, "english");
    filteredStreams[1].to(Serdes.String(), tweetSerde, "french");
    filteredStreams[2].to(Serdes.String(), tweetSerde, "spanish");

    kafkaStreams = new KafkaStreams(kStreamBuilder, streamsConfig);
    System.out.println("Starting twitter analysis streams");
    kafkaStreams.start();
    System.out.println("Started");
}
Example #19
Source File: KafkaStreamsPipeline.java From quarkus with Apache License 2.0
@Produces
public Topology buildTopology() {
    StreamsBuilder builder = new StreamsBuilder();

    ObjectMapperSerde<Category> categorySerde = new ObjectMapperSerde<>(Category.class);
    ObjectMapperSerde<Customer> customerSerde = new ObjectMapperSerde<>(Customer.class);
    ObjectMapperSerde<EnrichedCustomer> enrichedCustomerSerde = new ObjectMapperSerde<>(EnrichedCustomer.class);

    KTable<Integer, Category> categories = builder.table(
        "streams-test-categories",
        Consumed.with(Serdes.Integer(), categorySerde));

    KStream<Integer, EnrichedCustomer> customers = builder
        .stream("streams-test-customers", Consumed.with(Serdes.Integer(), customerSerde))
        .selectKey((id, customer) -> customer.category)
        .join(
            categories,
            (customer, category) -> new EnrichedCustomer(customer.id, customer.name, category),
            Joined.with(Serdes.Integer(), customerSerde, categorySerde));

    KeyValueBytesStoreSupplier storeSupplier = Stores.inMemoryKeyValueStore("countstore");
    customers.groupByKey()
        .count(Materialized.<Integer, Long>as(storeSupplier));

    customers.selectKey((categoryId, customer) -> customer.id)
        .to("streams-test-customers-processed", Produced.with(Serdes.Integer(), enrichedCustomerSerde));

    return builder.build();
}
Example #20
Source File: ValidatorDescriptionRepository.java From SkaETL with Apache License 2.0
public ValidatorDescriptionRepository(KafkaAdminService kafkaAdminService, KafkaConfiguration kafkaConfiguration) {
    super("validator-description",
        Serdes.serdeFrom(new GenericSerializer<>(), new GenericDeserializer<>(ValidatorDescription.class)),
        validatorDescription -> validatorDescription.getName(),
        kafkaAdminService,
        kafkaConfiguration);
}
Example #21
Source File: SimulateStreamService.java From SkaETL with Apache License 2.0
public void createStreamSystemOut(String topicToConsume) {
    StreamsBuilder builder = new StreamsBuilder();
    final Serde<SimulateData> simulateDataSerde =
        Serdes.serdeFrom(new SimulateDataSerializer(), new SimulateDataDeserializer());
    builder.stream(topicToConsume, Consumed.with(Serdes.String(), simulateDataSerde))
        .process(() -> new LoggingProcessor<>());

    KafkaStreams streams = new KafkaStreams(builder.build(),
        createKStreamProperties(SYSOUT_PROCESS, getBootstrapServer()));
    Runtime.getRuntime().addShutdownHook(new Thread(streams::close));
    streams.start();
}
Example #22
Source File: GraphOperationsITCase.java From kafka-graphs with Apache License 2.0
@Test
public void testFilterVertices() throws Exception {
    Properties producerConfig = ClientUtils.producerConfig(CLUSTER.bootstrapServers(),
        LongSerializer.class, LongSerializer.class, new Properties());
    StreamsBuilder builder = new StreamsBuilder();

    KTable<Long, Long> vertices = StreamUtils.tableFromCollection(builder, producerConfig,
        Serdes.Long(), Serdes.Long(), TestGraphUtils.getLongLongVertices());
    KTable<Edge<Long>, Long> edges = StreamUtils.tableFromCollection(builder, producerConfig,
        new KryoSerde<>(), Serdes.Long(), TestGraphUtils.getLongLongEdges());
    KGraph<Long, Long, Long> graph = new KGraph<>(
        vertices, edges, GraphSerialized.with(Serdes.Long(), Serdes.Long(), Serdes.Long()));

    KTable<Edge<Long>, Long> data = graph.filterOnVertices((k, v) -> v > 2).edges();

    startStreams(builder, Serdes.Long(), Serdes.Long());

    Thread.sleep(5000);

    List<KeyValue<Edge<Long>, Long>> result = StreamUtils.listFromTable(streams, data);

    expectedResult = "3,4,34\n" +
        "3,5,35\n" +
        "4,5,45\n";

    compareResultAsTuples(result, expectedResult);
}
Example #23
Source File: FindDistinctEvents.java From kafka-tutorials with Apache License 2.0
public Topology buildTopology(Properties envProps, final SpecificAvroSerde<Click> clicksSerde) {
    final StreamsBuilder builder = new StreamsBuilder();

    final String inputTopic = envProps.getProperty("input.topic.name");
    final String outputTopic = envProps.getProperty("output.topic.name");

    // How long we "remember" an event. During this time, any incoming duplicates of the event
    // will be, well, dropped, thereby de-duplicating the input data.
    //
    // The actual value depends on your use case. To reduce memory and disk usage, you could
    // decrease the size to purge old windows more frequently at the cost of potentially missing out
    // on de-duplicating late-arriving records.
    final Duration windowSize = Duration.ofMinutes(2);

    // The retention period must be at least the window size -- for this use case, we don't need
    // a longer retention period and thus just use the window size as retention time.
    final Duration retentionPeriod = windowSize;

    final StoreBuilder<WindowStore<String, Long>> dedupStoreBuilder = Stores.windowStoreBuilder(
        Stores.persistentWindowStore(storeName, retentionPeriod, windowSize, false),
        Serdes.String(),
        Serdes.Long());

    builder.addStateStore(dedupStoreBuilder);

    builder
        .stream(inputTopic, Consumed.with(Serdes.String(), clicksSerde))
        .transformValues(() -> new DeduplicationTransformer<>(windowSize.toMillis(),
            (key, value) -> value.getIp()), storeName)
        .filter((k, v) -> v != null)
        .to(outputTopic, Produced.with(Serdes.String(), clicksSerde));

    return builder.build();
}
Example #24
Source File: StockPerformanceStreamsAndProcessorApplication.java From kafka-streams-in-action with Apache License 2.0
public static void main(String[] args) throws Exception {
    StreamsConfig streamsConfig = new StreamsConfig(getProperties());
    Serde<String> stringSerde = Serdes.String();
    Serde<StockPerformance> stockPerformanceSerde = StreamsSerdes.StockPerformanceSerde();
    Serde<StockTransaction> stockTransactionSerde = StreamsSerdes.StockTransactionSerde();

    StreamsBuilder builder = new StreamsBuilder();

    String stocksStateStore = "stock-performance-store";
    double differentialThreshold = 0.02;

    KeyValueBytesStoreSupplier storeSupplier = Stores.lruMap(stocksStateStore, 100);
    StoreBuilder<KeyValueStore<String, StockPerformance>> storeBuilder =
        Stores.keyValueStoreBuilder(storeSupplier, Serdes.String(), stockPerformanceSerde);

    builder.addStateStore(storeBuilder);

    builder.stream("stock-transactions", Consumed.with(stringSerde, stockTransactionSerde))
        .transform(() -> new StockPerformanceTransformer(stocksStateStore, differentialThreshold),
            stocksStateStore)
        .print(Printed.<String, StockPerformance>toSysOut().withLabel("StockPerformance"));
        // Uncomment the line below and comment out the print() call above for writing to a topic
        //.to(stringSerde, stockPerformanceSerde, "stock-performance");

    KafkaStreams kafkaStreams = new KafkaStreams(builder.build(), streamsConfig);
    MockDataProducer.produceStockTransactionsWithKeyFunction(50, 50, 25, StockTransaction::getSymbol);
    System.out.println("Stock Analysis KStream/Process API App Started");
    kafkaStreams.cleanUp();
    kafkaStreams.start();
    Thread.sleep(70000);
    System.out.println("Shutting down the Stock KStream/Process API Analysis App now");
    kafkaStreams.close();
    MockDataProducer.shutdown();
}
Example #25
Source File: SchemaKGroupedStream.java From ksql-fork-with-deep-learning-function with Apache License 2.0
@SuppressWarnings("unchecked") public SchemaKTable aggregate( final Initializer initializer, final UdafAggregator aggregator, final WindowExpression windowExpression, final Serde<GenericRow> topicValueSerDe) { final KTable aggKtable; if (windowExpression != null) { final Materialized<String, GenericRow, ?> materialized = Materialized.<String, GenericRow, WindowStore<Bytes, byte[]>>with( Serdes.String(), topicValueSerDe); final KsqlWindowExpression ksqlWindowExpression = windowExpression.getKsqlWindowExpression(); aggKtable = ksqlWindowExpression.applyAggregate( kgroupedStream, initializer, aggregator, materialized ); } else { aggKtable = kgroupedStream.aggregate( initializer, aggregator, Materialized.with(Serdes.String(), topicValueSerDe) ); } return new SchemaKTable( schema, aggKtable, keyField, sourceSchemaKStreams, windowExpression != null, SchemaKStream.Type.AGGREGATE, functionRegistry, schemaRegistryClient ); }
Example #26
Source File: WordCountTest.java From fluent-kafka-streams-tests with MIT License
@Test
void shouldFailForUnmatchedKey() {
    this.testTopology.input().add("bla")
        .add("blub")
        .add("bla");

    assertThatThrownBy(() -> this.testTopology.streamOutput().withSerde(Serdes.String(), Serdes.Long())
        .expectNextRecord().hasKey("blub"))
        .hasMessage("Record key does not match");
}
Example #27
Source File: KafkaStreamsNativeEncodingDecodingTests.java From spring-cloud-stream-binder-kafka with Apache License 2.0
@StreamListener("input") @SendTo("output") public KStream<?, String> process(KStream<Object, String> input) { return input .flatMapValues( value -> Arrays.asList(value.toLowerCase().split("\\W+"))) .map((key, value) -> new KeyValue<>(value, value)) .groupByKey(Serialized.with(Serdes.String(), Serdes.String())) .windowedBy(TimeWindows.of(Duration.ofSeconds(5))).count(Materialized.as("foo-WordCounts-x")) .toStream().map((key, value) -> new KeyValue<>(null, "Count for " + key.key() + " : " + value)); }
Example #28
Source File: CollectionSerde.java From spring-cloud-stream-binder-kafka with Apache License 2.0
/**
 * Constructor to use when the application wants to specify the type
 * of the Serde used for the inner object.
 *
 * @param serde specify an explicit Serde
 * @param collectionsClass type of the Collection class
 */
public CollectionSerde(Serde<E> serde, Class<?> collectionsClass) {
    this.collectionClass = collectionsClass;
    this.inner = Serdes.serdeFrom(
        new CollectionSerializer<>(serde.serializer()),
        new CollectionDeserializer<>(serde.deserializer(), collectionsClass));
}
Example #29
Source File: KafkaStreamsLiveTest.java From tutorials with MIT License
@Test @Ignore("it needs to have kafka broker running on local") public void shouldTestKafkaStreams() throws InterruptedException { // given String inputTopic = "inputTopic"; Properties streamsConfiguration = new Properties(); streamsConfiguration.put(StreamsConfig.APPLICATION_ID_CONFIG, "wordcount-live-test"); streamsConfiguration.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers); streamsConfiguration.put(StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG, Serdes.String().getClass().getName()); streamsConfiguration.put(StreamsConfig.DEFAULT_VALUE_SERDE_CLASS_CONFIG, Serdes.String().getClass().getName()); streamsConfiguration.put(StreamsConfig.COMMIT_INTERVAL_MS_CONFIG, 1000); streamsConfiguration.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest"); // Use a temporary directory for storing state, which will be automatically removed after the test. streamsConfiguration.put(StreamsConfig.STATE_DIR_CONFIG, TestUtils.tempDirectory().getAbsolutePath()); // when StreamsBuilder builder = new StreamsBuilder(); KStream<String, String> textLines = builder.stream(inputTopic); Pattern pattern = Pattern.compile("\\W+", Pattern.UNICODE_CHARACTER_CLASS); KTable<String, Long> wordCounts = textLines.flatMapValues(value -> Arrays.asList(pattern.split(value.toLowerCase()))).groupBy((key, word) -> word).count(); textLines.foreach((word, count) -> System.out.println("word: " + word + " -> " + count)); String outputTopic = "outputTopic"; final Serde<String> stringSerde = Serdes.String(); final Serde<String> longSerde = Serdes.String(); textLines.to(outputTopic, Produced.with(stringSerde,longSerde)); KafkaStreams streams = new KafkaStreams(new Topology(), streamsConfiguration); streams.start(); // then Thread.sleep(30000); streams.close(); }
Example #30
Source File: ITKafkaStreamsTracing.java From brave with Apache License 2.0
@Test
public void should_create_spans_from_stream_with_tracing_mark_as_not_filtered_predicate_false() {
    String inputTopic = testName.getMethodName() + "-input";
    String outputTopic = testName.getMethodName() + "-output";

    StreamsBuilder builder = new StreamsBuilder();
    builder.stream(inputTopic, Consumed.with(Serdes.String(), Serdes.String()))
        .transformValues(
            kafkaStreamsTracing.markAsNotFiltered("filterNot-2", (key, value) -> false))
        .filterNot((k, v) -> Objects.isNull(v))
        .to(outputTopic, Produced.with(Serdes.String(), Serdes.String()));
    Topology topology = builder.build();

    KafkaStreams streams = buildKafkaStreams(topology);

    send(new ProducerRecord<>(inputTopic, TEST_KEY, TEST_VALUE));

    waitForStreamToRun(streams);

    MutableSpan spanInput = testSpanHandler.takeRemoteSpan(CONSUMER);
    assertThat(spanInput.tags()).containsEntry("kafka.topic", inputTopic);

    MutableSpan spanProcessor = testSpanHandler.takeLocalSpan();
    assertChildOf(spanProcessor, spanInput);
    assertThat(spanProcessor.tags()).containsEntry(KAFKA_STREAMS_FILTERED_TAG, "false");

    // The predicate returned false, so the record is tagged as not filtered and is not dropped:
    // it reaches the output topic and produces a producer span.
    MutableSpan spanOutput = testSpanHandler.takeRemoteSpan(PRODUCER);
    assertThat(spanOutput.tags()).containsEntry("kafka.topic", outputTopic);
    assertChildOf(spanOutput, spanProcessor);

    streams.close();
    streams.cleanUp();
}