Java Code Examples for org.apache.kafka.common.serialization.Deserializer#configure()
The following examples show how to use org.apache.kafka.common.serialization.Deserializer#configure().
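Deserializer#configure(Map<String, ?> configs, boolean isKey) is called once, before any records are deserialized, with the client configuration; the isKey flag tells the instance whether it will handle record keys or record values. The examples below all follow the same pattern: build or obtain a config map, then call configure() on the deserializer with the appropriate isKey value. As a minimal sketch of what an implementation typically does with that call, the class below reads an encoding property in configure(). The class name is hypothetical and used only for illustration; the encoding property names mirror the ones Kafka's StringDeserializer understands.

import java.nio.charset.Charset;
import java.nio.charset.StandardCharsets;
import java.util.Map;
import org.apache.kafka.common.serialization.Deserializer;

// Hypothetical deserializer used only to illustrate the configure() contract.
public class ConfigurableStringDeserializer implements Deserializer<String> {

    private Charset charset = StandardCharsets.UTF_8;

    @Override
    public void configure(Map<String, ?> configs, boolean isKey) {
        // Kafka passes the full client config; pick the key- or value-specific
        // property depending on which side this instance deserializes.
        String prop = isKey ? "key.deserializer.encoding" : "value.deserializer.encoding";
        Object encoding = configs.get(prop);
        if (encoding instanceof String) {
            charset = Charset.forName((String) encoding);
        }
    }

    @Override
    public String deserialize(String topic, byte[] data) {
        return data == null ? null : new String(data, charset);
    }

    @Override
    public void close() {
        // nothing to release
    }
}

A consumer configured with this class as its value deserializer would pick up "value.deserializer.encoding" from its config map, since the consumer passes isKey = false for the value side.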
Example 1
Source File: KafkaAvroSerdesTest.java From registry with Apache License 2.0
@Test
public void testToggleStoringSchemaInHeader() {
    TestRecord record = new TestRecord();
    record.setField1("Hello");
    record.setField2("World");
    String keySchemaHeaderName = KafkaAvroSerde.DEFAULT_KEY_SCHEMA_VERSION_ID;

    for (Boolean storeScheamIdInHeader : Arrays.asList(true, false)) {
        Map<String, Object> configs = new HashMap<>();
        configs.put(KafkaAvroSerializer.STORE_SCHEMA_VERSION_ID_IN_HEADER, storeScheamIdInHeader.toString());
        configs.put(AbstractAvroSnapshotDeserializer.SPECIFIC_AVRO_READER, true);

        KafkaAvroSerde serde = new KafkaAvroSerde(schemaRegistryClient);
        final Serializer<Object> serializer = serde.serializer();
        serializer.configure(configs, true);

        Headers headers = new RecordHeaders();
        final byte[] bytes = serializer.serialize(topic, headers, record);
        Assert.assertEquals(storeScheamIdInHeader, headers.lastHeader(keySchemaHeaderName) != null);

        final Deserializer<Object> deserializer = serde.deserializer();
        deserializer.configure(configs, true);
        final TestRecord actual = (TestRecord) deserializer.deserialize(topic, headers, bytes);
        Assert.assertEquals(record, actual);
    }
}
Example 2
Source File: KafkaDeserializerExtractorTest.java From incubator-gobblin with Apache License 2.0
@Test
public void testConfluentJsonDeserializer() throws IOException {
    WorkUnitState mockWorkUnitState = getMockWorkUnitState(0L, 10L);
    mockWorkUnitState.setProp("json.value.type", KafkaRecord.class.getName());

    KafkaRecord testKafkaRecord = new KafkaRecord("Hello World");

    Serializer<KafkaRecord> kafkaEncoder = new KafkaJsonSerializer<>();
    kafkaEncoder.configure(PropertiesUtils.propsToStringKeyMap(mockWorkUnitState.getProperties()), false);

    Deserializer<KafkaRecord> kafkaDecoder = new KafkaJsonDeserializer<>();
    kafkaDecoder.configure(PropertiesUtils.propsToStringKeyMap(mockWorkUnitState.getProperties()), false);

    ByteBuffer testKafkaRecordByteBuffer = ByteBuffer.wrap(kafkaEncoder.serialize(TEST_TOPIC_NAME, testKafkaRecord));

    KafkaSchemaRegistry<?, ?> mockKafkaSchemaRegistry = mock(KafkaSchemaRegistry.class);
    KafkaDeserializerExtractor kafkaDecoderExtractor =
        new KafkaDeserializerExtractor(mockWorkUnitState,
            Optional.fromNullable(Deserializers.CONFLUENT_JSON), kafkaDecoder, mockKafkaSchemaRegistry);

    ByteArrayBasedKafkaRecord mockMessageAndOffset = getMockMessageAndOffset(testKafkaRecordByteBuffer);

    Assert.assertEquals(kafkaDecoderExtractor.decodeRecord(mockMessageAndOffset), testKafkaRecord);
}
Example 3
Source File: KsqlRestApplication.java From ksql-fork-with-deep-learning-function with Apache License 2.0
private static <T> Deserializer<T> getJsonDeserializer(Class<T> classs, boolean isKey) {
    Deserializer<T> result = new KafkaJsonDeserializer<>();
    String typeConfigProperty = isKey
        ? KafkaJsonDeserializerConfig.JSON_KEY_TYPE
        : KafkaJsonDeserializerConfig.JSON_VALUE_TYPE;
    Map<String, ?> props = Collections.singletonMap(
        typeConfigProperty,
        classs
    );
    result.configure(props, isKey);
    return result;
}
Example 4
Source File: KsqlResourceTest.java From ksql-fork-with-deep-learning-function with Apache License 2.0
private static <T> Deserializer<T> getJsonDeserializer(Class<T> classs, boolean isKey) {
    Deserializer<T> result = new KafkaJsonDeserializer<>();
    String typeConfigProperty = isKey
        ? KafkaJsonDeserializerConfig.JSON_KEY_TYPE
        : KafkaJsonDeserializerConfig.JSON_VALUE_TYPE;
    Map<String, ?> props = Collections.singletonMap(
        typeConfigProperty,
        classs
    );
    result.configure(props, isKey);
    return result;
}
Example 5
Source File: KsqlJsonTopicSerDe.java From ksql-fork-with-deep-learning-function with Apache License 2.0
@Override
public Serde<GenericRow> getGenericRowSerde(Schema schema, KsqlConfig ksqlConfig, boolean isInternal,
                                            SchemaRegistryClient schemaRegistryClient) {
    Map<String, Object> serdeProps = new HashMap<>();
    serdeProps.put("JsonPOJOClass", GenericRow.class);

    final Serializer<GenericRow> genericRowSerializer = new KsqlJsonSerializer(schema);
    genericRowSerializer.configure(serdeProps, false);

    final Deserializer<GenericRow> genericRowDeserializer = new KsqlJsonDeserializer(schema);
    genericRowDeserializer.configure(serdeProps, false);

    return Serdes.serdeFrom(genericRowSerializer, genericRowDeserializer);
}
Example 6
Source File: KsqlDelimitedTopicSerDe.java From ksql-fork-with-deep-learning-function with Apache License 2.0
@Override
public Serde<GenericRow> getGenericRowSerde(Schema schema, KsqlConfig ksqlConfig, boolean isInternal,
                                            SchemaRegistryClient schemaRegistryClient) {
    Map<String, Object> serdeProps = new HashMap<>();

    final Serializer<GenericRow> genericRowSerializer = new KsqlDelimitedSerializer(schema);
    genericRowSerializer.configure(serdeProps, false);

    final Deserializer<GenericRow> genericRowDeserializer = new KsqlDelimitedDeserializer(schema);
    genericRowDeserializer.configure(serdeProps, false);

    return Serdes.serdeFrom(genericRowSerializer, genericRowDeserializer);
}
Example 7
Source File: KafkaAvroSerdesTest.java From registry with Apache License 2.0
private void testSchemaHeaderNames(String customKeySchemaHeaderName,
                                   String customValueSchemaHeaderName) {
    TestRecord record = new TestRecord();
    record.setField1("Hello");
    record.setField2("World");

    Map<String, Object> configs = new HashMap<>();
    configs.put(KafkaAvroSerde.KEY_SCHEMA_VERSION_ID_HEADER_NAME, customKeySchemaHeaderName);
    configs.put(KafkaAvroSerde.VALUE_SCHEMA_VERSION_ID_HEADER_NAME, customValueSchemaHeaderName);
    configs.put(KafkaAvroSerializer.STORE_SCHEMA_VERSION_ID_IN_HEADER, "true");
    configs.put(AbstractAvroSnapshotDeserializer.SPECIFIC_AVRO_READER, true);

    ByteArrayOutputStream outputStream = new ByteArrayOutputStream();
    AvroSerDesHandler handler = new DefaultAvroSerDesHandler();
    handler.handlePayloadSerialization(outputStream, record);

    for (Boolean isKey : Arrays.asList(true, false)) {
        KafkaAvroSerde serde = new KafkaAvroSerde(schemaRegistryClient);
        final Serializer<Object> serializer = serde.serializer();
        serializer.configure(configs, isKey);

        Headers headers = new RecordHeaders();
        final byte[] bytes = serializer.serialize(topic, headers, record);
        Assert.assertArrayEquals(outputStream.toByteArray(), bytes);
        Assert.assertEquals(isKey, headers.lastHeader(customKeySchemaHeaderName) != null);
        Assert.assertEquals(!isKey, headers.lastHeader(customValueSchemaHeaderName) != null);

        final Deserializer<Object> deserializer = serde.deserializer();
        deserializer.configure(configs, isKey);
        final TestRecord actual = (TestRecord) deserializer.deserialize(topic, headers, bytes);
        Assert.assertEquals(record, actual);
    }
}
Example 8
Source File: ConfluentSchemaRegistryDeserializerProvider.java From beam with Apache License 2.0
@Override
public Deserializer<T> getDeserializer(Map<String, ?> configs, boolean isKey) {
    ImmutableMap<String, Object> csrConfig =
        ImmutableMap.<String, Object>builder()
            .putAll(configs)
            .put(AbstractKafkaAvroSerDeConfig.SCHEMA_REGISTRY_URL_CONFIG, schemaRegistryUrl)
            .build();
    Deserializer<T> deserializer = (Deserializer<T>) new KafkaAvroDeserializer(getSchemaRegistryClient());
    deserializer.configure(csrConfig, isKey);
    return deserializer;
}
Example 9
Source File: LocalDeserializerProvider.java From beam with Apache License 2.0
@Override
public Deserializer<T> getDeserializer(Map<String, ?> configs, boolean isKey) {
    try {
        Deserializer<T> deserializer = this.deserializer.getDeclaredConstructor().newInstance();
        deserializer.configure(configs, isKey);
        return deserializer;
    } catch (InstantiationException
        | IllegalAccessException
        | InvocationTargetException
        | NoSuchMethodException e) {
        throw new RuntimeException("Could not instantiate deserializers", e);
    }
}
Example 10
Source File: EphemeralKafkaBroker.java From kafka-junit with Apache License 2.0
/**
 * Create a consumer that can read from this broker
 *
 * @param keyDeserializer   Key deserializer
 * @param valueDeserializer Value deserializer
 * @param overrideConfig    Consumer config to override. Pass null if there aren't any
 * @param <K>               Type of Key
 * @param <V>               Type of Value
 * @return KafkaConsumer
 */
public <K, V> KafkaConsumer<K, V> createConsumer(Deserializer<K> keyDeserializer,
                                                 Deserializer<V> valueDeserializer,
                                                 Properties overrideConfig) {
    Properties conf = consumerConfig();
    if (overrideConfig != null) {
        conf.putAll(overrideConfig);
    }
    keyDeserializer.configure(Maps.fromProperties(conf), true);
    valueDeserializer.configure(Maps.fromProperties(conf), false);
    return new KafkaConsumer<>(conf, keyDeserializer, valueDeserializer);
}
Example 11
Source File: KafkaDeserializerExtractor.java From incubator-gobblin with Apache License 2.0
/**
 * Constructs a {@link Deserializer}, using the value of {@link #KAFKA_DESERIALIZER_TYPE}.
 */
private static Deserializer<?> getDeserializer(Properties props, Optional<Deserializers> deserializerType)
    throws ReflectiveOperationException {
    Deserializer<?> deserializer;
    if (deserializerType.isPresent()) {
        deserializer = ConstructorUtils.invokeConstructor(deserializerType.get().getDeserializerClass());
    } else {
        deserializer = Deserializer.class
            .cast(ConstructorUtils.invokeConstructor(Class.forName(props.getProperty(KAFKA_DESERIALIZER_TYPE))));
    }
    deserializer.configure(PropertiesUtils.propsToStringKeyMap(props), false);
    return deserializer;
}
Example 12
Source File: RegistrySerdeTest.java From apicurio-registry with Apache License 2.0
@SuppressWarnings("unchecked") @RegistryServiceTest public void testConfiguration(Supplier<RegistryService> supplier) throws Exception { Schema schema = new Schema.Parser().parse("{\"type\":\"record\",\"name\":\"myrecord3\",\"fields\":[{\"name\":\"bar\",\"type\":\"string\"}]}"); String artifactId = generateArtifactId(); CompletionStage<ArtifactMetaData> csa = supplier.get().createArtifact( ArtifactType.AVRO, artifactId + "-myrecord3", null, new ByteArrayInputStream(schema.toString().getBytes(StandardCharsets.UTF_8)) ); ArtifactMetaData amd = ConcurrentUtil.result(csa); // reset any cache supplier.get().reset(); // wait for global id store to populate (in case of Kafka / Streams) ArtifactMetaData amdById = retry(() -> supplier.get().getArtifactMetaDataByGlobalId(amd.getGlobalId())); Assertions.assertNotNull(amdById); GenericData.Record record = new GenericData.Record(schema); record.put("bar", "somebar"); Map<String, Object> config = new HashMap<>(); config.put(AbstractKafkaSerDe.REGISTRY_URL_CONFIG_PARAM, "http://localhost:8081/api"); config.put(AbstractKafkaSerializer.REGISTRY_ARTIFACT_ID_STRATEGY_CONFIG_PARAM, new TopicRecordIdStrategy()); config.put(AbstractKafkaSerializer.REGISTRY_GLOBAL_ID_STRATEGY_CONFIG_PARAM, new FindLatestIdStrategy<>()); config.put(AvroDatumProvider.REGISTRY_AVRO_DATUM_PROVIDER_CONFIG_PARAM, new DefaultAvroDatumProvider<>()); Serializer<GenericData.Record> serializer = (Serializer<GenericData.Record>) getClass().getClassLoader() .loadClass(AvroKafkaSerializer.class.getName()) .newInstance(); serializer.configure(config, true); byte[] bytes = serializer.serialize(artifactId, record); Deserializer<GenericData.Record> deserializer = (Deserializer<GenericData.Record>) getClass().getClassLoader() .loadClass(AvroKafkaDeserializer.class.getName()) .newInstance(); deserializer.configure(config, true); record = deserializer.deserialize(artifactId, bytes); Assertions.assertEquals("somebar", record.get("bar").toString()); config.put(AbstractKafkaSerializer.REGISTRY_ARTIFACT_ID_STRATEGY_CONFIG_PARAM, TopicRecordIdStrategy.class); config.put(AbstractKafkaSerializer.REGISTRY_GLOBAL_ID_STRATEGY_CONFIG_PARAM, FindLatestIdStrategy.class); config.put(AvroDatumProvider.REGISTRY_AVRO_DATUM_PROVIDER_CONFIG_PARAM, DefaultAvroDatumProvider.class); serializer.configure(config, true); bytes = serializer.serialize(artifactId, record); deserializer.configure(config, true); record = deserializer.deserialize(artifactId, bytes); Assertions.assertEquals("somebar", record.get("bar").toString()); config.put(AbstractKafkaSerializer.REGISTRY_ARTIFACT_ID_STRATEGY_CONFIG_PARAM, TopicRecordIdStrategy.class.getName()); config.put(AbstractKafkaSerializer.REGISTRY_GLOBAL_ID_STRATEGY_CONFIG_PARAM, FindLatestIdStrategy.class.getName()); config.put(AvroDatumProvider.REGISTRY_AVRO_DATUM_PROVIDER_CONFIG_PARAM, DefaultAvroDatumProvider.class.getName()); serializer.configure(config, true); bytes = serializer.serialize(artifactId, record); deserializer.configure(config, true); record = deserializer.deserialize(artifactId, bytes); Assertions.assertEquals("somebar", record.get("bar").toString()); serializer.close(); deserializer.close(); }
Example 13
Source File: LiKafkaConsumerImpl.java From li-apache-kafka-clients with BSD 2-Clause "Simplified" License
@SuppressWarnings("unchecked") private LiKafkaConsumerImpl(LiKafkaConsumerConfig configs, Deserializer<K> keyDeserializer, Deserializer<V> valueDeserializer, Deserializer<LargeMessageSegment> largeMessageSegmentDeserializer, Auditor<K, V> consumerAuditor) { _autoCommitEnabled = configs.getBoolean(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG); _autoCommitInterval = configs.getInt(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG); _offsetResetStrategy = LiOffsetResetStrategy.valueOf(configs.getString(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG).toUpperCase(Locale.ROOT)); _lastAutoCommitMs = System.currentTimeMillis(); // We need to set the auto commit to false in KafkaConsumer because it is not large message aware. ByteArrayDeserializer byteArrayDeserializer = new ByteArrayDeserializer(); _kafkaConsumer = new KafkaConsumer<>(configs.configForVanillaConsumer(), byteArrayDeserializer, byteArrayDeserializer); _clientId = LiKafkaClientsUtils.getClientId(_kafkaConsumer); MetricName skippedRecordsMetricName = new MetricName( "records-skipped", "lnkd", "number of records skipped due to deserialization issues", Collections.singletonMap("client-id", _clientId) ); Metric skippedRecordsMetric = new Metric() { @Override public MetricName metricName() { return skippedRecordsMetricName; } @Override public double value() { return (double) _consumerRecordsProcessor.getRecordsSkipped(); } @Override public Object metricValue() { return value(); } }; _extraMetrics.put(skippedRecordsMetricName, skippedRecordsMetric); try { // Instantiate segment deserializer if needed. Deserializer segmentDeserializer = largeMessageSegmentDeserializer != null ? largeMessageSegmentDeserializer : configs.getConfiguredInstance(LiKafkaConsumerConfig.SEGMENT_DESERIALIZER_CLASS_CONFIG, Deserializer.class); segmentDeserializer.configure(configs.originals(), false); // Instantiate message assembler if needed. int messageAssemblerCapacity = configs.getInt(LiKafkaConsumerConfig.MESSAGE_ASSEMBLER_BUFFER_CAPACITY_CONFIG); int messageAssemblerExpirationOffsetGap = configs.getInt(LiKafkaConsumerConfig.MESSAGE_ASSEMBLER_EXPIRATION_OFFSET_GAP_CONFIG); boolean exceptionOnMessageDropped = configs.getBoolean(LiKafkaConsumerConfig.EXCEPTION_ON_MESSAGE_DROPPED_CONFIG); boolean treatBadSegmentsAsPayload = configs.getBoolean(LiKafkaConsumerConfig.TREAT_BAD_SEGMENTS_AS_PAYLOAD_CONFIG); MessageAssembler assembler = new MessageAssemblerImpl(messageAssemblerCapacity, messageAssemblerExpirationOffsetGap, exceptionOnMessageDropped, segmentDeserializer, treatBadSegmentsAsPayload); // Instantiate delivered message offset tracker if needed. int maxTrackedMessagesPerPartition = configs.getInt(LiKafkaConsumerConfig.MAX_TRACKED_MESSAGES_PER_PARTITION_CONFIG); DeliveredMessageOffsetTracker messageOffsetTracker = new DeliveredMessageOffsetTracker(maxTrackedMessagesPerPartition); // Instantiate auditor if needed. Auditor<K, V> auditor; if (consumerAuditor != null) { auditor = consumerAuditor; auditor.configure(configs.originals()); } else { auditor = configs.getConfiguredInstance(LiKafkaConsumerConfig.AUDITOR_CLASS_CONFIG, Auditor.class); } auditor.start(); // Instantiate key and value deserializer if needed. Deserializer<K> kDeserializer = keyDeserializer != null ? keyDeserializer : configs.getConfiguredInstance(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, Deserializer.class); kDeserializer.configure(configs.originals(), true); Deserializer<V> vDeserializer = valueDeserializer != null ? 
valueDeserializer : configs.getConfiguredInstance(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, Deserializer.class); vDeserializer.configure(configs.originals(), false); // Instantiate consumer record processor _consumerRecordsProcessor = new ConsumerRecordsProcessor<>(assembler, kDeserializer, vDeserializer, messageOffsetTracker, auditor, _kafkaConsumer::committed); // Instantiate consumer rebalance listener _consumerRebalanceListener = new LiKafkaConsumerRebalanceListener<>(_consumerRecordsProcessor, this, _autoCommitEnabled); // Instantiate offset commit callback. _offsetCommitCallback = new LiKafkaOffsetCommitCallback(); _lastProcessedResult = null; } catch (Exception e) { _kafkaConsumer.close(); throw e; } }
Example 14
Source File: TopolologyTestDriverKafkaStreamsInventoryCountTests.java From spring-cloud-stream-samples with Apache License 2.0
private void configureDeserializer(Deserializer<?> deserializer, Class<?> keyDefaultType,
                                   Class<?> valueDefaultType, boolean isKey) {
    Map<String, Object> deserializerConfig = new HashMap<>();
    deserializerConfig.put(JsonDeserializer.KEY_DEFAULT_TYPE, keyDefaultType);
    deserializerConfig.put(JsonDeserializer.VALUE_DEFAULT_TYPE, valueDefaultType);
    deserializer.configure(deserializerConfig, isKey);
}
Example 15
Source File: PulsarKafkaConsumer.java From pulsar with Apache License 2.0
@SuppressWarnings("unchecked") private PulsarKafkaConsumer(ConsumerConfig consumerConfig, Schema<K> keySchema, Schema<V> valueSchema) { if (keySchema == null) { Deserializer<K> kafkaKeyDeserializer = consumerConfig.getConfiguredInstance( ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, Deserializer.class); kafkaKeyDeserializer.configure(consumerConfig.originals(), true); this.keySchema = new PulsarKafkaSchema<>(kafkaKeyDeserializer); } else { this.keySchema = keySchema; consumerConfig.ignore(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG); } if (valueSchema == null) { Deserializer<V> kafkaValueDeserializer = consumerConfig.getConfiguredInstance( ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, Deserializer.class); kafkaValueDeserializer.configure(consumerConfig.originals(), true); this.valueSchema = new PulsarKafkaSchema<>(kafkaValueDeserializer); } else { this.valueSchema = valueSchema; consumerConfig.ignore(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG); } groupId = consumerConfig.getString(ConsumerConfig.GROUP_ID_CONFIG); isAutoCommit = consumerConfig.getBoolean(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG); strategy = getStrategy(consumerConfig.getString(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG)); log.info("Offset reset strategy has been assigned value {}", strategy); String serviceUrl = consumerConfig.getList(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG).get(0); // If MAX_POLL_RECORDS_CONFIG is provided then use the config, else use default value. if(consumerConfig.values().containsKey(ConsumerConfig.MAX_POLL_RECORDS_CONFIG)){ maxRecordsInSinglePoll = consumerConfig.getInt(ConsumerConfig.MAX_POLL_RECORDS_CONFIG); } else { maxRecordsInSinglePoll = 1000; } interceptors = (List) consumerConfig.getConfiguredInstances( ConsumerConfig.INTERCEPTOR_CLASSES_CONFIG, ConsumerInterceptor.class); this.properties = new Properties(); consumerConfig.originals().forEach((k, v) -> properties.put(k, v)); ClientBuilder clientBuilder = PulsarClientKafkaConfig.getClientBuilder(properties); // Since this client instance is going to be used just for the consumers, we can enable Nagle to group // all the acknowledgments sent to broker within a short time frame clientBuilder.enableTcpNoDelay(false); try { client = clientBuilder.serviceUrl(serviceUrl).build(); } catch (PulsarClientException e) { throw new RuntimeException(e); } }
Example 16
Source File: PulsarKafkaConsumer.java From pulsar with Apache License 2.0
@SuppressWarnings("unchecked") private PulsarKafkaConsumer(ConsumerConfig consumerConfig, Schema<K> keySchema, Schema<V> valueSchema) { if (keySchema == null) { Deserializer<K> kafkaKeyDeserializer = consumerConfig.getConfiguredInstance( ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, Deserializer.class); kafkaKeyDeserializer.configure(consumerConfig.originals(), true); this.keySchema = new PulsarKafkaSchema<>(kafkaKeyDeserializer); } else { this.keySchema = keySchema; consumerConfig.ignore(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG); } if (valueSchema == null) { Deserializer<V> kafkaValueDeserializer = consumerConfig.getConfiguredInstance( ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, Deserializer.class); kafkaValueDeserializer.configure(consumerConfig.originals(), true); this.valueSchema = new PulsarKafkaSchema<>(kafkaValueDeserializer); } else { this.valueSchema = valueSchema; consumerConfig.ignore(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG); } groupId = consumerConfig.getString(ConsumerConfig.GROUP_ID_CONFIG); isAutoCommit = consumerConfig.getBoolean(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG); strategy = getStrategy(consumerConfig.getString(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG)); log.info("Offset reset strategy has been assigned value {}", strategy); String serviceUrl = consumerConfig.getList(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG).get(0); // there is not this config in kafka 0.9, so use default value. maxRecordsInSinglePoll = 1000; this.properties = new Properties(); consumerConfig.originals().forEach(properties::put); ClientBuilder clientBuilder = PulsarClientKafkaConfig.getClientBuilder(properties); // Since this client instance is going to be used just for the consumers, we can enable Nagle to group // all the acknowledgments sent to broker within a short time frame clientBuilder.enableTcpNoDelay(false); try { client = clientBuilder.serviceUrl(serviceUrl).build(); } catch (PulsarClientException e) { throw new RuntimeException(e); } }