org.apache.kafka.clients.consumer.RangeAssignor Java Examples

The following examples show how to use org.apache.kafka.clients.consumer.RangeAssignor. Each example is taken from a real open-source project; the source file and project are noted above each snippet.
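Before the project examples, a minimal self-contained sketch of the most common way RangeAssignor is used: it is selected by class name through the consumer's partition.assignment.strategy setting (it is also Kafka's classic default strategy). The broker address, group id, and topic below are placeholders, not taken from the examples.

import java.time.Duration;
import java.util.Collections;
import java.util.Properties;

import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.consumer.RangeAssignor;
import org.apache.kafka.common.serialization.ByteArrayDeserializer;

public class RangeAssignorExample {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder
        props.put(ConsumerConfig.GROUP_ID_CONFIG, "example-group");           // placeholder
        // Select the assignor by class name, exactly as the examples below do.
        props.put(ConsumerConfig.PARTITION_ASSIGNMENT_STRATEGY_CONFIG, RangeAssignor.class.getName());
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName());
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName());

        try (KafkaConsumer<byte[], byte[]> consumer = new KafkaConsumer<>(props)) {
            consumer.subscribe(Collections.singletonList("example-topic")); // placeholder topic
            consumer.poll(Duration.ofMillis(100)); // triggers the group join and range assignment
        }
    }
}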
Example #1
Source File: KafkaProcessor.java    From quarkus with Apache License 2.0
@BuildStep
public void build(CombinedIndexBuildItem indexBuildItem, BuildProducer<ReflectiveClassBuildItem> reflectiveClass,
        Capabilities capabilities) {
    final Set<DotName> toRegister = new HashSet<>();

    collectImplementors(toRegister, indexBuildItem, Serializer.class);
    collectImplementors(toRegister, indexBuildItem, Deserializer.class);
    collectImplementors(toRegister, indexBuildItem, Partitioner.class);
    // PartitionAssignor is now deprecated, replaced by ConsumerPartitionAssignor
    collectImplementors(toRegister, indexBuildItem, PartitionAssignor.class);
    collectImplementors(toRegister, indexBuildItem, ConsumerPartitionAssignor.class);

    for (Class<?> i : BUILT_INS) {
        reflectiveClass.produce(new ReflectiveClassBuildItem(false, false, i.getName()));
        collectSubclasses(toRegister, indexBuildItem, i);
    }
    if (capabilities.isCapabilityPresent(Capabilities.JSONB)) {
        reflectiveClass.produce(new ReflectiveClassBuildItem(false, false, JsonbSerializer.class, JsonbDeserializer.class));
        collectSubclasses(toRegister, indexBuildItem, JsonbSerializer.class);
        collectSubclasses(toRegister, indexBuildItem, JsonbDeserializer.class);
    }
    if (capabilities.isCapabilityPresent(Capabilities.JACKSON)) {
        reflectiveClass.produce(
                new ReflectiveClassBuildItem(false, false, ObjectMapperSerializer.class, ObjectMapperDeserializer.class));
        collectSubclasses(toRegister, indexBuildItem, ObjectMapperSerializer.class);
        collectSubclasses(toRegister, indexBuildItem, ObjectMapperDeserializer.class);
    }

    for (DotName s : toRegister) {
        reflectiveClass.produce(new ReflectiveClassBuildItem(false, false, s.toString()));
    }

    // built in partitioner and partition assignors
    reflectiveClass.produce(new ReflectiveClassBuildItem(false, false, DefaultPartitioner.class.getName()));
    reflectiveClass.produce(new ReflectiveClassBuildItem(false, false, RangeAssignor.class.getName()));
    reflectiveClass.produce(new ReflectiveClassBuildItem(false, false, RoundRobinAssignor.class.getName()));
    reflectiveClass.produce(new ReflectiveClassBuildItem(false, false, StickyAssignor.class.getName()));

    // classes needed to perform reflection on DirectByteBuffer - only really needed for Java 8
    reflectiveClass.produce(new ReflectiveClassBuildItem(true, false, "java.nio.DirectByteBuffer"));
    reflectiveClass.produce(new ReflectiveClassBuildItem(true, false, "sun.misc.Cleaner"));
}
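The collectImplementors and collectSubclasses helpers referenced above are not shown in this snippet. A plausible reconstruction based only on the call sites, assuming the Jandex IndexView API that CombinedIndexBuildItem exposes:

// Hypothetical sketch inferred from the call sites above, not the full source.
private static void collectImplementors(Set<DotName> set, CombinedIndexBuildItem indexBuildItem, Class<?> cls) {
    // All classes in the application index that implement the given interface.
    for (ClassInfo ci : indexBuildItem.getIndex().getAllKnownImplementors(DotName.createSimple(cls.getName()))) {
        set.add(ci.name());
    }
}

private static void collectSubclasses(Set<DotName> set, CombinedIndexBuildItem indexBuildItem, Class<?> cls) {
    // All classes in the application index that extend the given class.
    for (ClassInfo ci : indexBuildItem.getIndex().getAllKnownSubclasses(DotName.createSimple(cls.getName()))) {
        set.add(ci.name());
    }
}

The reflective registration matters in native images: Kafka instantiates serializers, partitioners, and assignors reflectively from configuration strings, so RangeAssignor and friends must be registered for reflection at build time or the lookup fails at runtime.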
 
Example #2
Source File: KafkaConsumerConfig.java    From samza with Apache License 2.0
/**
 * Create kafka consumer configs, based on the subset of global configs.
 * @param config application config
 * @param systemName system name
 * @param clientId client id provided by the caller
 * @return KafkaConsumerConfig
 */
public static KafkaConsumerConfig getKafkaSystemConsumerConfig(Config config, String systemName, String clientId) {

  Config subConf = config.subset(String.format("systems.%s.consumer.", systemName), true);

  final String groupId = createConsumerGroupId(config);

  Map<String, Object> consumerProps = new HashMap<>(subConf);

  consumerProps.put(ConsumerConfig.GROUP_ID_CONFIG, groupId);
  consumerProps.put(ConsumerConfig.CLIENT_ID_CONFIG, clientId);

  // These are values we enforce in Samza; they cannot be overwritten.

  // Disable consumer auto-commit because Samza controls commits
  consumerProps.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false");

  // check if samza default offset value is defined
  String systemOffsetDefault = new SystemConfig(config).getSystemOffsetDefault(systemName);

  // Translate samza config value to kafka config value
  String autoOffsetReset = getAutoOffsetResetValue((String) consumerProps.get(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG), systemOffsetDefault);
  LOG.info("setting auto.offset.reset for system {} to {}", systemName, autoOffsetReset);
  consumerProps.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, autoOffsetReset);

  // if consumer bootstrap servers are not configured, get them from the producer configs
  if (!subConf.containsKey(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG)) {
    String bootstrapServers =
        config.get(String.format("systems.%s.producer.%s", systemName, ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG));
    if (StringUtils.isEmpty(bootstrapServers)) {
      throw new SamzaException("Missing " + ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG + " config  for " + systemName);
    }
    consumerProps.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
  }

  // Always use default partition assignment strategy. Do not allow override.
  consumerProps.put(ConsumerConfig.PARTITION_ASSIGNMENT_STRATEGY_CONFIG, RangeAssignor.class.getName());

  // The consumer is fully typed, and deserialization can be too. If no
  // deserializer is provided, default to byte[].
  if (!consumerProps.containsKey(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG)) {
    LOG.info("setting key serialization for the consumer(for system {}) to ByteArrayDeserializer", systemName);
    consumerProps.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName());
  }
  if (!consumerProps.containsKey(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG)) {
    LOG.info("setting value serialization for the consumer(for system {}) to ByteArrayDeserializer", systemName);
    consumerProps.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName());
  }

  // Apply the default max poll records if no value was configured
  consumerProps.putIfAbsent(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, DEFAULT_KAFKA_CONSUMER_MAX_POLL_RECORDS);

  return new KafkaConsumerConfig(consumerProps, systemName);
}
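One reason a framework pins RangeAssignor, as Samza does above, is that range assignment is deterministic per topic: partitions are laid out in numeric order and split into contiguous ranges, so the same partition numbers of different topics land on the same consumer. The arithmetic behind the documented per-topic rule, as a small illustration (the partition and consumer counts are made up, and this is not Kafka's internal code):

// RangeAssignor's documented rule: each of N consumers gets numPartitions / N
// partitions, and the first numPartitions % N consumers (in sorted member
// order) each get one extra.
int numPartitions = 7;  // made-up example values
int numConsumers = 3;
int quota = numPartitions / numConsumers;   // 2
int extras = numPartitions % numConsumers;  // 1
for (int c = 0; c < numConsumers; c++) {
    int start = c * quota + Math.min(c, extras);
    int count = quota + (c < extras ? 1 : 0);
    System.out.printf("consumer-%d gets partitions %d..%d%n", c, start, start + count - 1);
}
// Prints: consumer-0 gets 0..2, consumer-1 gets 3..4, consumer-2 gets 5..6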
 
Example #3
Source File: TestKafkaConsumerConfig.java    From samza with Apache License 2.0
@Test
public void testDefaults() {
  Map<String, String> props = new HashMap<>();

  props.put(KAFKA_CONSUMER_PROPERTY_PREFIX + ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "true"); // should be ignored
  props.put(KAFKA_CONSUMER_PROPERTY_PREFIX + ConsumerConfig.PARTITION_ASSIGNMENT_STRATEGY_CONFIG,
      "Ignore"); // should be ignored
  props.put(KAFKA_CONSUMER_PROPERTY_PREFIX + ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG,
      "100"); // should NOT be ignored

  props.put(JobConfig.JOB_NAME, JOB_NAME);

  // if KAFKA_CONSUMER_PROPERTY_PREFIX is set, then PRODUCER should be ignored
  props.put(KAFKA_PRODUCER_PROPERTY_PREFIX + "bootstrap.servers", "ignoreThis:9092");
  props.put(KAFKA_CONSUMER_PROPERTY_PREFIX + "bootstrap.servers", "useThis:9092");

  Config config = new MapConfig(props);
  String clientId = KafkaConsumerConfig.createClientId(CLIENT_ID_PREFIX, config);
  KafkaConsumerConfig kafkaConsumerConfig =
      KafkaConsumerConfig.getKafkaSystemConsumerConfig(config, SYSTEM_NAME, clientId);

  Assert.assertEquals("false", kafkaConsumerConfig.get(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG));

  Assert.assertEquals(KafkaConsumerConfig.DEFAULT_KAFKA_CONSUMER_MAX_POLL_RECORDS,
      kafkaConsumerConfig.get(ConsumerConfig.MAX_POLL_RECORDS_CONFIG));

  Assert.assertEquals(RangeAssignor.class.getName(),
      kafkaConsumerConfig.get(ConsumerConfig.PARTITION_ASSIGNMENT_STRATEGY_CONFIG));

  Assert.assertEquals("useThis:9092", kafkaConsumerConfig.get(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG));
  Assert.assertEquals("100", kafkaConsumerConfig.get(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG));

  Assert.assertEquals(ByteArrayDeserializer.class.getName(),
      kafkaConsumerConfig.get(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG));

  Assert.assertEquals(ByteArrayDeserializer.class.getName(),
      kafkaConsumerConfig.get(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG));

  // validate group and client id generation
  Assert.assertEquals(CLIENT_ID_PREFIX.replace("-", "_") + "-" + JOB_NAME + "-" + "1",
      kafkaConsumerConfig.get(ConsumerConfig.CLIENT_ID_CONFIG));

  Assert.assertEquals(CLIENT_ID_PREFIX.replace("-", "_") + "-jobName-1",
      KafkaConsumerConfig.createClientId(CLIENT_ID_PREFIX, config));

  Assert.assertEquals("jobName-1", KafkaConsumerConfig.createConsumerGroupId(config));

  // validate setting of group and client id
  Assert.assertEquals(KafkaConsumerConfig.createConsumerGroupId(config),
      kafkaConsumerConfig.get(ConsumerConfig.GROUP_ID_CONFIG));

  Assert.assertEquals(KafkaConsumerConfig.createClientId(CLIENT_ID_PREFIX, config),
      kafkaConsumerConfig.get(ConsumerConfig.CLIENT_ID_CONFIG));

  // with non-default job id
  props.put(JobConfig.JOB_ID, JOB_ID);
  config = new MapConfig(props);
  Assert.assertEquals(CLIENT_ID_PREFIX.replace("-", "_") + "-jobName-jobId",
      KafkaConsumerConfig.createClientId(CLIENT_ID_PREFIX, config));

  Assert.assertEquals("jobName-jobId", KafkaConsumerConfig.createConsumerGroupId(config));

}
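The prefix constants in this test are not defined in the snippet; KAFKA_CONSUMER_PROPERTY_PREFIX presumably expands to "systems.<systemName>.consumer.", the same prefix Example #2 passes to config.subset. A minimal sketch of how the prefixed test properties reach the consumer config, assuming that expansion for a system named "kafka":

import java.util.HashMap;
import java.util.Map;

import org.apache.samza.config.Config;
import org.apache.samza.config.MapConfig;

Map<String, String> raw = new HashMap<>();
raw.put("systems.kafka.consumer.bootstrap.servers", "useThis:9092");
raw.put("systems.kafka.producer.bootstrap.servers", "ignoreThis:9092");

Config config = new MapConfig(raw);
// subset(prefix, true) keeps only the matching keys and strips the prefix,
// so the producer entry never reaches the consumer config. This is why the
// test above asserts that bootstrap.servers resolves to "useThis:9092".
Config subConf = config.subset("systems.kafka.consumer.", true);
assert "useThis:9092".equals(subConf.get("bootstrap.servers"));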