org.apache.kafka.common.internals.Topic Java Examples

The following examples show how to use org.apache.kafka.common.internals.Topic. Each example is taken from an open source project; the source file, project, and license are noted above it.
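Two usages of this class recur in the examples below: the constant Topic.GROUP_METADATA_TOPIC_NAME (the name of the internal __consumer_offsets topic) and Topic.validate(String), which throws InvalidTopicException for illegal topic names. The following is a minimal, self-contained sketch of both; the class name TopicUsageExample and the sample topic name are illustrative only.

import org.apache.kafka.common.errors.InvalidTopicException;
import org.apache.kafka.common.internals.Topic;

public class TopicUsageExample {
    public static void main(String[] args) {
        // Name of the internal consumer offsets topic ("__consumer_offsets").
        System.out.println(Topic.GROUP_METADATA_TOPIC_NAME);

        // Topic.validate throws InvalidTopicException for names that are empty,
        // too long, or contain characters Kafka does not allow.
        try {
            Topic.validate("my-topic.v1");
            System.out.println("Topic name is valid");
        } catch (InvalidTopicException e) {
            System.err.println("Invalid topic name: " + e.getMessage());
        }
    }
}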
Example #1
Source File: KafkaProtocolHandler.java    From kop with Apache License 2.0
private void loadOffsetTopics(GroupCoordinator groupCoordinator) throws Exception {
    String offsetsTopic = kafkaConfig.getKafkaMetadataTenant() + "/" + kafkaConfig.getKafkaMetadataNamespace()
        + "/" + Topic.GROUP_METADATA_TOPIC_NAME;
    int numPartitions = kafkaConfig.getOffsetsTopicNumPartitions();
    List<CompletableFuture<Void>> lists = Lists.newArrayListWithExpectedSize(numPartitions);
    for (int i = 0; i < numPartitions; i++) {
        String partition = offsetsTopic + PARTITIONED_TOPIC_SUFFIX + i;
        String broker = brokerService.pulsar().getAdminClient().lookups()
            .lookupTopic(partition);

        if (log.isDebugEnabled()) {
            log.debug("found broker {} for offset topic partition {}. current broker: {}",
                broker, partition, brokerService.pulsar().getBrokerServiceUrl());
        }

        if (broker.equalsIgnoreCase(brokerService.pulsar().getBrokerServiceUrl())) {
            lists.add(groupCoordinator.handleGroupImmigration(i));
        }
    }
    FutureUtil.waitForAll(lists).get();
}
 
Example #2
Source File: KafkaValidator.java    From ditto with Eclipse Public License 2.0
private static void validateTopic(final String topic, final DittoHeaders dittoHeaders,
        final String placeholderReplacement) {

    if (topic.isEmpty()) {
        throwEmptyException("topic", dittoHeaders);
    }

    try {
        final String topicWithoutPlaceholders =
                topic.replaceAll(Pattern.quote(placeholderReplacement), DUMMY_TOPIC);
        Topic.validate(topicWithoutPlaceholders);
    } catch (final InvalidTopicException e) {
        final String message = MessageFormat.format(INVALID_TOPIC_FORMAT, topic, e.getMessage());
        throw ConnectionConfigurationInvalidException.newBuilder(message)
                .dittoHeaders(dittoHeaders)
                .cause(e)
                .build();
    }
}
 
Example #3
Source File: GroupCoordinator.java    From kop with Apache License 2.0
public Future<?> scheduleHandleTxnCompletion(
    long producerId,
    Stream<TopicPartition> offsetsPartitions,
    TransactionResult transactionResult
) {
    Stream<TopicPartition> validatedOffsetsPartitions =
        offsetsPartitions.map(tp -> {
            checkArgument(tp.topic().equals(Topic.GROUP_METADATA_TOPIC_NAME));
            return tp;
        });
    boolean isCommit = TransactionResult.COMMIT == transactionResult;
    return groupManager.scheduleHandleTxnCompletion(
        producerId,
        validatedOffsetsPartitions.map(TopicPartition::partition)
            .collect(Collectors.toSet()),
        isCommit
    );
}
 
Example #4
Source File: GroupCoordinatorTest.java    From kop with Apache License 2.0
@Test
public void testCommitAndFetchOffsetsWithEmptyGroup() throws Exception {
    // For backwards compatibility, the coordinator supports committing/fetching offsets with an empty groupId.
    // To allow inspection and removal of the empty group, we must also support DescribeGroups and DeleteGroups

    TopicPartition tp = new TopicPartition("topic", 0);
    OffsetAndMetadata offset = OffsetAndMetadata.apply(0);
    String groupId = "";

    Map<TopicPartition, Errors> commitOffsetResult = groupCoordinator.handleCommitOffsets(
        groupId, OffsetCommitRequest.DEFAULT_MEMBER_ID, OffsetCommitRequest.DEFAULT_GENERATION_ID,
        ImmutableMap.<TopicPartition, OffsetAndMetadata>builder()
            .put(tp, offset)
            .build()
    ).get();
    assertEquals(Errors.NONE, commitOffsetResult.get(tp));

    KeyValue<Errors, Map<TopicPartition, PartitionData>> fetchOffsetsResult =
        groupCoordinator.handleFetchOffsets(groupId, Optional.of(Lists.newArrayList(tp)));
    assertEquals(Errors.NONE, fetchOffsetsResult.getKey());
    assertEquals(0, fetchOffsetsResult.getValue().get(tp).offset);

    KeyValue<Errors, GroupSummary> describeGroupResult = groupCoordinator.handleDescribeGroup(groupId);
    assertEquals(Errors.NONE, describeGroupResult.getKey());
    assertEquals(GroupState.Empty.toString(), describeGroupResult.getValue().state());

    TopicPartition groupTopicPartition = new TopicPartition(
        Topic.GROUP_METADATA_TOPIC_NAME, groupPartitionId
    );

    Map<String, Errors> deleteErrors = groupCoordinator.handleDeleteGroups(Sets.newHashSet(groupId));
    assertEquals(Errors.NONE, deleteErrors.get(groupId));

    KeyValue<Errors, Map<TopicPartition, PartitionData>> fetchOffsetsResult2 =
        groupCoordinator.handleFetchOffsets(groupId, Optional.of(Lists.newArrayList(tp)));
    assertEquals(Errors.NONE, fetchOffsetsResult2.getKey());
    assertEquals(OffsetFetchResponse.INVALID_OFFSET, fetchOffsetsResult2.getValue().get(tp).offset);
}
 
Example #5
Source File: TopicName.java    From strimzi-kafka-operator with Apache License 2.0
public TopicName(String name) {
    if (name == null || name.isEmpty()) {
        throw new IllegalArgumentException();
    }
    // TODO Shame we can't validate a topic name without relying on an internal class
    Topic.validate(name);
    this.name = name;
}
 
Example #6
Source File: ConsumerGroupCoordinatorUtils.java    From kafka-monitor with Apache License 2.0
/**
 * A consumer group's coordinator is the leader of the __consumer_offsets partition given by
 * hash(group.id) % (number of __consumer_offsets topic partitions).
 * This method chooses a group id B such that hash(targetGroupId) and hash(B) map to the same
 * partition, i.e. a group id that collides with the target group on the same coordinator partition.
 * Taking targetGroupId as a method parameter, rather than an instance variable that is assigned and
 * then looked up, keeps the lookup stateless. A sketch of such a partitionFor helper follows this example.
 * @param targetGroupId the identifier of the target consumer group
 * @param adminClient an AdminClient used to look up the number of __consumer_offsets topic partitions
 */
public static String findCollision(String targetGroupId, AdminClient adminClient)
    throws ExecutionException, InterruptedException {
  if (targetGroupId.equals("")) {
    throw new IllegalArgumentException("The target consumer group identifier cannot be empty: " + targetGroupId);
  }

  int numOffsetsTopicPartitions = adminClient.describeTopics(Collections.singleton(Topic.GROUP_METADATA_TOPIC_NAME))
      .values()
      .get(Topic.GROUP_METADATA_TOPIC_NAME)
      .get()
      .partitions()
      .size();

  // Extract invariant from loop
  int targetConsumerOffsetsPartition = partitionFor(targetGroupId, numOffsetsTopicPartitions);

  // This doesn't need to be an instance variable because we throw this value away at the end of the computation
  int groupSuffix = 0;

  // Extract the return value so it's not computed twice; this reduces the possibility of bugs
  String newConsumerGroup;

  // Use while (true); otherwise the halting condition is hard to read.
  while (true) {
    // TODO: could play fancy StringBuilder games here to make this generate less garbage
    newConsumerGroup = CONSUMER_GROUP_PREFIX_CANDIDATE + groupSuffix++;
    int newGroupNamePartition = ConsumerGroupCoordinatorUtils.partitionFor(newConsumerGroup, numOffsetsTopicPartitions);
    if (newGroupNamePartition == targetConsumerOffsetsPartition) {
      break;
    }
  }

  return newConsumerGroup;
}
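
The example above depends on a partitionFor helper that maps a group id onto a __consumer_offsets partition; its implementation is not shown here. Below is a minimal sketch of the usual scheme (a non-negative hashCode modulo the partition count), offered as an assumption rather than the actual ConsumerGroupCoordinatorUtils code.

// Hypothetical sketch; the real ConsumerGroupCoordinatorUtils.partitionFor may differ.
static int partitionFor(String groupId, int numOffsetsTopicPartitions) {
    int hash = groupId.hashCode();
    // Math.abs(Integer.MIN_VALUE) is still negative, so guard that case explicitly.
    int nonNegative = (hash == Integer.MIN_VALUE) ? 0 : Math.abs(hash);
    return nonNegative % numOffsetsTopicPartitions;
}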
 
Example #7
Source File: KafkaPublishTarget.java    From ditto with Eclipse Public License 2.0
private static String validateTopic(final String topic) {
    try {
        Topic.validate(topic);
        return topic;
    } catch (final InvalidTopicException e) {
        throw ConnectionConfigurationInvalidException.newBuilder(e.getMessage())
                .cause(e)
                .build();
    }
}
 
Example #8
Source File: KafkaProtocolHandler.java    From kop with Apache License 2.0
@Override
public void onLoad(NamespaceBundle bundle) {
    // 1. get new partitions owned by this pulsar service.
    // 2. load partitions by GroupCoordinator.handleGroupImmigration.
    service.pulsar().getNamespaceService().getOwnedTopicListForNamespaceBundle(bundle)
        .whenComplete((topics, ex) -> {
            if (ex == null) {
                for (String topic : topics) {
                    TopicName name = TopicName.get(topic);
                    // already filtered namespace, check the local name without partition
                    if (Topic.GROUP_METADATA_TOPIC_NAME.equals(getKafkaTopicNameFromPulsarTopicname(name))) {
                        checkState(name.isPartitioned(),
                            "OffsetTopic should be partitioned in onLoad, but get " + name);
                        KafkaTopicManager.removeLookupCache(name.toString());

                        if (log.isDebugEnabled()) {
                            log.debug("New offset partition load:  {}, broker: {}",
                                name, service.pulsar().getBrokerServiceUrl());
                        }
                        groupCoordinator.handleGroupImmigration(name.getPartitionIndex());
                    }
                }
            } else {
                log.error("Failed to get owned topic list for "
                    + "OffsetTopicListener when triggering on-loading bundle {}.",
                    bundle, ex);
            }
        });
}
 
Example #9
Source File: GroupCoordinator.java    From kop with Apache License 2.0
private void prepareRebalance(GroupMetadata group) {
    // if any members are awaiting sync, cancel their request and have them rejoin
    if (group.is(CompletingRebalance)) {
        resetAndPropagateAssignmentError(group, Errors.REBALANCE_IN_PROGRESS);
    }

    DelayedJoin delayedRebalance;

    if (group.is(Empty)) {
        delayedRebalance = new InitialDelayedJoin(this,
            joinPurgatory,
            group,
            groupConfig.groupInitialRebalanceDelayMs(),
            groupConfig.groupInitialRebalanceDelayMs(),
            Math.max(group.rebalanceTimeoutMs() - groupConfig.groupInitialRebalanceDelayMs(), 0));
    } else {
        delayedRebalance = new DelayedJoin(this, group, group.rebalanceTimeoutMs());
    }

    group.transitionTo(PreparingRebalance);

    log.info("Preparing to rebalance group {} with old generation {} ({}-{})",
        group.groupId(),
        group.generationId(),
        Topic.GROUP_METADATA_TOPIC_NAME,
        groupManager.partitionFor(group.groupId()));

    GroupKey groupKey = new GroupKey(group.groupId());
    joinPurgatory.tryCompleteElseWatch(delayedRebalance, Lists.newArrayList(groupKey));
}
 
Example #10
Source File: KafkaRequestHandler.java    From kop with Apache License 2.0
private boolean isOffsetTopic(String topic) {
    String offsetsTopic = kafkaConfig.getKafkaMetadataTenant() + "/"
        + kafkaConfig.getKafkaMetadataNamespace()
        + "/" + Topic.GROUP_METADATA_TOPIC_NAME;

    return topic.contains(offsetsTopic);
}
 
Example #11
Source File: KafkaProtocolHandler.java    From kop with Apache License 2.0
private String createKafkaOffsetsTopic(BrokerService service) throws PulsarServerException, PulsarAdminException {
    String offsetsTopic = kafkaConfig.getKafkaMetadataTenant() + "/" + kafkaConfig.getKafkaMetadataNamespace()
        + "/" + Topic.GROUP_METADATA_TOPIC_NAME;

    PartitionedTopicMetadata offsetsTopicMetadata =
        service.pulsar().getAdminClient().topics().getPartitionedTopicMetadata(offsetsTopic);
    if (offsetsTopicMetadata.partitions <= 0) {
        log.info("Kafka group metadata topic {} doesn't exist. Creating it ...",
                offsetsTopic);
        try {
            service.pulsar().getAdminClient().topics().createPartitionedTopic(
                    offsetsTopic,
                    kafkaConfig.getOffsetsTopicNumPartitions()
            );

            for (int i = 0; i < kafkaConfig.getOffsetsTopicNumPartitions(); i++) {
                service.pulsar().getAdminClient().topics()
                        .createNonPartitionedTopic(offsetsTopic + PARTITIONED_TOPIC_SUFFIX + i);
            }
        } catch (ConflictException e) {
            log.info("Topic {} concurrent creating and cause e: ", offsetsTopic, e);
            return offsetsTopic;
        }

        log.info("Successfully created group metadata topic {} with {} partitions.",
                offsetsTopic, kafkaConfig.getOffsetsTopicNumPartitions());
    }

    return offsetsTopic;
}
 
Example #12
Source File: KafkaProtocolHandler.java    From kop with Apache License 2.0
public void initGroupCoordinator(BrokerService service) throws Exception {
    GroupConfig groupConfig = new GroupConfig(
        kafkaConfig.getGroupMinSessionTimeoutMs(),
        kafkaConfig.getGroupMaxSessionTimeoutMs(),
        kafkaConfig.getGroupInitialRebalanceDelayMs()
    );

    OffsetConfig offsetConfig = OffsetConfig.builder()
        .offsetsTopicName(kafkaConfig.getKafkaMetadataTenant() + "/"
            + kafkaConfig.getKafkaMetadataNamespace()
            + "/" + Topic.GROUP_METADATA_TOPIC_NAME)
        .offsetsTopicNumPartitions(kafkaConfig.getOffsetsTopicNumPartitions())
        .offsetsTopicCompressionType(CompressionType.valueOf(kafkaConfig.getOffsetsTopicCompressionCodec()))
        .maxMetadataSize(kafkaConfig.getOffsetMetadataMaxSize())
        .offsetsRetentionCheckIntervalMs(kafkaConfig.getOffsetsRetentionCheckIntervalMs())
        .offsetsRetentionMs(TimeUnit.MINUTES.toMillis(kafkaConfig.getOffsetsRetentionMinutes()))
        .build();

    createKafkaMetadataNamespaceIfNeeded(service);
    // topicName in pulsar format: tenant/ns/topic
    createKafkaOffsetsTopic(service);

    this.groupCoordinator = GroupCoordinator.of(
        (PulsarClientImpl) (service.pulsar().getClient()),
        groupConfig,
        offsetConfig,
        SystemTimer.builder()
            .executorName("group-coordinator-timer")
            .build(),
        Time.SYSTEM
    );

    loadOffsetTopics(groupCoordinator);
}
 
Example #13
Source File: KafkaProtocolHandler.java    From kop with Apache License 2.0
@Override
public void unLoad(NamespaceBundle bundle) {
    // 1. get partitions owned by this pulsar service.
    // 2. remove partitions by groupCoordinator.handleGroupEmigration.
    service.pulsar().getNamespaceService().getOwnedTopicListForNamespaceBundle(bundle)
        .whenComplete((topics, ex) -> {
            if (ex == null) {
                for (String topic : topics) {
                    TopicName name = TopicName.get(topic);

                    // already filtered namespace, check the local name without partition
                    if (Topic.GROUP_METADATA_TOPIC_NAME.equals(getKafkaTopicNameFromPulsarTopicname(name))) {
                        checkState(name.isPartitioned(),
                            "OffsetTopic should be partitioned in unLoad, but get " + name);
                        KafkaTopicManager.removeLookupCache(name.toString());

                        if (log.isDebugEnabled()) {
                            log.debug("Offset partition unload:  {}, broker: {}",
                                name, service.pulsar().getBrokerServiceUrl());
                        }
                        groupCoordinator.handleGroupEmigration(name.getPartitionIndex());
                    }
                }
            } else {
                log.error("Failed to get owned topic list for "
                    + "OffsetTopicListener when triggering un-loading bundle {}.",
                    bundle, ex);
            }
        });
}
 
Example #14
Source File: GroupCoordinator.java    From kop with Apache License 2.0
void onCompleteJoin(GroupMetadata group) {
    group.inLock(() -> {
        // remove any members who haven't joined the group yet
        group.notYetRejoinedMembers().forEach(failedMember -> {
            removeHeartbeatForLeavingMember(group, failedMember);
            group.remove(failedMember.memberId());
            // TODO: cut the socket connection to the client
        });

        if (!group.is(Dead)) {
            group.initNextGeneration();
            if (group.is(Empty)) {
                log.info("Group {} with generation {} is now empty {}-{}",
                    group.groupId(), group.generationId(),
                    Topic.GROUP_METADATA_TOPIC_NAME, groupManager.partitionFor(group.groupId()));

                groupManager.storeGroup(group, Collections.emptyMap()).thenAccept(error -> {
                    if (error != Errors.NONE) {
                        // we failed to write the empty group metadata. If the broker fails before another
                        // rebalance, the previous generation written to the log will become active again
                        // (and most likely timeout). This should be safe since there are no active members
                        // in an empty generation, so we just warn.
                        log.warn("Failed to write empty metadata for group {}: {}",
                            group.groupId(), error.message());
                    }
                    if (log.isDebugEnabled()) {
                        log.warn("add partition ownership for group {}",
                            group.groupId());
                    }
                    groupManager.addPartitionOwnership(groupManager.partitionFor(group.groupId()));
                });
            } else {
                log.info("Stabilized group {} generation {} ({}-{})",
                    group.groupId(), group.generationId(),
                    Topic.GROUP_METADATA_TOPIC_NAME,
                    groupManager.partitionFor(group.groupId()));

                // trigger the awaiting join group response callback for all the members after rebalancing
                for (MemberMetadata member : group.allMemberMetadata()) {
                    Objects.requireNonNull(member.awaitingJoinCallback());
                    Map<String, byte[]> members;
                    if (group.isLeader(member.memberId())) {
                        members = group.currentMemberMetadata();
                    } else {
                        members = Collections.emptyMap();
                    }
                    JoinGroupResult joinResult = new JoinGroupResult(
                        members,
                        member.memberId(),
                        group.generationId(),
                        group.protocolOrNull(),
                        group.leaderOrNull(),
                        Errors.NONE);

                    member.awaitingJoinCallback().complete(joinResult);
                    member.awaitingJoinCallback(null);
                    completeAndScheduleNextHeartbeatExpiration(group, member);
                }
            }
        }
        return null;
    });
}
 
Example #15
Source File: GroupCoordinatorTest.java    From kop with Apache License 2.0
@Test
public void testBasicFetchTxnOffsets() throws Exception {
    TopicPartition tp = new TopicPartition("topic", 0);
    OffsetAndMetadata offset = OffsetAndMetadata.apply(0);
    long producerId = 1000L;
    short producerEpoch = 2;

    Map<TopicPartition, Errors> commitOffsetResult = groupCoordinator.handleTxnCommitOffsets(
        groupId, producerId, producerEpoch,
        ImmutableMap.<TopicPartition, OffsetAndMetadata>builder()
            .put(tp, offset)
            .build()
    ).get();
    assertEquals(Errors.NONE, commitOffsetResult.get(tp));

    KeyValue<Errors, Map<TopicPartition, PartitionData>> fetchOffsetsResult = groupCoordinator.handleFetchOffsets(
        groupId, Optional.of(Lists.newArrayList(tp))
    );

    // Validate that the offset isn't materialized yet.
    assertEquals(Errors.NONE, fetchOffsetsResult.getKey());
    assertEquals(OffsetFetchResponse.INVALID_OFFSET, fetchOffsetsResult.getValue().get(tp).offset);

    TopicPartition offsetsTopic = new TopicPartition(
        Topic.GROUP_METADATA_TOPIC_NAME,
        groupPartitionId
    );

    // send commit marker
    groupCoordinator.scheduleHandleTxnCompletion(
        producerId,
        Lists.newArrayList(offsetsTopic).stream(),
        TransactionResult.COMMIT
    ).get();

    // validate that committed offset is materialized
    KeyValue<Errors, Map<TopicPartition, PartitionData>> offsetFetchResult = groupCoordinator.handleFetchOffsets(
        groupId, Optional.of(Lists.newArrayList(tp))
    );
    assertEquals(Errors.NONE, offsetFetchResult.getKey());
    assertEquals(0, offsetFetchResult.getValue().get(tp).offset);
}
 
Example #16
Source File: GroupCoordinatorTest.java    From kop with Apache License 2.0
@Test
public void testFetchTxnOffsetsWithAbort() throws Exception {
    TopicPartition tp = new TopicPartition("topic", 0);
    OffsetAndMetadata offset = OffsetAndMetadata.apply(0);
    long producerId = 1000L;
    short producerEpoch = 2;

    Map<TopicPartition, Errors> commitOffsetResult = groupCoordinator.handleTxnCommitOffsets(
        groupId, producerId, producerEpoch,
        ImmutableMap.<TopicPartition, OffsetAndMetadata>builder()
            .put(tp, offset)
            .build()
    ).get();
    assertEquals(Errors.NONE, commitOffsetResult.get(tp));

    KeyValue<Errors, Map<TopicPartition, PartitionData>> fetchOffsetsResult = groupCoordinator.handleFetchOffsets(
        groupId, Optional.of(Lists.newArrayList(tp))
    );

    // Validate that the offset isn't materialized yet.
    assertEquals(Errors.NONE, fetchOffsetsResult.getKey());
    assertEquals(OffsetFetchResponse.INVALID_OFFSET, fetchOffsetsResult.getValue().get(tp).offset);

    TopicPartition offsetsTopic = new TopicPartition(
        Topic.GROUP_METADATA_TOPIC_NAME,
        groupPartitionId
    );

    // send abort marker
    groupCoordinator.scheduleHandleTxnCompletion(
        producerId,
        Lists.newArrayList(offsetsTopic).stream(),
        TransactionResult.ABORT
    ).get();

    KeyValue<Errors, Map<TopicPartition, PartitionData>> offsetFetchResult = groupCoordinator.handleFetchOffsets(
        groupId, Optional.of(Lists.newArrayList(tp))
    );
    assertEquals(Errors.NONE, offsetFetchResult.getKey());
    assertEquals(OffsetFetchResponse.INVALID_OFFSET, offsetFetchResult.getValue().get(tp).offset);
}
 
Example #17
Source File: GroupCoordinatorTest.java    From kop with Apache License 2.0
@Test
public void testFetchTxnOffsetsIgnoreSpuriousCommit() throws Exception {
    TopicPartition tp = new TopicPartition("topic", 0);
    OffsetAndMetadata offset = OffsetAndMetadata.apply(0);
    long producerId = 1000L;
    short producerEpoch = 2;

    Map<TopicPartition, Errors> commitOffsetResult = groupCoordinator.handleTxnCommitOffsets(
        groupId, producerId, producerEpoch,
        ImmutableMap.<TopicPartition, OffsetAndMetadata>builder()
            .put(tp, offset)
            .build()
    ).get();
    assertEquals(Errors.NONE, commitOffsetResult.get(tp));

    KeyValue<Errors, Map<TopicPartition, PartitionData>> fetchOffsetsResult = groupCoordinator.handleFetchOffsets(
        groupId, Optional.of(Lists.newArrayList(tp))
    );

    // Validate that the offset isn't materialized yet.
    assertEquals(Errors.NONE, fetchOffsetsResult.getKey());
    assertEquals(OffsetFetchResponse.INVALID_OFFSET, fetchOffsetsResult.getValue().get(tp).offset);

    TopicPartition offsetsTopic = new TopicPartition(
        Topic.GROUP_METADATA_TOPIC_NAME,
        groupPartitionId
    );

    // send abort marker
    groupCoordinator.scheduleHandleTxnCompletion(
        producerId,
        Lists.newArrayList(offsetsTopic).stream(),
        TransactionResult.ABORT
    ).get();

    KeyValue<Errors, Map<TopicPartition, PartitionData>> offsetFetchResult = groupCoordinator.handleFetchOffsets(
        groupId, Optional.of(Lists.newArrayList(tp))
    );
    assertEquals(Errors.NONE, offsetFetchResult.getKey());
    assertEquals(OffsetFetchResponse.INVALID_OFFSET, offsetFetchResult.getValue().get(tp).offset);

    // ignore spurious commit
    groupCoordinator.scheduleHandleTxnCompletion(
        producerId,
        Lists.newArrayList(offsetsTopic).stream(),
        TransactionResult.COMMIT
    ).get();

    KeyValue<Errors, Map<TopicPartition, PartitionData>> offsetFetchResult2 = groupCoordinator.handleFetchOffsets(
        groupId, Optional.of(Lists.newArrayList(tp))
    );
    assertEquals(Errors.NONE, offsetFetchResult2.getKey());
    assertEquals(OffsetFetchResponse.INVALID_OFFSET, offsetFetchResult2.getValue().get(tp).offset);
}
 
Example #18
Source File: GroupCoordinatorTest.java    From kop with Apache License 2.0
@Test
public void testFetchTxnOffsetsMultipleProducersOneGroup() throws Exception {
    // One group, two producers
    // Different producers will commit offsets for different partitions.
    // Each partition's offsets should be materialized when the corresponding producer's marker is received.
    List<TopicPartition> partitions = Lists.newArrayList(
        new TopicPartition("topic1", 0),
        new TopicPartition("topic2", 0)
    );
    List<OffsetAndMetadata> offsets = Lists.newArrayList(
        OffsetAndMetadata.apply(10),
        OffsetAndMetadata.apply(15)
    );

    List<Long> producerIds = Lists.newArrayList(1000L, 1005L);
    List<Short> producerEpochs = Lists.newArrayList((short) 3, (short) 4);

    TopicPartition offsetTopicPartition = new TopicPartition(
        Topic.GROUP_METADATA_TOPIC_NAME,
        groupMetadataManager.partitionFor(groupId)
    );

    List<Errors> errors = new ArrayList<>();
    List<Map<TopicPartition, PartitionData>> partitionData = new ArrayList<>();
    List<Map<TopicPartition, Errors>> commitOffsetResults = new ArrayList<>();

    // producer0 commits the offsets for partition0
    commitOffsetResults.add(
        groupCoordinator.handleTxnCommitOffsets(
            groupId, producerIds.get(0), producerEpochs.get(0),
            ImmutableMap.<TopicPartition, OffsetAndMetadata>builder()
                .put(partitions.get(0), offsets.get(0)).build()).get());
    assertEquals(Errors.NONE, commitOffsetResults.get(0).get(partitions.get(0)));

    // producer1 commits the offsets for partition1
    commitOffsetResults.add(
        groupCoordinator.handleTxnCommitOffsets(
            groupId, producerIds.get(1), producerEpochs.get(1),
            ImmutableMap.<TopicPartition, OffsetAndMetadata>builder()
                .put(partitions.get(1), offsets.get(1)).build()).get());
    assertEquals(Errors.NONE, commitOffsetResults.get(1).get(partitions.get(1)));

    // producer0 commits its transaction.
    groupCoordinator.scheduleHandleTxnCompletion(
        producerIds.get(0),
        Lists.newArrayList(offsetTopicPartition).stream(),
        TransactionResult.COMMIT
    ).get();

    KeyValue<Errors, Map<TopicPartition, PartitionData>> offsetFetchResult0 =
        groupCoordinator.handleFetchOffsets(groupId, Optional.of(partitions));
    errors.add(offsetFetchResult0.getKey());
    partitionData.add(offsetFetchResult0.getValue());

    assertEquals(Errors.NONE, errors.get(0));
    // we should only see the offset commit for producer0
    assertEquals(
        offsets.get(0).offset(),
        partitionData.get(0).get(partitions.get(0)).offset);
    assertEquals(
        OffsetFetchResponse.INVALID_OFFSET,
        partitionData.get(0).get(partitions.get(1)).offset);

    // producer 1 now commits its transaction
    groupCoordinator.scheduleHandleTxnCompletion(
        producerIds.get(1),
        Lists.newArrayList(offsetTopicPartition).stream(),
        TransactionResult.COMMIT
    ).get();

    KeyValue<Errors, Map<TopicPartition, PartitionData>> offsetFetchResult1 =
        groupCoordinator.handleFetchOffsets(groupId, Optional.of(partitions));
    errors.add(offsetFetchResult1.getKey());
    partitionData.add(offsetFetchResult1.getValue());

    assertEquals(Errors.NONE, errors.get(1));

    assertEquals(
        offsets.get(0).offset(),
        partitionData.get(1).get(partitions.get(0)).offset);
    assertEquals(
        offsets.get(1).offset(),
        partitionData.get(1).get(partitions.get(1)).offset);
}
 
Example #19
Source File: NewConsumerTest.java    From kafka-monitor with Apache License 2.0
@SuppressWarnings("unchecked")
@Test
public void testConsumerGroupCoordinatorHashing() throws ExecutionException, InterruptedException {
  Properties consumerProperties = new Properties();

  AdminClient adminClient = Mockito.mock(AdminClient.class);

  /*
   * Mock the behavior of AdminClient only.
   */
  Mockito.when(adminClient.describeTopics(Collections.singleton(Topic.GROUP_METADATA_TOPIC_NAME)))
      .thenReturn(Mockito.mock(DescribeTopicsResult.class));
  Mockito.when(adminClient.describeTopics(Collections.singleton(Topic.GROUP_METADATA_TOPIC_NAME)).values())
      .thenReturn(Mockito.mock(Map.class));
  Mockito.when(adminClient.describeTopics(Collections.singleton(Topic.GROUP_METADATA_TOPIC_NAME))
      .values()
      .get(Topic.GROUP_METADATA_TOPIC_NAME)).thenReturn(Mockito.mock(KafkaFutureImpl.class));

  Mockito.when(adminClient.describeTopics(Collections.singleton(Topic.GROUP_METADATA_TOPIC_NAME))
      .values()
      .get(Topic.GROUP_METADATA_TOPIC_NAME)
      .get()).thenReturn(Mockito.mock(TopicDescription.class));

  Mockito.when(adminClient.describeTopics(Collections.singleton(Topic.GROUP_METADATA_TOPIC_NAME))
      .values()
      .get(Topic.GROUP_METADATA_TOPIC_NAME)
      .get()
      .partitions()).thenReturn(Mockito.mock(List.class));

  Mockito.when(adminClient.describeTopics(Collections.singleton(Topic.GROUP_METADATA_TOPIC_NAME))
      .values()
      .get(Topic.GROUP_METADATA_TOPIC_NAME)
      .get()
      .partitions()
      .size()).thenReturn(NUM_OFFSETS_TOPIC_PARTITIONS);

  consumerProperties.put(ConsumerConfig.GROUP_ID_CONFIG,
      NewConsumer.configureGroupId(TARGET_CONSUMER_GROUP_ID, adminClient));
  System.out.println("Consumer properties after configuration: " + consumerProperties);
  Assert.assertNotNull(consumerProperties.get(ConsumerConfig.GROUP_ID_CONFIG));

  // Testing I: run partitionsFor() on the result to make sure they are the same
  int hashedResult =
      ConsumerGroupCoordinatorUtils.partitionFor(consumerProperties.get(ConsumerConfig.GROUP_ID_CONFIG).toString(),
          NUM_OFFSETS_TOPIC_PARTITIONS);
  int hashedResult2 =
      ConsumerGroupCoordinatorUtils.partitionFor(TARGET_CONSUMER_GROUP_ID, NUM_OFFSETS_TOPIC_PARTITIONS);

  Assert.assertEquals(hashedResult, hashedResult2);
  System.out.println("Modulo result as an absolute value: " + hashedResult);
  System.out.println("Modulo result as an absolute value: " + hashedResult2);

  // Testing II: Also test that the groupIds are different.
  Assert.assertNotEquals(TARGET_CONSUMER_GROUP_ID, consumerProperties.get(ConsumerConfig.GROUP_ID_CONFIG));

}