org.apache.kafka.common.TopicPartition Java Examples
The following examples show how to use
org.apache.kafka.common.TopicPartition.
Each example is drawn from an open-source project; its source file and license are noted above the code.
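TopicPartition is a small immutable value class that pairs a topic name with a partition number. It implements equals() and hashCode(), so it is routinely used as a map key and as the element type of assignment lists, which is exactly how the examples below use it. As a quick orientation, here is a minimal sketch of the core API; the topic name and offset values are made up for illustration:

// Minimal sketch of the TopicPartition API; "orders" and the offsets
// are illustration values, not taken from any of the examples below.
TopicPartition tp = new TopicPartition("orders", 0);
System.out.println(tp.topic());     // "orders"
System.out.println(tp.partition()); // 0

// Because TopicPartition implements equals()/hashCode(), it works as a
// map key, e.g. for tracking per-partition offsets:
Map<TopicPartition, Long> offsets = new HashMap<>();
offsets.put(tp, 42L);
offsets.put(new TopicPartition("orders", 1), 7L);

// A consumer can also be assigned specific partitions explicitly:
// consumer.assign(Arrays.asList(tp));
// consumer.seek(tp, offsets.get(tp));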
Example #1
Source File: RangeTaskAssignorTest.java From mirus with BSD 3-Clause "New" or "Revised" License
@Test
public void testAssignBalanced() {
    List<TopicPartition> partitions =
        Arrays.asList(
            new TopicPartition("a", 0),
            new TopicPartition("a", 1),
            new TopicPartition("a", 2),
            new TopicPartition("b", 0),
            new TopicPartition("b", 1),
            new TopicPartition("b", 2));

    List<List<TopicPartition>> result = sourceTaskAssignor.assign(partitions, 2);

    assertThat(
        result,
        is(
            Arrays.asList(
                Arrays.asList(
                    new TopicPartition("a", 0),
                    new TopicPartition("a", 1),
                    new TopicPartition("a", 2)),
                Arrays.asList(
                    new TopicPartition("b", 0),
                    new TopicPartition("b", 1),
                    new TopicPartition("b", 2)))));
}
Example #2
Source File: FairAssignorTest.java From common-kafka with Apache License 2.0
@Test
public void testTwoConsumersOneTopicOnePartition() {
    String topic = "topic";
    String consumer1 = "consumer1";
    String consumer2 = "consumer2";

    Map<String, Integer> partitionsPerTopic = new HashMap<>();
    partitionsPerTopic.put(topic, 1);

    Map<String, Subscription> consumers = new HashMap<>();
    consumers.put(consumer1, new Subscription(Collections.singletonList(topic)));
    consumers.put(consumer2, new Subscription(Collections.singletonList(topic)));

    Map<String, List<TopicPartition>> assignment = assignor.assign(partitionsPerTopic, consumers);
    assertEquals(Arrays.asList(new TopicPartition(topic, 0)), assignment.get(consumer1));
    assertEquals(Collections.<TopicPartition>emptyList(), assignment.get(consumer2));
}
Example #3
Source File: ClusterTopicManipulationService.java From kafka-monitor with Apache License 2.0
private int processLogDirsWithinBroker(
    Map<TopicPartition, DescribeLogDirsResponse.ReplicaInfo> topicPartitionReplicaInfoMap,
    String topic, Node broker) {
    int totalPartitionsInBroker = 0;
    for (Map.Entry<TopicPartition, DescribeLogDirsResponse.ReplicaInfo> topicPartitionReplicaInfoEntry
        : topicPartitionReplicaInfoMap.entrySet()) {
        TopicPartition topicPartition = topicPartitionReplicaInfoEntry.getKey();
        DescribeLogDirsResponse.ReplicaInfo replicaInfo = topicPartitionReplicaInfoEntry.getValue();

        if (topicPartition.topic().equals(topic)) {
            totalPartitionsInBroker++;
            LOGGER.trace("totalPartitions In The Broker = {}", totalPartitionsInBroker);
        }

        LOGGER.trace("broker information: {}", broker);
        LOGGER.trace("logDirInfo for kafka-logs: topicPartition = {}, replicaInfo = {}",
            topicPartition, replicaInfo);
    }
    return totalPartitionsInBroker;
}
Example #4
Source File: FileStreamSinkTaskTest.java From kafka-connector-skeleton with Apache License 2.0
@Test
public void testPutFlush() {
    HashMap<TopicPartition, OffsetAndMetadata> offsets = new HashMap<>();

    // We do not call task.start() since it would override the output stream
    task.put(Arrays.asList(
        new SinkRecord("topic1", 0, null, null, Schema.STRING_SCHEMA, "line1", 1)
    ));
    offsets.put(new TopicPartition("topic1", 0), new OffsetAndMetadata(1L));
    task.flush(offsets);
    assertEquals("line1\n", os.toString());

    task.put(Arrays.asList(
        new SinkRecord("topic1", 0, null, null, Schema.STRING_SCHEMA, "line2", 2),
        new SinkRecord("topic2", 0, null, null, Schema.STRING_SCHEMA, "line3", 1)
    ));
    offsets.put(new TopicPartition("topic1", 0), new OffsetAndMetadata(2L));
    offsets.put(new TopicPartition("topic2", 0), new OffsetAndMetadata(1L));
    task.flush(offsets);
    assertEquals("line1\nline2\nline3\n", os.toString());
}
Example #5
Source File: MessageAssemblerTest.java From li-apache-kafka-clients with BSD 2-Clause "Simplified" License
@Test
public void testTreatBadSegmentAsPayload() {
    Serializer<LargeMessageSegment> segmentSerializer = new DefaultSegmentSerializer();
    Deserializer<LargeMessageSegment> segmentDeserializer = new DefaultSegmentDeserializer();
    MessageAssembler messageAssembler = new MessageAssemblerImpl(100, 100, true, segmentDeserializer);
    TopicPartition tp = new TopicPartition("topic", 0);

    UUID uuid = UUID.randomUUID();
    byte[] realPayload = "message".getBytes();
    LargeMessageSegment badSegment = new LargeMessageSegment(uuid, -1, 100, -1, ByteBuffer.wrap(realPayload));
    byte[] messageWrappedBytes = segmentSerializer.serialize(tp.topic(), badSegment);
    Assert.assertTrue(messageWrappedBytes.length > realPayload.length); // wrapping has been done

    messageAssembler.assemble(tp, 0, messageWrappedBytes);
    MessageAssembler.AssembleResult assembleResult = messageAssembler.assemble(tp, 0, messageWrappedBytes);
    Assert.assertEquals(assembleResult.messageBytes(), messageWrappedBytes);
    Assert.assertEquals(assembleResult.messageStartingOffset(), 0);
    Assert.assertEquals(assembleResult.messageEndingOffset(), 0);
}
Example #6
Source File: OffsetManager.java From data-highway with Apache License 2.0
public Map<Integer, Long> getCommittedOffsets(String topicName) {
    synchronized (consumer) {
        List<TopicPartition> topicPartitions = topicPartitions(topicName);
        ImmutableMap.Builder<Integer, Long> builder = ImmutableMap.builder();
        topicPartitions.forEach(tp -> {
            OffsetAndMetadata offsetAndMetadata = consumer.committed(tp);
            Long offset;
            if (offsetAndMetadata == null) {
                offset = consumer.beginningOffsets(singleton(tp)).get(tp);
            } else {
                offset = offsetAndMetadata.offset();
            }
            builder.put(tp.partition(), offset);
        });
        return builder.build();
    }
}
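A note on the null check above: KafkaConsumer.committed(tp) returns null when the consumer group has never committed an offset for that partition, so the code falls back to the partition's beginning offset, guaranteeing the returned map has an entry for every partition of the topic.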
Example #7
Source File: TaskConfigBuilder.java From mirus with BSD 3-Clause "New" or "Revised" License
/** Generate a list of Task Configuration Maps from the current list of partitions. */
public List<Map<String, String>> fromPartitionList(
    int maxTasks, List<TopicPartition> topicPartitionList) {
    // Assign partitions to tasks.
    List<List<TopicPartition>> partitionsByTask =
        sourceTaskAssignor.assign(
            topicPartitionList, Math.min(topicPartitionList.size(), maxTasks));

    // Generate configuration for each task.
    AtomicInteger taskCounter = new AtomicInteger();
    return partitionsByTask
        .stream()
        .map(TopicPartitionSerDe::toJson)
        .map(partitionList -> mapOf(TaskConfigDefinition.PARTITION_LIST, partitionList))
        .peek(t -> t.putAll(filteredConfig))
        .map(m -> makeClientIdUnique(m, taskCounter.getAndIncrement()))
        .collect(Collectors.toList());
}
Example #8
Source File: MaasEvent.java From DBus with Apache License 2.0
public MaasEvent(String topic, String dataTopic) {
    super(01);
    this.topic = topic;
    this.dataTopic = dataTopic;
    dao = new DbusDataDaoImpl();
    Properties props = HeartBeatConfigContainer.getInstance().getmaasConf().getConsumerProp();
    Properties producerProps = HeartBeatConfigContainer.getInstance().getmaasConf().getProducerProp();
    try {
        LoggerFactory.getLogger().info("[topic] ...." + topic);
        LoggerFactory.getLogger().info("[maas-event] initial.........................");
        dataConsumer = new KafkaConsumer<>(props);
        partition0 = new TopicPartition(this.topic, 0);
        dataConsumer.assign(Arrays.asList(partition0));
        dataConsumer.seekToEnd(Arrays.asList(partition0));
        statProducer = new KafkaProducer<>(producerProps);
    } catch (Exception e) {
        e.printStackTrace();
        LoggerFactory.getLogger().error(e.getMessage(), e);
    }
}
Example #9
Source File: KafkaReader.java From DBus with Apache License 2.0
/**
 * createConsumer - create a new consumer
 *
 * @return
 * @throws Exception
 */
private Consumer<String, String> createConsumer() throws Exception {
    // Seek to end automatically
    TopicPartition dataTopicPartition = new TopicPartition(topicName, 0);
    List<TopicPartition> topics = Arrays.asList(dataTopicPartition);

    Properties props = ConfUtils.getProps(CONSUMER_PROPS);
    Consumer<String, String> consumer = new KafkaConsumer<>(props);
    consumer.assign(topics);

    if (offset == -1) {
        consumer.seekToEnd(topics);
        logger.info("Consumer seek to end");
    } else {
        consumer.seek(dataTopicPartition, offset);
        logger.info(String.format("read changed as offset: %s", consumer.position(dataTopicPartition)));
    }
    return consumer;
}
Example #10
Source File: TopicPartitionsOffsetInfo.java From kafka-utilities with Apache License 2.0
public Map<TopicPartition, OffsetFetchResponse.PartitionData> getCommitedOffsets(
    final String groupName, final List<TopicPartition> topicPartitions,
    final long responseWaitTime) throws OffsetFetchException {
    if (this.coordinator == null) {
        throw new OffsetFetchException("Missing Group Coordinator for group:" + groupName);
    }
    OffsetFetchRequest.Builder offsetRequestBuilder =
        new OffsetFetchRequest.Builder(groupName, topicPartitions);
    this.kafkaApiRequest.sendApiRequest(this.coordinator, offsetRequestBuilder);
    OffsetFetchResponse offsetFetchResponse =
        (OffsetFetchResponse) this.kafkaApiRequest.getLastApiResponse(responseWaitTime);
    if (offsetFetchResponse.error() == Errors.NONE) {
        return offsetFetchResponse.responseData();
    } else {
        throw new OffsetFetchException(offsetFetchResponse.error().message());
    }
}
Example #11
Source File: KafkaQueryChangeLogIT.java From rya with Apache License 2.0
@Test
public void readFromPosition_positionStartsNotBegining() throws Exception {
    final List<QueryChange> expected = write10ChangesToChangeLog().subList(5, 10);

    // set the position to some non-0 position
    final TopicPartition partition = new TopicPartition(topic, 0);
    consumer.assign(Lists.newArrayList(partition));
    consumer.seekToEnd(Lists.newArrayList(partition));

    final CloseableIteration<ChangeLogEntry<QueryChange>, QueryChangeLogException> iter =
        changeLog.readFromPosition(5L);
    final List<QueryChange> actual = new ArrayList<>();
    while (iter.hasNext()) {
        final ChangeLogEntry<QueryChange> entry = iter.next();
        actual.add(entry.getEntry());
    }
    assertEquals(expected, actual);
}
Example #12
Source File: WorkerInstance.java From uReplicator with Apache License 2.0
/**
 * Adds topic partition to worker instance
 *
 * @param topic topic name
 * @param partition partition id
 * @param startingOffset starting offset for topic partition
 * @param endingOffset ending offset for topic partition
 * @param dstTopic topic name in destination cluster
 */
public void addTopicPartition(String topic, int partition, Long startingOffset,
    Long endingOffset, String dstTopic) {
    if (observer != null) {
        observer.addTopic(topic);
    }
    if (StringUtils.isNotBlank(dstTopic)) {
        topicMapping.put(topic, dstTopic);
    }
    TopicPartition topicPartition = new TopicPartition(topic, partition);
    long offset = startingOffset != null ? startingOffset
        : checkpointManager.fetchOffset(topicPartition);

    LOGGER.info("Adding topic: {}, partition {}, starting offset {}", topic, partition, offset);
    PartitionOffsetInfo offsetInfo = new PartitionOffsetInfo(topicPartition, offset, endingOffset);
    fetcherManager.addTopicPartition(topicPartition, offsetInfo);
}
Example #13
Source File: TestKafkaSystemAdminWithMock.java From samza with Apache License 2.0
@Test
public void testGetSystemStreamMetaDataWithRetry() {
    final List<PartitionInfo> partitionInfosForTopic = ImmutableList.of(mockPartitionInfo0, mockPartitionInfo1);
    when(mockKafkaConsumer.partitionsFor(VALID_TOPIC))
        .thenThrow(new RuntimeException())
        .thenReturn(partitionInfosForTopic);

    Map<String, SystemStreamMetadata> metadataMap =
        kafkaSystemAdmin.getSystemStreamMetadata(ImmutableSet.of(VALID_TOPIC));
    assertEquals("metadata should return for 1 topic", metadataMap.size(), 1);

    // retried twice because the first fails and the second succeeds
    Mockito.verify(mockKafkaConsumer, Mockito.times(2)).partitionsFor(VALID_TOPIC);

    final List<TopicPartition> topicPartitions = Arrays.asList(
        new TopicPartition(mockPartitionInfo0.topic(), mockPartitionInfo0.partition()),
        new TopicPartition(mockPartitionInfo1.topic(), mockPartitionInfo1.partition()));

    // the following methods thereafter are only called once
    Mockito.verify(mockKafkaConsumer, Mockito.times(1)).beginningOffsets(topicPartitions);
    Mockito.verify(mockKafkaConsumer, Mockito.times(1)).endOffsets(topicPartitions);
}
Example #14
Source File: TopicPartitionDeserializer.java From kafka-graphs with Apache License 2.0
@Override
public TopicPartition deserialize(String topic, byte[] data) {
    if (data == null || data.length == 0) {
        return null;
    }
    try {
        ByteBuffer buf = ByteBuffer.wrap(data);
        int topicLength = buf.getInt();
        byte[] topicBytes = new byte[topicLength];
        buf.get(topicBytes);
        String otherTopic = new String(topicBytes, ENCODING);
        int partition = buf.getInt();
        return new TopicPartition(otherTopic, partition);
    } catch (UnsupportedEncodingException e) {
        throw new SerializationException("Error when deserializing byte[] to string");
    }
}
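The deserializer above expects a 4-byte length prefix, the topic name bytes, and then a 4-byte partition id. For reference, a matching serializer might look like the following sketch; the class name is hypothetical (not taken from kafka-graphs), and it assumes the same ENCODING constant:

// Hypothetical counterpart to the deserializer above: writes the topic name
// length-prefixed, then the partition id, in the same byte layout.
public class TopicPartitionSerializerSketch implements Serializer<TopicPartition> {
    private static final String ENCODING = "UTF8"; // assumed to match the deserializer

    @Override
    public byte[] serialize(String topic, TopicPartition data) {
        if (data == null) {
            return null;
        }
        try {
            byte[] topicBytes = data.topic().getBytes(ENCODING);
            ByteBuffer buf = ByteBuffer.allocate(4 + topicBytes.length + 4);
            buf.putInt(topicBytes.length); // topic name length
            buf.put(topicBytes);           // topic name bytes
            buf.putInt(data.partition());  // partition id
            return buf.array();
        } catch (UnsupportedEncodingException e) {
            throw new SerializationException("Error when serializing TopicPartition to byte[]");
        }
    }
}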
Example #15
Source File: KafkaRecordsConsumerTest.java From synapse with Apache License 2.0
@Test
public void shouldNotChangeDurationBehindOnNoRecords() {
    // given
    final KafkaRecordsConsumer consumer = someKafkaRecordsConsumer(fromHorizon());
    durationBehindHandler.onPartitionsAssigned(asList(new TopicPartition("", 0), new TopicPartition("", 1)));

    ConsumerRecord<String, String> consumerRecord = new ConsumerRecord<>(
        "", 0, 23, now().minusSeconds(100).toEpochMilli(),
        TimestampType.CREATE_TIME, 0, 0, 0, "key", "value");
    consumer.apply(new ConsumerRecords<>(ImmutableMap.of(new TopicPartition("", 0), singletonList(consumerRecord))));

    assertThat(getSecondsBehind("0"), is(100L));
    assertThat(getSecondsBehind("1"), is(9223372036854775L));

    // when
    consumer.apply(ConsumerRecords.empty());

    // then
    assertThat(getSecondsBehind("0"), is(100L));
    assertThat(getSecondsBehind("1"), is(9223372036854775L));
}
Example #16
Source File: OffsetCommitWorkerIntegrationTest.java From beast with Apache License 2.0
@Test
public void shouldStopWhenNoAcknowledgements() throws InterruptedException {
    Map<TopicPartition, OffsetAndMetadata> offsetMap1 = recordsUtil.createRecords("driver-", 3).getPartitionsCommitOffset();
    Map<TopicPartition, OffsetAndMetadata> offsetMap2 = recordsUtil.createRecords("customer-", 3).getPartitionsCommitOffset();
    Map<TopicPartition, OffsetAndMetadata> offsetMap3 = recordsUtil.createRecords("merchant-", 3).getPartitionsCommitOffset();
    List<Map<TopicPartition, OffsetAndMetadata>> recordsList = Arrays.asList(offsetMap1, offsetMap2, offsetMap3);
    commitQueue.addAll(recordsList);

    committer.setDefaultSleepMs(10);
    Thread committerThread = new Thread(committer);
    committerThread.start();
    committerThread.join();

    InOrder inOrder = inOrder(kafkaConsumer);
    inOrder.verify(kafkaConsumer, never()).commitSync(anyMap());
    assertEquals(2, commitQueue.size());
    inOrder.verify(kafkaConsumer, atLeastOnce()).wakeup(anyString());
    assertTrue(acknowledgements.isEmpty());
}
Example #17
Source File: TopicPartitionRecordGrouper.java From aiven-kafka-connect-gcs with GNU Affero General Public License v3.0
private String generateRecordKey(final TopicPartition tp, final SinkRecord headRecord) {
    // FIXME move into commons lib
    final Function<Parameter, String> setKafkaOffset =
        usePaddingParameter -> usePaddingParameter.asBoolean()
            ? String.format("%020d", headRecord.kafkaOffset())
            : Long.toString(headRecord.kafkaOffset());

    return filenameTemplate.instance()
        .bindVariable(FilenameTemplateVariable.TOPIC.name, tp::topic)
        .bindVariable(
            FilenameTemplateVariable.PARTITION.name,
            () -> Integer.toString(tp.partition())
        ).bindVariable(
            FilenameTemplateVariable.START_OFFSET.name,
            setKafkaOffset
        ).bindVariable(
            FilenameTemplateVariable.TIMESTAMP.name,
            setTimestamp
        ).render();
}
Example #18
Source File: ExecutionTaskManagerTest.java From cruise-control with BSD 2-Clause "Simplified" License
@Test
public void testStateChangeSequences() {
    TopicPartition tp = new TopicPartition("topic", 0);
    ExecutionTaskManager taskManager =
        new ExecutionTaskManager(null, new MetricRegistry(), new SystemTime(),
            new KafkaCruiseControlConfig(KafkaCruiseControlUnitTestUtils.getKafkaCruiseControlProperties()));

    List<List<ExecutionTask.State>> testSequences = new ArrayList<>();
    // Completed successfully.
    testSequences.add(Arrays.asList(IN_PROGRESS, COMPLETED));
    // Rollback succeeded.
    testSequences.add(Arrays.asList(IN_PROGRESS, ABORTING, ABORTED));
    // Rollback failed.
    testSequences.add(Arrays.asList(IN_PROGRESS, ABORTING, DEAD));
    // Cannot rollback.
    testSequences.add(Arrays.asList(IN_PROGRESS, DEAD));

    ReplicaPlacementInfo r0 = new ReplicaPlacementInfo(0);
    ReplicaPlacementInfo r1 = new ReplicaPlacementInfo(1);
    ReplicaPlacementInfo r2 = new ReplicaPlacementInfo(2);

    for (List<ExecutionTask.State> sequence : testSequences) {
        taskManager.clear();
        // Make sure the proposal does not involve leader movement.
        ExecutionProposal proposal =
            new ExecutionProposal(tp, 10, r2, Arrays.asList(r0, r2), Arrays.asList(r2, r1));
        taskManager.setExecutionModeForTaskTracker(false);
        taskManager.addExecutionProposals(Collections.singletonList(proposal),
            Collections.emptySet(),
            generateExpectedCluster(proposal, tp),
            null);
        taskManager.setRequestedInterBrokerPartitionMovementConcurrency(null);
        taskManager.setRequestedIntraBrokerPartitionMovementConcurrency(null);
        taskManager.setRequestedLeadershipMovementConcurrency(null);
        List<ExecutionTask> tasks = taskManager.getInterBrokerReplicaMovementTasks();
        assertEquals(1, tasks.size());
        ExecutionTask task = tasks.get(0);
        verifyStateChangeSequence(sequence, task, taskManager);
    }
}
Example #19
Source File: PastReplicaStatsProcessor.java From doctorkafka with Apache License 2.0
public PastReplicaStatsProcessor(String zkUrl, SecurityProtocol securityProtocol,
    TopicPartition topicPartition, long startOffset, long endOffset,
    ReplicaStatsManager replicaStatsManager) {
    this.zkUrl = zkUrl;
    this.securityProtocol = securityProtocol;
    this.topicPartition = topicPartition;
    this.startOffset = startOffset;
    this.endOffset = endOffset;
    this.replicaStatsManager = replicaStatsManager;
}
Example #20
Source File: KafkaFetcher.java From flink with Apache License 2.0
protected void emitRecord(
    T record,
    KafkaTopicPartitionState<TopicPartition> partition,
    long offset,
    ConsumerRecord<?, ?> consumerRecord) throws Exception {
    emitRecordWithTimestamp(record, partition, offset, consumerRecord.timestamp());
}
Example #21
Source File: GraphUtils.java From kafka-graphs with Apache License 2.0
public SendMessages(String topic,
                    Producer<K, V> producer,
                    Map<TopicPartition, Long> lastWrittenOffsets) {
    this.topic = topic;
    this.producer = producer;
    this.lastWrittenOffsets = lastWrittenOffsets;
}
Example #22
Source File: KafkaPartitionMetricSampleAggregatorTest.java From cruise-control with BSD 2-Clause "Simplified" License
@Test
public void testFallbackToAvgAdjacent() throws NotEnoughValidWindowsException {
    KafkaCruiseControlConfig config = new KafkaCruiseControlConfig(getLoadMonitorProperties());
    TopicPartition anotherTopicPartition = new TopicPartition("AnotherTopic", 1);
    PartitionEntity anotherPartitionEntity = new PartitionEntity(anotherTopicPartition);
    Metadata metadata = getMetadata(Arrays.asList(TP, anotherTopicPartition));
    KafkaPartitionMetricSampleAggregator metricSampleAggregator =
        new KafkaPartitionMetricSampleAggregator(config, metadata);

    // Only give one sample to the aggregator for the previous period.
    populateSampleAggregator(NUM_WINDOWS, MIN_SAMPLES_PER_WINDOW, metricSampleAggregator);
    // Let window (NUM_WINDOWS + 1) have enough samples.
    CruiseControlUnitTestUtils.populateSampleAggregator(1, MIN_SAMPLES_PER_WINDOW,
        metricSampleAggregator, PE, NUM_WINDOWS, WINDOW_MS, KafkaMetricDef.commonMetricDef());
    // Let a window exist but not contain samples for partition 0.
    CruiseControlUnitTestUtils.populateSampleAggregator(1, MIN_SAMPLES_PER_WINDOW,
        metricSampleAggregator, anotherPartitionEntity, NUM_WINDOWS + 1, WINDOW_MS,
        KafkaMetricDef.commonMetricDef());
    // Let the rest of the windows have enough samples.
    CruiseControlUnitTestUtils.populateSampleAggregator(2, MIN_SAMPLES_PER_WINDOW,
        metricSampleAggregator, PE, NUM_WINDOWS + 2, WINDOW_MS, KafkaMetricDef.commonMetricDef());

    MetricSampleAggregationResult<String, PartitionEntity> result =
        metricSampleAggregator.aggregate(metadata.fetch(), NUM_WINDOWS * WINDOW_MS * 2, new OperationProgress());
    int numWindows = result.valuesAndExtrapolations().get(PE).metricValues().length();
    assertEquals(NUM_WINDOWS, numWindows);
    int numExtrapolations = 0;
    for (Map.Entry<Integer, Extrapolation> entry : result.valuesAndExtrapolations().get(PE).extrapolations().entrySet()) {
        assertEquals(Extrapolation.AVG_ADJACENT, entry.getValue());
        numExtrapolations++;
    }
    assertEquals(1, numExtrapolations);
}
Example #23
Source File: DistributedClusterTest.java From kop with Apache License 2.0
protected void kafkaConsumeCommitMessage(KConsumer kConsumer,
    int numMessages, String messageStrPrefix, List<TopicPartition> topicPartitions) {
    kConsumer.getConsumer().assign(topicPartitions);
    int i = 0;
    while (i < numMessages) {
        if (log.isDebugEnabled()) {
            log.debug("kConsumer {} start poll message: {}",
                kConsumer.getTopic() + kConsumer.getConsumerGroup(), i);
        }
        ConsumerRecords<Integer, String> records = kConsumer.getConsumer().poll(Duration.ofSeconds(1));
        for (ConsumerRecord<Integer, String> record : records) {
            Integer key = record.key();
            assertEquals(messageStrPrefix + key.toString(), record.value());

            if (log.isDebugEnabled()) {
                log.debug("Kafka consumer get message: {}, key: {} at offset {}",
                    record.key(), record.value(), record.offset());
            }
            i++;
        }
    }
    assertEquals(i, numMessages);

    try {
        kConsumer.getConsumer().commitSync(Duration.ofSeconds(1));
    } catch (Exception e) {
        log.error("Commit offset failed: ", e);
    }
    if (log.isDebugEnabled()) {
        log.debug("kConsumer {} finished poll and commit message: {}",
            kConsumer.getTopic() + kConsumer.getConsumerGroup(), i);
    }
}
Example #24
Source File: NewApiTopicConsumer.java From jeesuite-libs with Apache License 2.0
private void commitOffsets(ConsumerWorker worker) {
    KafkaConsumer<String, Serializable> consumer = worker.consumer;
    if (worker.isCommiting()) return;
    worker.setCommiting(true);
    try {
        if (worker.uncommittedOffsetMap.isEmpty()) return;
        logger.debug("committing the offsets : {}", worker.uncommittedOffsetMap);
        consumer.commitAsync(worker.uncommittedOffsetMap, new OffsetCommitCallback() {
            @Override
            public void onComplete(Map<TopicPartition, OffsetAndMetadata> offsets, Exception exception) {
                // worker.setCommiting(false);
                if (exception == null) {
                    worker.resetUncommittedOffsetMap();
                    logger.debug("committed the offsets : {}", offsets);
                } else {
                    logger.error("commit the offsets error", exception);
                }
            }
        });
    } finally {
    }
}
Example #25
Source File: GroupMetadataManagerTest.java From kop with Apache License 2.0
private int appendConsumerOffsetCommit(ByteBuffer buffer, long baseOffset,
    Map<TopicPartition, Long> offsets) {
    MemoryRecordsBuilder builder = MemoryRecords.builder(buffer, CompressionType.NONE,
        TimestampType.LOG_APPEND_TIME, baseOffset);
    List<SimpleRecord> commitRecords = createCommittedOffsetRecords(offsets, groupId);
    commitRecords.forEach(builder::append);
    builder.build();
    return offsets.size();
}
Example #26
Source File: OffsetSource.java From kafka-backup with Apache License 2.0
private void findOffsetStores(Path backupDir, String topic) throws IOException {
    Path topicDir = Paths.get(backupDir.toString(), topic);
    for (Path f : Files.list(topicDir).collect(Collectors.toList())) {
        Optional<Integer> partition = OffsetUtils.isOffsetStoreFile(f);
        if (partition.isPresent()) {
            TopicPartition topicPartition = new TopicPartition(topic, partition.get());
            topicOffsets.put(topicPartition, new OffsetStoreFile(f));
        }
    }
}
Example #27
Source File: SocketKafkaConsumer.java From kafka-webview with MIT License
private void seekToHead() {
    // Get all available partitions
    final List<TopicPartition> topicPartitions = getAllPartitions();

    // Get head offsets for each partition
    final Map<TopicPartition, Long> headOffsets = kafkaConsumer.beginningOffsets(topicPartitions);
    seek(headOffsets);
}
Example #28
Source File: GroupMetadata.java From kop with Apache License 2.0
public void prepareTxnOffsetCommit(long producerId, Map<TopicPartition, OffsetAndMetadata> offsets) {
    if (log.isTraceEnabled()) {
        log.trace("TxnOffsetCommit for producer {} and group {} with offsets {} is pending",
            producerId, groupId, offsets);
    }
    receivedTransactionalOffsetCommits = true;
    Map<TopicPartition, CommitRecordMetadataAndOffset> producerOffsets =
        pendingTransactionalOffsetCommits.computeIfAbsent(producerId, pid -> new HashMap<>());
    offsets.forEach((tp, offsetsAndMetadata) ->
        producerOffsets.put(tp, new CommitRecordMetadataAndOffset(
            Optional.empty(),
            offsetsAndMetadata
        )));
}
Example #29
Source File: KafkaConsumerThread.java From Flink-CEPplus with Apache License 2.0
@Override
public void onComplete(Map<TopicPartition, OffsetAndMetadata> offsets, Exception ex) {
    commitInProgress = false;

    if (ex != null) {
        log.warn("Committing offsets to Kafka failed. This does not compromise Flink's checkpoints.", ex);
        internalCommitCallback.onException(ex);
    } else {
        internalCommitCallback.onSuccess();
    }
}
Example #30
Source File: KafkaActorThread.java From elasticactors with Apache License 2.0
private void assignPartitions() {
    // assign the message partitions
    List<TopicPartition> messagePartitions = this.localShards.keySet().stream()
        .map(managedActorShard -> new TopicPartition(messagesTopic, managedActorShard.getShardId()))
        .collect(Collectors.toList());
    if (localActorNode != null) {
        // node topics have exactly the number of partitions as there are KafkaActorThreads per node
        // this is very fickle but needed to support the affinityKey logic for TempActors
        messagePartitions.add(new TopicPartition(
            getNodeMessagesTopic(internalActorSystem, localActorNode.actorNode.getKey().getNodeId()),
            nodeTopicPartitionId));
    }
    this.messageConsumer.assign(messagePartitions);

    // also need to assign the state partitions
    List<TopicPartition> statePartitions = this.localShards.keySet().stream()
        .map(managedActorShard -> new TopicPartition(persistentActorsTopic, managedActorShard.getShardId()))
        .collect(Collectors.toList());
    this.stateConsumer.assign(statePartitions);

    // and the scheduled messages
    List<TopicPartition> scheduledMessagesPartitions = this.localShards.keySet().stream()
        .map(managedActorShard -> new TopicPartition(scheduledMessagesTopic, managedActorShard.getShardId()))
        .collect(Collectors.toList());
    this.scheduledMessagesConsumer.assign(scheduledMessagesPartitions);

    // the actorsystem event listeners
    List<TopicPartition> actorSystemEventListenersPartitions = this.localShards.keySet().stream()
        .map(managedActorShard -> new TopicPartition(actorSystemEventListenersTopic, managedActorShard.getShardId()))
        .collect(Collectors.toList());
    this.actorSystemEventListenersConsumer.assign(actorSystemEventListenersPartitions);
}