Java Code Examples for org.apache.kafka.connect.sink.SinkRecord#kafkaPartition()
The following examples show how to use org.apache.kafka.connect.sink.SinkRecord#kafkaPartition(). Each example is drawn from an open-source project; the source file, project, and license are noted above each snippet.
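Most of the examples below pair kafkaPartition() with topic() and kafkaOffset() to identify a record's coordinates, typically by building an org.apache.kafka.common.TopicPartition. As a minimal sketch of that recurring pattern (the class and field names here are illustrative, not taken from any of the projects below):

import java.util.Collection;
import java.util.HashMap;
import java.util.Map;

import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.connect.sink.SinkRecord;
import org.apache.kafka.connect.sink.SinkTask;

// Illustrative task that tracks the highest offset seen per topic-partition.
public abstract class OffsetTrackingSinkTask extends SinkTask {
    private final Map<TopicPartition, Long> latestOffsets = new HashMap<>();

    @Override
    public void put(Collection<SinkRecord> records) {
        for (SinkRecord record : records) {
            // kafkaPartition() returns the partition this record was consumed from.
            TopicPartition tp = new TopicPartition(record.topic(), record.kafkaPartition());
            latestOffsets.put(tp, record.kafkaOffset());
        }
    }
}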
Example 1
Source File: PlainPayload.java From kafka-connect-lambda with Apache License 2.0
public PlainPayload(final SinkRecord record) {
    this.key = record.key() == null ? "" : record.key().toString();
    if (record.keySchema() != null)
        this.keySchemaName = record.keySchema().name();

    this.value = record.value() == null ? "" : record.value().toString();
    if (record.valueSchema() != null)
        this.valueSchemaName = record.valueSchema().name();

    this.topic = record.topic();
    this.partition = record.kafkaPartition();
    this.offset = record.kafkaOffset();

    if (record.timestamp() != null)
        this.timestamp = record.timestamp();
    if (record.timestampType() != null)
        this.timestampTypeName = record.timestampType().name;
}
Example 2
Source File: TopicPartitionRecordGrouper.java From aiven-kafka-connect-gcs with GNU Affero General Public License v3.0
@Override
public void put(final SinkRecord record) {
    Objects.requireNonNull(record, "record cannot be null");
    final TopicPartition tp = new TopicPartition(record.topic(), record.kafkaPartition());
    final SinkRecord currentHeadRecord = currentHeadRecords.computeIfAbsent(tp, ignored -> record);
    final String recordKey = generateRecordKey(tp, currentHeadRecord);
    if (shouldCreateNewFile(recordKey)) {
        // Create new file using this record as the head record.
        currentHeadRecords.put(tp, record);
        final String newRecordKey = generateRecordKey(tp, record);
        fileBuffers.computeIfAbsent(newRecordKey, ignored -> new ArrayList<>()).add(record);
    } else {
        fileBuffers.computeIfAbsent(recordKey, ignored -> new ArrayList<>()).add(record);
    }
}
Example 3
Source File: BackupSinkTask.java From kafka-backup with Apache License 2.0
@Override
public void put(Collection<SinkRecord> records) {
    try {
        for (SinkRecord sinkRecord : records) {
            TopicPartition topicPartition = new TopicPartition(sinkRecord.topic(), sinkRecord.kafkaPartition());
            PartitionWriter partition = partitionWriters.get(topicPartition);
            partition.append(Record.fromSinkRecord(sinkRecord));
            if (sinkRecord.kafkaOffset() % 100 == 0) {
                log.debug("Backed up Topic {}, Partition {}, up to offset {}",
                        sinkRecord.topic(), sinkRecord.kafkaPartition(), sinkRecord.kafkaOffset());
            }
            if (config.snapShotMode()) {
                currentOffsets.put(topicPartition, sinkRecord.kafkaOffset());
            }
        }

        // Todo: refactor to own worker. E.g. using the scheduler of MM2
        offsetSink.syncConsumerGroups();
        offsetSink.syncOffsets();

        if (config.snapShotMode()) {
            terminateIfCompleted();
        }
    } catch (IOException | SegmentIndex.IndexException
            | PartitionIndex.IndexException | SegmentWriter.SegmentException e) {
        throw new RuntimeException(e);
    }
}
Example 4
Source File: SchemaUtils.java From streamx with Apache License 2.0
public static SinkRecord project(SinkRecord record, Schema currentSchema, Compatibility compatibility) {
    switch (compatibility) {
        case BACKWARD:
        case FULL:
        case FORWARD:
            Schema sourceSchema = record.valueSchema();
            Object value = record.value();
            if (sourceSchema == currentSchema || sourceSchema.equals(currentSchema)) {
                return record;
            }
            Object projected = SchemaProjector.project(sourceSchema, value, currentSchema);
            return new SinkRecord(record.topic(), record.kafkaPartition(), record.keySchema(),
                    record.key(), currentSchema, projected, record.kafkaOffset());
        default:
            return record;
    }
}
Example 5
Source File: MongoDbSinkTask.java From MongoDb-Sink-Connector with Apache License 2.0
@Override
public void put(Collection<SinkRecord> sinkRecords) {
    if (writer == null) {
        return;
    }
    putTimer.start();
    for (SinkRecord record : sinkRecords) {
        TopicPartition partition = new TopicPartition(record.topic(), record.kafkaPartition());
        latestOffsetPut.put(partition, record.kafkaOffset());
        buffer.add(record);
        monitor.increment();
        if (log.isDebugEnabled()) {
            log.debug("{} --> {}", partition, record.kafkaOffset());
        }
    }
    putTimer.stop();
}
Example 6
Source File: CouchbaseSinkTask.java From kafka-connect-couchbase with Apache License 2.0
private static String documentIdFromKafkaMetadata(SinkRecord record) {
    Object key = record.key();

    if (key instanceof String || key instanceof Number || key instanceof Boolean) {
        return key.toString();
    }

    if (key instanceof byte[]) {
        return new String((byte[]) key, UTF_8);
    }

    if (key instanceof ByteBuffer) {
        return toString((ByteBuffer) key);
    }

    return record.topic() + "/" + record.kafkaPartition() + "/" + record.kafkaOffset();
}
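The toString(ByteBuffer) helper referenced above is part of the same class but not shown here. A minimal sketch of such a helper, assuming the buffer holds UTF-8 text, could be:

private static String toString(ByteBuffer buffer) {
    // Decode the buffer's remaining bytes as UTF-8; duplicate() leaves the
    // original buffer's position untouched.
    return UTF_8.decode(buffer.duplicate()).toString();
}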
Example 7
Source File: Payload.java From kafka-connect-lambda with Apache License 2.0
public Payload(final SinkRecord record) {
    if (record.keySchema() != null) {
        this.keySchemaName = record.keySchema().name();
        if (record.keySchema().version() != null) {
            this.keySchemaVersion = record.keySchema().version().toString();
        }
    }

    if (record.valueSchema() != null) {
        this.valueSchemaName = record.valueSchema().name();
        if (record.valueSchema().version() != null) {
            this.valueSchemaVersion = record.valueSchema().version().toString();
        }
    }

    this.topic = record.topic();
    this.partition = record.kafkaPartition();
    this.offset = record.kafkaOffset();

    if (record.timestamp() != null) {
        this.timestamp = record.timestamp();
    }
    if (record.timestampType() != null) {
        this.timestampTypeName = record.timestampType().name;
    }
}
Example 8
Source File: Record.java From kafka-backup with Apache License 2.0
public static Record fromSinkRecord(SinkRecord sinkRecord) {
    byte[] key = connectDataToBytes(sinkRecord.keySchema(), sinkRecord.key());
    byte[] value = connectDataToBytes(sinkRecord.valueSchema(), sinkRecord.value());
    RecordHeaders recordHeaders = new RecordHeaders();
    for (org.apache.kafka.connect.header.Header connectHeader : sinkRecord.headers()) {
        byte[] headerValue = connectDataToBytes(connectHeader.schema(), connectHeader.value());
        recordHeaders.add(connectHeader.key(), headerValue);
    }
    return new Record(sinkRecord.topic(), sinkRecord.kafkaPartition(), key, value,
            sinkRecord.kafkaOffset(), sinkRecord.timestamp(), sinkRecord.timestampType(), recordHeaders);
}
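The connectDataToBytes helper is not shown in this excerpt. A hypothetical sketch, assuming the worker is configured with ByteArrayConverter so keys and values arrive as raw byte arrays (an assumption about the project's setup, not its actual code):

// Hypothetical helper: pass raw bytes through, rejecting non-BYTES schemas.
private static byte[] connectDataToBytes(Schema schema, Object value) {
    if (value == null) {
        return null;
    }
    if (schema != null && schema.type() != Schema.Type.BYTES) {
        throw new org.apache.kafka.connect.errors.DataException(
                "Expected BYTES data but got " + schema.type());
    }
    return (byte[]) value;
}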
Example 9
Source File: KafkaMetaDataStrategy.java From kafka-connect-mongodb with Apache License 2.0
@Override
public BsonValue generateId(SinkDocument doc, SinkRecord orig) {
    return new BsonString(orig.topic() + DELIMITER + orig.kafkaPartition()
            + DELIMITER + orig.kafkaOffset());
}
Example 10
Source File: GcsSinkTaskGroupByTopicPartitionPropertiesTest.java From aiven-kafka-connect-gcs with GNU Affero General Public License v3.0
private String createFilename(final SinkRecord record) {
    return PREFIX + record.topic() + "-" + record.kafkaPartition() + "-" + record.kafkaOffset();
}
Example 11
Source File: KafkaMetaDataStrategy.java From mongo-kafka with Apache License 2.0
@Override
public BsonValue generateId(final SinkDocument doc, final SinkRecord orig) {
    return new BsonString(
            orig.topic() + DELIMITER + orig.kafkaPartition() + DELIMITER + orig.kafkaOffset());
}
Example 12
Source File: JsonRecordParser.java From kafka-connect-zeebe with Apache License 2.0
private String generateId(final SinkRecord record) {
    return record.topic() + ":" + record.kafkaPartition() + ":" + record.kafkaOffset();
}
Example 13
Source File: MongoDbWriter.java From MongoDb-Sink-Connector with Apache License 2.0
KafkaDocument(SinkRecord record) {
    this.record = record;
    partition = new TopicPartition(record.topic(), record.kafkaPartition());
    offset = record.kafkaOffset();
}