Java Code Examples for org.apache.kafka.connect.sink.SinkRecord#kafkaOffset()
The following examples show how to use
org.apache.kafka.connect.sink.SinkRecord#kafkaOffset().
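Before the project examples, here is a minimal sketch of where the call typically sits. kafkaOffset() returns the record's offset, as a long, within the topic partition it was read from; the class name LoggingSinkTask and the System.out logging below are illustrative, not taken from any of the projects that follow.

import java.util.Collection;
import java.util.Map;

import org.apache.kafka.connect.sink.SinkRecord;
import org.apache.kafka.connect.sink.SinkTask;

public class LoggingSinkTask extends SinkTask {

    @Override
    public void put(Collection<SinkRecord> records) {
        for (SinkRecord record : records) {
            // topic + partition + kafkaOffset() uniquely identifies a record,
            // which is why several of the examples below combine the three.
            System.out.printf("%s-%d @ offset %d%n",
                    record.topic(), record.kafkaPartition(), record.kafkaOffset());
        }
    }

    @Override
    public void start(Map<String, String> props) {
    }

    @Override
    public void stop() {
    }

    @Override
    public String version() {
        return "1.0";
    }
}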
Example 1
Source File: PlainPayload.java From kafka-connect-lambda with Apache License 2.0
public PlainPayload(final SinkRecord record) {
    this.key = record.key() == null ? "" : record.key().toString();
    if (record.keySchema() != null)
        this.keySchemaName = record.keySchema().name();

    this.value = record.value() == null ? "" : record.value().toString();
    if (record.valueSchema() != null)
        this.valueSchemaName = record.valueSchema().name();

    this.topic = record.topic();
    this.partition = record.kafkaPartition();
    this.offset = record.kafkaOffset();

    if (record.timestamp() != null)
        this.timestamp = record.timestamp();
    if (record.timestampType() != null)
        this.timestampTypeName = record.timestampType().name;
}
Example 2
Source File: BackupSinkTask.java From kafka-backup with Apache License 2.0
@Override
public void put(Collection<SinkRecord> records) {
    try {
        for (SinkRecord sinkRecord : records) {
            TopicPartition topicPartition = new TopicPartition(sinkRecord.topic(), sinkRecord.kafkaPartition());
            PartitionWriter partition = partitionWriters.get(topicPartition);
            partition.append(Record.fromSinkRecord(sinkRecord));
            if (sinkRecord.kafkaOffset() % 100 == 0) {
                log.debug("Backed up Topic {}, Partition {}, up to offset {}",
                        sinkRecord.topic(), sinkRecord.kafkaPartition(), sinkRecord.kafkaOffset());
            }
            if (config.snapShotMode()) {
                currentOffsets.put(topicPartition, sinkRecord.kafkaOffset());
            }
        }

        // Todo: refactor to own worker. E.g. using the scheduler of MM2
        offsetSink.syncConsumerGroups();
        offsetSink.syncOffsets();

        if (config.snapShotMode()) {
            terminateIfCompleted();
        }
    } catch (IOException | SegmentIndex.IndexException | PartitionIndex.IndexException | SegmentWriter.SegmentException e) {
        throw new RuntimeException(e);
    }
}
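Two details are worth noting here: the kafkaOffset() % 100 check throttles the debug log to every hundredth offset, and in snapshot mode the latest offset seen per partition is recorded in currentOffsets, presumably so that terminateIfCompleted() can decide when the backup has caught up.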
Example 3
Source File: SchemaUtils.java From streamx with Apache License 2.0
public static SinkRecord project(SinkRecord record, Schema currentSchema, Compatibility compatibility) {
    switch (compatibility) {
        case BACKWARD:
        case FULL:
        case FORWARD:
            Schema sourceSchema = record.valueSchema();
            Object value = record.value();
            if (sourceSchema == currentSchema || sourceSchema.equals(currentSchema)) {
                return record;
            }
            Object projected = SchemaProjector.project(sourceSchema, value, currentSchema);
            return new SinkRecord(record.topic(), record.kafkaPartition(), record.keySchema(),
                    record.key(), currentSchema, projected, record.kafkaOffset());
        default:
            return record;
    }
}
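Note that the projected record is rebuilt with the original record's kafkaOffset(), so schema projection is transparent to any offset bookkeeping happening downstream.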
Example 4
Source File: CouchbaseSinkTask.java From kafka-connect-couchbase with Apache License 2.0
private static String documentIdFromKafkaMetadata(SinkRecord record) {
    Object key = record.key();

    if (key instanceof String || key instanceof Number || key instanceof Boolean) {
        return key.toString();
    }

    if (key instanceof byte[]) {
        return new String((byte[]) key, UTF_8);
    }

    if (key instanceof ByteBuffer) {
        return toString((ByteBuffer) key);
    }

    return record.topic() + "/" + record.kafkaPartition() + "/" + record.kafkaOffset();
}
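The topic/partition/offset fallback yields a deterministic document ID even when the key is null or of an unsupported type, so reprocessing the same record overwrites the same Couchbase document instead of creating a duplicate.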
Example 5
Source File: Payload.java From kafka-connect-lambda with Apache License 2.0
public Payload(final SinkRecord record) {
    if (record.keySchema() != null) {
        this.keySchemaName = record.keySchema().name();
        if (record.keySchema().version() != null) {
            this.keySchemaVersion = record.keySchema().version().toString();
        }
    }

    if (record.valueSchema() != null) {
        this.valueSchemaName = record.valueSchema().name();
        if (record.valueSchema().version() != null) {
            this.valueSchemaVersion = record.valueSchema().version().toString();
        }
    }

    this.topic = record.topic();
    this.partition = record.kafkaPartition();
    this.offset = record.kafkaOffset();

    if (record.timestamp() != null) {
        this.timestamp = record.timestamp();
    }
    if (record.timestampType() != null) {
        this.timestampTypeName = record.timestampType().name;
    }
}
Example 6
Source File: SnowflakeSinkServiceV1.java From snowflake-kafka-connector with Apache License 2.0
private void insert(SinkRecord record) {
    String data = recordService.processRecord(record);

    if (bufferSize == 0) {
        firstOffset = record.kafkaOffset();
    }

    stringBuilder.append(data);
    numOfRecord++;
    bufferSize += data.length() * 2; // 1 char = 2 bytes
    lastOffset = record.kafkaOffset();
}
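Recording firstOffset when the buffer is empty and lastOffset on every insert presumably lets the service tie each flushed chunk back to the exact offset range it covers, so ingestion can later be resumed or deduplicated by offset.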
Example 7
Source File: Record.java From kafka-backup with Apache License 2.0
public static Record fromSinkRecord(SinkRecord sinkRecord) {
    byte[] key = connectDataToBytes(sinkRecord.keySchema(), sinkRecord.key());
    byte[] value = connectDataToBytes(sinkRecord.valueSchema(), sinkRecord.value());
    RecordHeaders recordHeaders = new RecordHeaders();
    for (org.apache.kafka.connect.header.Header connectHeader : sinkRecord.headers()) {
        byte[] headerValue = connectDataToBytes(connectHeader.schema(), connectHeader.value());
        recordHeaders.add(connectHeader.key(), headerValue);
    }
    return new Record(sinkRecord.topic(), sinkRecord.kafkaPartition(), key, value,
            sinkRecord.kafkaOffset(), sinkRecord.timestamp(), sinkRecord.timestampType(), recordHeaders);
}
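The offset, timestamp, timestamp type, and headers are all carried into the backup Record, which is what lets the task in Example 2 write offset-addressable copies of each message.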
Example 8
Source File: KafkaMetaDataStrategy.java From kafka-connect-mongodb with Apache License 2.0
@Override
public BsonValue generateId(SinkDocument doc, SinkRecord orig) {
    return new BsonString(orig.topic() + DELIMITER
            + orig.kafkaPartition() + DELIMITER
            + orig.kafkaOffset());
}
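Because a topic, partition, and offset together identify a Kafka record uniquely, an ID built this way stays stable across redeliveries of the same record. With a DELIMITER of, say, "#" (the actual value is defined elsewhere in the class), a record at offset 42 of partition 3 of topic orders would get the ID "orders#3#42".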
Example 9
Source File: TopicPartitionWriter.java From streamx with Apache License 2.0
private void writeRecord(SinkRecord record) throws IOException {
    long expectedOffset = offset + recordCounter;
    if (offset == -1) {
        offset = record.kafkaOffset();
    } else if (record.kafkaOffset() != expectedOffset) {
        // Currently it's possible to see stale data with the wrong offset after a rebalance when you
        // rewind, which we do since we manage our own offsets. See KAFKA-2894.
        if (!sawInvalidOffset) {
            log.info(
                    "Ignoring stale out-of-order record in {}-{}. Has offset {} instead of expected offset {}",
                    record.topic(), record.kafkaPartition(), record.kafkaOffset(), expectedOffset);
        }
        sawInvalidOffset = true;
        return;
    }

    if (sawInvalidOffset) {
        log.info(
                "Recovered from stale out-of-order records in {}-{} with offset {}",
                record.topic(), record.kafkaPartition(), expectedOffset);
        sawInvalidOffset = false;
    }

    String encodedPartition = partitioner.encodePartition(record);
    RecordWriter<SinkRecord> writer = getWriter(record, encodedPartition);
    writer.write(record);

    if (!startOffsets.containsKey(encodedPartition)) {
        startOffsets.put(encodedPartition, record.kafkaOffset());
        offsets.put(encodedPartition, record.kafkaOffset());
    } else {
        offsets.put(encodedPartition, record.kafkaOffset());
    }
    recordCounter++;
}
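The expectedOffset guard exists because a connector that manages its own offsets may rewind after a rebalance and see records it has already written (KAFKA-2894, per the comment); comparing each record's kafkaOffset() against the next expected offset lets the writer silently drop those stale redeliveries.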
Example 10
Source File: GcsSinkTaskGroupByTopicPartitionPropertiesTest.java From aiven-kafka-connect-gcs with GNU Affero General Public License v3.0
private String createFilename(final SinkRecord record) {
    return PREFIX + record.topic() + "-" + record.kafkaPartition() + "-" + record.kafkaOffset();
}
Example 11
Source File: KafkaMetaDataStrategy.java From mongo-kafka with Apache License 2.0
@Override
public BsonValue generateId(final SinkDocument doc, final SinkRecord orig) {
    return new BsonString(
            orig.topic() + DELIMITER + orig.kafkaPartition() + DELIMITER + orig.kafkaOffset());
}
Example 12
Source File: JsonRecordParser.java From kafka-connect-zeebe with Apache License 2.0
private String generateId(final SinkRecord record) {
    return record.topic() + ":" + record.kafkaPartition() + ":" + record.kafkaOffset();
}
Example 13
Source File: MongoDbWriter.java From MongoDb-Sink-Connector with Apache License 2.0
KafkaDocument(SinkRecord record) {
    this.record = record;
    partition = new TopicPartition(record.topic(), record.kafkaPartition());
    offset = record.kafkaOffset();
}