Java Code Examples for org.apache.kafka.connect.sink.SinkRecord#key()
The following examples show how to use org.apache.kafka.connect.sink.SinkRecord#key().
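SinkRecord#key() returns the record's deserialized key, which may be null and whose concrete type depends on the connector's configured key converter. As a minimal, self-contained sketch before the project examples: the class name ExampleSinkTask and the process() helper below are illustrative assumptions, not taken from any of the projects that follow.

import java.util.Collection;
import org.apache.kafka.connect.sink.SinkRecord;
import org.apache.kafka.connect.sink.SinkTask;

// Minimal sketch of reading SinkRecord#key() inside a sink task's put() method.
// The class name and the downstream handling are illustrative assumptions.
public abstract class ExampleSinkTask extends SinkTask {
    @Override
    public void put(Collection<SinkRecord> records) {
        for (SinkRecord record : records) {
            Object key = record.key(); // may be null
            // Keys are commonly turned into a String identifier, guarding against null
            // by falling back to the record's coordinates.
            String id = (key == null)
                ? record.topic() + "/" + record.kafkaPartition() + "/" + record.kafkaOffset()
                : key.toString();
            process(id, record.value()); // hypothetical downstream handler
        }
    }

    // Hypothetical helper; a real connector would write to its target system here.
    protected abstract void process(String id, Object value);
}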
Example 1
Source File: PlainPayload.java From kafka-connect-lambda with Apache License 2.0
public PlainPayload(final SinkRecord record) {
    this.key = record.key() == null ? "" : record.key().toString();
    if (record.keySchema() != null)
        this.keySchemaName = record.keySchema().name();

    this.value = record.value() == null ? "" : record.value().toString();
    if (record.valueSchema() != null)
        this.valueSchemaName = record.valueSchema().name();

    this.topic = record.topic();
    this.partition = record.kafkaPartition();
    this.offset = record.kafkaOffset();

    if (record.timestamp() != null)
        this.timestamp = record.timestamp();
    if (record.timestampType() != null)
        this.timestampTypeName = record.timestampType().name;
}
Example 2
Source File: SinkConverter.java From mongo-kafka with Apache License 2.0
public SinkDocument convert(final SinkRecord record) {
    LOGGER.debug(record.toString());

    BsonDocument keyDoc = null;
    if (record.key() != null) {
        keyDoc = new LazyBsonDocument(
            () -> getRecordConverter(record.key(), record.keySchema())
                .convert(record.keySchema(), record.key()));
    }

    BsonDocument valueDoc = null;
    if (record.value() != null) {
        valueDoc = new LazyBsonDocument(
            () -> getRecordConverter(record.value(), record.valueSchema())
                .convert(record.valueSchema(), record.value()));
    }

    return new SinkDocument(keyDoc, valueDoc);
}
Example 3
Source File: SinkConverter.java From kafka-connect-mongodb with Apache License 2.0
public SinkDocument convert(SinkRecord record) {
    logger.debug(record.toString());

    BsonDocument keyDoc = null;
    if (record.key() != null) {
        keyDoc = getRecordConverter(record.key(), record.keySchema())
            .convert(record.keySchema(), record.key());
    }

    BsonDocument valueDoc = null;
    if (record.value() != null) {
        valueDoc = getRecordConverter(record.value(), record.valueSchema())
            .convert(record.valueSchema(), record.value());
    }

    return new SinkDocument(keyDoc, valueDoc);
}
Example 4
Source File: SchemaUtils.java From streamx with Apache License 2.0
public static SinkRecord project(SinkRecord record, Schema currentSchema, Compatibility compatibility) {
    switch (compatibility) {
        case BACKWARD:
        case FULL:
        case FORWARD:
            Schema sourceSchema = record.valueSchema();
            Object value = record.value();
            if (sourceSchema == currentSchema || sourceSchema.equals(currentSchema)) {
                return record;
            }
            Object projected = SchemaProjector.project(sourceSchema, value, currentSchema);
            return new SinkRecord(record.topic(), record.kafkaPartition(), record.keySchema(),
                record.key(), currentSchema, projected, record.kafkaOffset());
        default:
            return record;
    }
}
Example 5
Source File: CouchbaseSinkTask.java From kafka-connect-couchbase with Apache License 2.0
private static String documentIdFromKafkaMetadata(SinkRecord record) {
    Object key = record.key();

    if (key instanceof String || key instanceof Number || key instanceof Boolean) {
        return key.toString();
    }

    if (key instanceof byte[]) {
        return new String((byte[]) key, UTF_8);
    }

    if (key instanceof ByteBuffer) {
        return toString((ByteBuffer) key);
    }

    return record.topic() + "/" + record.kafkaPartition() + "/" + record.kafkaOffset();
}
Example 6
Source File: IgniteSinkTask.java From ignite with Apache License 2.0
/**
 * Buffers records.
 *
 * @param records Records to inject into grid.
 */
@SuppressWarnings("unchecked")
@Override public void put(Collection<SinkRecord> records) {
    try {
        for (SinkRecord record : records) {
            // Data is flushed asynchronously when CACHE_PER_NODE_DATA_SIZE is reached.
            if (extractor != null) {
                Map.Entry<Object, Object> entry = extractor.extract(record);
                StreamerContext.getStreamer().addData(entry.getKey(), entry.getValue());
            }
            else {
                if (record.key() != null) {
                    StreamerContext.getStreamer().addData(record.key(), record.value());
                }
                else {
                    log.error("Failed to stream a record with null key!");
                }
            }
        }
    }
    catch (ConnectException e) {
        log.error("Failed adding record", e);
        throw new ConnectException(e);
    }
}
Example 7
Source File: JsonPayloadFormatter.java From kafka-connect-lambda with Apache License 2.0
private Payload<Object, Object> recordToPayload(final SinkRecord record) {
    Object deserializedKey;
    Object deserializedValue;

    if (record.keySchema() == null) {
        deserializedKey = record.key();
    } else {
        deserializedKey = deserialize(keySchemaVisibility, record.topic(), record.keySchema(), record.key());
    }

    if (record.valueSchema() == null) {
        deserializedValue = record.value();
    } else {
        deserializedValue = deserialize(valueSchemaVisibility, record.topic(), record.valueSchema(), record.value());
    }

    Payload<Object, Object> payload = new Payload<>(record);
    payload.setKey(deserializedKey);
    payload.setValue(deserializedValue);

    if (keySchemaVisibility == SchemaVisibility.NONE) {
        payload.setKeySchemaName(null);
        payload.setKeySchemaVersion(null);
    }
    if (valueSchemaVisibility == SchemaVisibility.NONE) {
        payload.setValueSchemaName(null);
        payload.setValueSchemaVersion(null);
    }

    return payload;
}
Example 8
Source File: KeyRecordGrouper.java From aiven-kafka-connect-gcs with GNU Affero General Public License v3.0
private String generateRecordKey(final SinkRecord record) {
    final Supplier<String> setKey = () -> {
        if (record.key() == null) {
            return "null";
        } else if (record.keySchema().type() == Schema.Type.STRING) {
            return (String) record.key();
        } else {
            return record.key().toString();
        }
    };

    return filenameTemplate.instance()
        .bindVariable(FilenameTemplateVariable.KEY.name, setKey)
        .render();
}
Example 9
Source File: KeyWriter.java From aiven-kafka-connect-gcs with GNU Affero General Public License v3.0
/**
 * Takes the {@link SinkRecord}'s key as a byte array.
 *
 * <p>If the key is {@code null}, it outputs nothing.
 *
 * <p>If the key is not {@code null}, it assumes the key <b>is</b> a byte array.
 *
 * @param record       the record to get the key from
 * @param outputStream the stream to write to
 * @throws DataException when the key is not actually a byte array
 */
@Override
public void write(final SinkRecord record, final OutputStream outputStream) throws IOException {
    Objects.requireNonNull(record, "record cannot be null");
    Objects.requireNonNull(record.keySchema(), "key schema cannot be null");
    Objects.requireNonNull(outputStream, "outputStream cannot be null");

    if (record.keySchema().type() != Schema.Type.BYTES
            && record.keySchema().type() != Schema.Type.STRING) {
        final String msg = String.format("Record key schema type must be %s or %s, %s given",
            Schema.Type.BYTES, Schema.Type.STRING, record.keySchema().type());
        throw new DataException(msg);
    }

    // Do nothing if the key is null.
    if (record.key() == null) {
        return;
    }

    if (record.key() instanceof byte[]) {
        outputStream.write(Base64.getEncoder().encode((byte[]) record.key()));
    } else if (record.key() instanceof String) {
        outputStream.write(Base64.getEncoder().encode(((String) record.key()).getBytes()));
    } else {
        throw new DataException("Key is not byte[] or String");
    }
}
Example 10
Source File: RecordService.java From snowflake-kafka-connector with Apache License 2.0
private void putKey(SinkRecord record, ObjectNode meta) {
    if (record.key() == null) {
        return;
    }

    if (record.keySchema().toString().equals(Schema.STRING_SCHEMA.toString())) {
        meta.put(KEY, record.key().toString());
    } else if (SnowflakeJsonSchema.NAME.equals(record.keySchema().name())) {
        if (!(record.key() instanceof SnowflakeRecordContent)) {
            throw SnowflakeErrors.ERROR_0010.getException(
                "Input record key should be SnowflakeRecordContent object if key schema is SNOWFLAKE_JSON_SCHEMA");
        }

        SnowflakeRecordContent keyContent = (SnowflakeRecordContent) record.key();
        ArrayNode keyNode = MAPPER.createArrayNode();
        keyNode.addAll(Arrays.asList(keyContent.getData()));
        meta.set(KEY, keyNode);

        if (keyContent.getSchemaID() != SnowflakeRecordContent.NON_AVRO_SCHEMA) {
            meta.put(KEY_SCHEMA_ID, keyContent.getSchemaID());
        }
    } else {
        throw SnowflakeErrors.ERROR_0010.getException(
            "Unsupported Key format, please implement either String Key Converter or Snowflake Converters");
    }
}
Example 11
Source File: AmazonKinesisSinkTask.java From kinesis-kafka-connector with Apache License 2.0
@Override
public void put(Collection<SinkRecord> sinkRecords) {
    // If KinesisProducers cannot write to Kinesis Streams (because of connectivity
    // issues, access issues, or misconfigured shards), we will pause consumption of
    // messages till the backlog is cleared.
    validateOutStandingRecords();

    String partitionKey;
    for (SinkRecord sinkRecord : sinkRecords) {
        ListenableFuture<UserRecordResult> f;

        // Kinesis does not allow an empty partition key.
        if (sinkRecord.key() != null && !sinkRecord.key().toString().trim().equals("")) {
            partitionKey = sinkRecord.key().toString().trim();
        } else {
            partitionKey = Integer.toString(sinkRecord.kafkaPartition());
        }

        if (singleKinesisProducerPerPartition)
            f = addUserRecord(producerMap.get(sinkRecord.kafkaPartition() + "@" + sinkRecord.topic()),
                streamName, partitionKey, usePartitionAsHashKey, sinkRecord);
        else
            f = addUserRecord(kinesisProducer, streamName, partitionKey, usePartitionAsHashKey, sinkRecord);

        Futures.addCallback(f, callback);
    }
}
Example 12
Source File: PatternRenameTest.java From kafka-connect-transform-common with Apache License 2.0
@Test
public void schemaLess() {
    this.transformation.configure(
        ImmutableMap.of(
            PatternRenameConfig.FIELD_PATTERN_CONF, "\\.",
            PatternRenameConfig.FIELD_REPLACEMENT_CONF, "_"
        )
    );

    final Map<String, Object> input = ImmutableMap.of(
        "first.name", "example",
        "last.name", "user"
    );
    final Map<String, Object> expected = ImmutableMap.of(
        "first_name", "example",
        "last_name", "user"
    );

    final Object key = isKey ? input : null;
    final Object value = isKey ? null : input;
    final Schema keySchema = null;
    final Schema valueSchema = null;

    final SinkRecord inputRecord = new SinkRecord(
        TOPIC,
        1,
        keySchema,
        key,
        valueSchema,
        value,
        1234L
    );

    final SinkRecord outputRecord = this.transformation.apply(inputRecord);
    assertNotNull(outputRecord);

    final Map<String, Object> actual =
        (Map<String, Object>) (isKey ? outputRecord.key() : outputRecord.value());
    assertMap(expected, actual, "");
}
Example 13
Source File: PatternRenameTest.java From kafka-connect-transform-common with Apache License 2.0
@Test
public void prefixed() {
    this.transformation.configure(
        ImmutableMap.of(
            PatternRenameConfig.FIELD_PATTERN_CONF, "^prefixed",
            PatternRenameConfig.FIELD_REPLACEMENT_CONF, ""
        )
    );

    Schema inputSchema = SchemaBuilder.struct()
        .name("testing")
        .field("prefixedfirstname", Schema.STRING_SCHEMA)
        .field("prefixedlastname", Schema.STRING_SCHEMA);
    Struct inputStruct = new Struct(inputSchema)
        .put("prefixedfirstname", "example")
        .put("prefixedlastname", "user");

    final Object key = isKey ? inputStruct : null;
    final Object value = isKey ? null : inputStruct;
    final Schema keySchema = isKey ? inputSchema : null;
    final Schema valueSchema = isKey ? null : inputSchema;

    final SinkRecord inputRecord = new SinkRecord(
        TOPIC,
        1,
        keySchema,
        key,
        valueSchema,
        value,
        1234L
    );

    final SinkRecord outputRecord = this.transformation.apply(inputRecord);
    assertNotNull(outputRecord);

    final Schema actualSchema = isKey ? outputRecord.keySchema() : outputRecord.valueSchema();
    final Struct actualStruct = (Struct) (isKey ? outputRecord.key() : outputRecord.value());

    final Schema expectedSchema = SchemaBuilder.struct()
        .name("testing")
        .field("firstname", Schema.STRING_SCHEMA)
        .field("lastname", Schema.STRING_SCHEMA);
    Struct expectedStruct = new Struct(expectedSchema)
        .put("firstname", "example")
        .put("lastname", "user");

    assertSchema(expectedSchema, actualSchema);
    assertStruct(expectedStruct, actualStruct);
}