Java Code Examples for org.apache.kafka.connect.data.Schema#OPTIONAL_BYTES_SCHEMA
The following examples show how to use org.apache.kafka.connect.data.Schema#OPTIONAL_BYTES_SCHEMA. Each example notes its source file, originating project, and license.
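Before the examples, it helps to recall what the constant denotes: OPTIONAL_BYTES_SCHEMA is the predefined optional variant of the primitive bytes schema, so a record carrying it may have a null key or value. A minimal sketch (the class name OptionalBytesDemo is only for illustration):

import org.apache.kafka.connect.data.Schema;
import org.apache.kafka.connect.data.SchemaBuilder;

public class OptionalBytesDemo {
    public static void main(String[] args) {
        // The constant is equivalent to building the optional bytes schema by hand.
        Schema builtByHand = SchemaBuilder.bytes().optional().build();

        System.out.println(Schema.OPTIONAL_BYTES_SCHEMA.type());             // BYTES
        System.out.println(Schema.OPTIONAL_BYTES_SCHEMA.isOptional());       // true
        System.out.println(builtByHand.equals(Schema.OPTIONAL_BYTES_SCHEMA)); // true
    }
}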
Example 1
Source File: KafkaMonitor.java From mirus with BSD 3-Clause "New" or "Revised" License
private String applyRoutersToTopic(String topic) {
    TopicPartition topicPartition = new TopicPartition(topic, 0);
    Map<String, Object> sourcePartition = TopicPartitionSerDe.asMap(topicPartition);
    SourceRecord record =
        new SourceRecord(
            sourcePartition,
            null,
            topicPartition.topic(),
            topicPartition.partition(),
            Schema.BYTES_SCHEMA,
            null,
            Schema.OPTIONAL_BYTES_SCHEMA,
            null);
    for (Transformation<SourceRecord> transform : this.routers) {
        record = transform.apply(record);
    }
    return record.topic();
}
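The routers applied above are ordinary Connect transformations. A minimal sketch, assuming Kafka Connect's built-in RegexRouter stands in for one of them (the regex and replacement values are illustrative, not mirus configuration):

import java.util.HashMap;
import java.util.Map;
import org.apache.kafka.connect.source.SourceRecord;
import org.apache.kafka.connect.transforms.RegexRouter;

public class RouterSketch {
    public static void main(String[] args) {
        Map<String, String> props = new HashMap<>();
        props.put("regex", "(.*)");            // match any source topic
        props.put("replacement", "mirror.$1"); // hypothetical destination naming scheme

        RegexRouter<SourceRecord> router = new RegexRouter<>();
        router.configure(props);
        // applyRoutersToTopic() passes a dummy record through a list of transforms
        // like this one and reads back record.topic() to learn the rewritten name.
    }
}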
Example 2
Source File: KafkaSinkTask.java From common-kafka with Apache License 2.0
@Override
public void put(Collection<SinkRecord> collection) {
    // Any retriable exception thrown here will be attempted again and not cause the task to pause
    for (SinkRecord sinkRecord : collection) {
        if (sinkRecord.keySchema() != Schema.OPTIONAL_BYTES_SCHEMA
                || sinkRecord.valueSchema() != Schema.OPTIONAL_BYTES_SCHEMA)
            throw new IllegalStateException("Expected sink record key/value to be optional bytes, but saw instead key: "
                    + sinkRecord.keySchema() + " value: " + sinkRecord.valueSchema() + ". Must use converter: "
                    + "org.apache.kafka.connect.converters.ByteArrayConverter");

        LOGGER.debug("Sending record {}", sinkRecord);

        try {
            producer.send(new ProducerRecord<>(sinkRecord.topic(), sinkRecord.kafkaPartition(),
                    (byte[]) sinkRecord.key(), (byte[]) sinkRecord.value()));
        } catch (KafkaException e) {
            // If send throws an exception ensure we always retry the record/collection
            throw new RetriableException(e);
        }
    }
}
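The IllegalStateException message points at the fix: the connector must be configured with Kafka's ByteArrayConverter, which hands every key and value to the task under OPTIONAL_BYTES_SCHEMA. A sketch of that configuration expressed as a Java map (key.converter and value.converter are standard Connect settings):

import java.util.HashMap;
import java.util.Map;

public class ConverterConfigSketch {
    public static void main(String[] args) {
        Map<String, String> connectorProps = new HashMap<>();
        connectorProps.put("key.converter", "org.apache.kafka.connect.converters.ByteArrayConverter");
        connectorProps.put("value.converter", "org.apache.kafka.connect.converters.ByteArrayConverter");
        // With these converters in place, sinkRecord.keySchema() and
        // sinkRecord.valueSchema() are both Schema.OPTIONAL_BYTES_SCHEMA,
        // so the check in put() passes.
    }
}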
Example 3
Source File: UnivocityFileReader.java From kafka-connect-fs with Apache License 2.0
private Schema strToSchema(String dataType) {
    switch (DataType.valueOf(dataType.trim().toUpperCase())) {
        case BYTE:
            return dataTypeMappingError && !allowNulls ? Schema.INT8_SCHEMA : Schema.OPTIONAL_INT8_SCHEMA;
        case SHORT:
            return dataTypeMappingError && !allowNulls ? Schema.INT16_SCHEMA : Schema.OPTIONAL_INT16_SCHEMA;
        case INT:
            return dataTypeMappingError && !allowNulls ? Schema.INT32_SCHEMA : Schema.OPTIONAL_INT32_SCHEMA;
        case LONG:
            return dataTypeMappingError && !allowNulls ? Schema.INT64_SCHEMA : Schema.OPTIONAL_INT64_SCHEMA;
        case FLOAT:
            return dataTypeMappingError && !allowNulls ? Schema.FLOAT32_SCHEMA : Schema.OPTIONAL_FLOAT32_SCHEMA;
        case DOUBLE:
            return dataTypeMappingError && !allowNulls ? Schema.FLOAT64_SCHEMA : Schema.OPTIONAL_FLOAT64_SCHEMA;
        case BOOLEAN:
            return dataTypeMappingError && !allowNulls ? Schema.BOOLEAN_SCHEMA : Schema.OPTIONAL_BOOLEAN_SCHEMA;
        case BYTES:
            return dataTypeMappingError && !allowNulls ? Schema.BYTES_SCHEMA : Schema.OPTIONAL_BYTES_SCHEMA;
        case STRING:
        default:
            return dataTypeMappingError && !allowNulls ? Schema.STRING_SCHEMA : Schema.OPTIONAL_STRING_SCHEMA;
    }
}
Example 4
Source File: ToJSON.java From kafka-connect-transform-common with Apache License 2.0
SchemaAndValue schemaAndValue(Schema inputSchema, Object input) {
    final byte[] buffer = this.converter.fromConnectData("dummy", inputSchema, input);
    final Schema schema;
    final Object value;

    switch (this.config.outputSchema) {
        case STRING:
            value = new String(buffer, Charsets.UTF_8);
            schema = Schema.OPTIONAL_STRING_SCHEMA;
            break;
        case BYTES:
            value = buffer;
            schema = Schema.OPTIONAL_BYTES_SCHEMA;
            break;
        default:
            throw new UnsupportedOperationException(
                String.format(
                    "Schema type (%s)'%s' is not supported.",
                    ToJSONConfig.OUTPUT_SCHEMA_CONFIG,
                    this.config.outputSchema
                )
            );
    }

    return new SchemaAndValue(schema, value);
}
Example 5
Source File: BackupSourceTask.java From kafka-backup with Apache License 2.0
private SourceRecord toSourceRecord(Record record) {
    Map<String, String> sourcePartition = new HashMap<>();
    sourcePartition.put(SOURCE_PARTITION_PARTITION, record.kafkaPartition().toString());
    sourcePartition.put(SOURCE_PARTITION_TOPIC, record.topic());

    Map<String, Long> sourceOffset = Collections.singletonMap(SOURCE_OFFSET_OFFSET, record.kafkaOffset());

    ConnectHeaders connectHeaders = new ConnectHeaders();
    for (Header header : record.headers()) {
        connectHeaders.addBytes(header.key(), header.value());
    }

    return new SourceRecord(sourcePartition, sourceOffset,
            record.topic(), record.kafkaPartition(),
            Schema.OPTIONAL_BYTES_SCHEMA, record.key(),
            Schema.OPTIONAL_BYTES_SCHEMA, record.value(),
            record.timestamp(), connectHeaders);
}
Example 6
Source File: Record.java From kafka-backup with Apache License 2.0
public SinkRecord toSinkRecord() {
    ConnectHeaders connectHeaders = new ConnectHeaders();
    for (Header header : headers) {
        connectHeaders.addBytes(header.key(), header.value());
    }
    return new SinkRecord(topic, kafkaPartition,
            Schema.OPTIONAL_BYTES_SCHEMA, key,
            Schema.OPTIONAL_BYTES_SCHEMA, value,
            kafkaOffset, timestamp, timestampType, connectHeaders);
}
Example 7
Source File: KafkaSinkTaskTest.java From common-kafka with Apache License 2.0
@Before
public void before() {
    task = new KafkaSinkTaskTester();

    // Doesn't matter what we provide it since everything is mocked, just need to call start
    task.start(Collections.emptyMap());

    sinkRecord = new SinkRecord("topic", 0, Schema.OPTIONAL_BYTES_SCHEMA, "key".getBytes(),
            Schema.OPTIONAL_BYTES_SCHEMA, "value".getBytes(), 0L);

    when(kafkaProducer.send(anyObject())).thenReturn(recordMetadataFuture);
}
Example 8
Source File: DefaultRecordBuilder.java From kafka-connect-mq-source with Apache License 2.0
/**
 * Gets the value schema to use for the Kafka Connect SourceRecord.
 *
 * @param context the JMS context to use for building messages
 * @param topic the Kafka topic
 * @param messageBodyJms whether to interpret MQ messages as JMS messages
 * @param message the message
 *
 * @return the Kafka Connect SourceRecord's value
 *
 * @throws JMSException Message could not be converted
 */
@Override
public SchemaAndValue getValue(JMSContext context, String topic, boolean messageBodyJms,
        Message message) throws JMSException {
    Schema valueSchema = null;
    Object value = null;

    // Interpreting the body as a JMS message type, we can accept BytesMessage and TextMessage only.
    // We do not know the schema so do not specify one.
    if (messageBodyJms) {
        if (message instanceof BytesMessage) {
            log.debug("Bytes message with no schema");
            value = message.getBody(byte[].class);
        } else if (message instanceof TextMessage) {
            log.debug("Text message with no schema");
            value = message.getBody(String.class);
        } else {
            log.error("Unsupported JMS message type {}", message.getClass());
            throw new ConnectException("Unsupported JMS message type");
        }
    } else {
        // Not interpreting the body as a JMS message type, all messages come through as BytesMessage.
        // In this case, we specify the value schema as OPTIONAL_BYTES.
        log.debug("Bytes message with OPTIONAL_BYTES schema");
        valueSchema = Schema.OPTIONAL_BYTES_SCHEMA;
        value = message.getBody(byte[].class);
    }

    return new SchemaAndValue(valueSchema, value);
}
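Whether getValue() takes the JMS branch or the OPTIONAL_BYTES branch is driven by connector configuration. A sketch, assuming the mq.message.body.jms property documented for kafka-connect-mq-source (treat the property name as an assumption here):

import java.util.HashMap;
import java.util.Map;

public class MqBodySketch {
    public static void main(String[] args) {
        Map<String, String> props = new HashMap<>();
        // false => bodies are not interpreted as JMS types; every message arrives as
        // a BytesMessage and the value is published with Schema.OPTIONAL_BYTES_SCHEMA
        props.put("mq.message.body.jms", "false");
    }
}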
Example 9
Source File: ByteArrayConverter.java From streamx with Apache License 2.0
public SchemaAndValue toConnectData(String topic, byte[] value) {
    try {
        return new SchemaAndValue(Schema.OPTIONAL_BYTES_SCHEMA, this.deserializer.deserialize(topic, value));
    } catch (SerializationException var4) {
        throw new DataException("Failed to deserialize byte: ", var4);
    }
}
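The streamx converter above mirrors Kafka's own org.apache.kafka.connect.converters.ByteArrayConverter. A minimal round-trip sketch using the Kafka built-in (the topic name and payload are arbitrary):

import java.nio.charset.StandardCharsets;
import java.util.Collections;
import org.apache.kafka.connect.converters.ByteArrayConverter;
import org.apache.kafka.connect.data.Schema;
import org.apache.kafka.connect.data.SchemaAndValue;

public class RoundTripSketch {
    public static void main(String[] args) {
        ByteArrayConverter converter = new ByteArrayConverter();
        converter.configure(Collections.emptyMap(), false); // false = value converter

        byte[] payload = "hello".getBytes(StandardCharsets.UTF_8);
        SchemaAndValue connectData = converter.toConnectData("my-topic", payload);

        // The converter always reports OPTIONAL_BYTES_SCHEMA, since a byte array may be null.
        System.out.println(connectData.schema().equals(Schema.OPTIONAL_BYTES_SCHEMA)); // true

        byte[] roundTripped = converter.fromConnectData("my-topic",
                connectData.schema(), connectData.value());
        System.out.println(new String(roundTripped, StandardCharsets.UTF_8)); // hello
    }
}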
Example 10
Source File: KafkaSinkTaskTest.java From common-kafka with Apache License 2.0
@Test(expected = IllegalStateException.class)
public void put_recordKeyIsNotNullOrBytes() {
    sinkRecord = new SinkRecord("topic", 0, Schema.STRING_SCHEMA, "key",
            Schema.OPTIONAL_BYTES_SCHEMA, "value".getBytes(), 0L);

    task.put(Collections.singletonList(sinkRecord));
}
Example 11
Source File: KafkaSinkTaskTest.java From common-kafka with Apache License 2.0
@Test(expected = IllegalStateException.class)
public void put_recordValueIsNotNullOrBytes() {
    sinkRecord = new SinkRecord("topic", 0, Schema.OPTIONAL_BYTES_SCHEMA, "key".getBytes(),
            Schema.STRING_SCHEMA, "value", 0L);

    task.put(Collections.singletonList(sinkRecord));
}
Example 12
Source File: BaseRecordBuilder.java From kafka-connect-mq-source with Apache License 2.0
/**
 * Gets the key to use for the Kafka Connect SourceRecord.
 *
 * @param context the JMS context to use for building messages
 * @param topic the Kafka topic
 * @param message the message
 *
 * @return the Kafka Connect SourceRecord's key
 *
 * @throws JMSException Message could not be converted
 */
public SchemaAndValue getKey(JMSContext context, String topic, Message message) throws JMSException {
    Schema keySchema = null;
    Object key = null;
    String keystr;

    switch (keyheader) {
        case MESSAGE_ID:
            keySchema = Schema.OPTIONAL_STRING_SCHEMA;
            keystr = message.getJMSMessageID();
            if (keystr.startsWith("ID:", 0)) {
                key = keystr.substring(3);
            } else {
                key = keystr;
            }
            break;
        case CORRELATION_ID:
            keySchema = Schema.OPTIONAL_STRING_SCHEMA;
            keystr = message.getJMSCorrelationID();
            if (keystr.startsWith("ID:", 0)) {
                key = keystr.substring(3);
            } else {
                key = keystr;
            }
            break;
        case CORRELATION_ID_AS_BYTES:
            keySchema = Schema.OPTIONAL_BYTES_SCHEMA;
            key = message.getJMSCorrelationIDAsBytes();
            break;
        case DESTINATION:
            keySchema = Schema.OPTIONAL_STRING_SCHEMA;
            key = message.getJMSDestination().toString();
            break;
        default:
            break;
    }

    return new SchemaAndValue(keySchema, key);
}
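The keyheader field selecting the branch above comes from connector configuration. A sketch, assuming the mq.record.builder.key.header property and value names documented for kafka-connect-mq-source (treat both as assumptions):

import java.util.HashMap;
import java.util.Map;

public class KeyHeaderSketch {
    public static void main(String[] args) {
        Map<String, String> props = new HashMap<>();
        // JMSCorrelationIDAsBytes selects the CORRELATION_ID_AS_BYTES branch above,
        // so the record key is published with Schema.OPTIONAL_BYTES_SCHEMA.
        props.put("mq.record.builder.key.header", "JMSCorrelationIDAsBytes");
    }
}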
Example 13
Source File: JsonFileReader.java From kafka-connect-fs with Apache License 2.0
private static Schema extractSchema(JsonNode jsonNode) {
    switch (jsonNode.getNodeType()) {
        case BOOLEAN:
            return Schema.OPTIONAL_BOOLEAN_SCHEMA;
        case NUMBER:
            if (jsonNode.isShort()) {
                return Schema.OPTIONAL_INT8_SCHEMA;
            } else if (jsonNode.isInt()) {
                return Schema.OPTIONAL_INT32_SCHEMA;
            } else if (jsonNode.isLong()) {
                return Schema.OPTIONAL_INT64_SCHEMA;
            } else if (jsonNode.isFloat()) {
                return Schema.OPTIONAL_FLOAT32_SCHEMA;
            } else if (jsonNode.isDouble()) {
                return Schema.OPTIONAL_FLOAT64_SCHEMA;
            } else if (jsonNode.isBigInteger()) {
                return Schema.OPTIONAL_INT64_SCHEMA;
            } else if (jsonNode.isBigDecimal()) {
                return Schema.OPTIONAL_FLOAT64_SCHEMA;
            } else {
                return Schema.OPTIONAL_FLOAT64_SCHEMA;
            }
        case STRING:
            return Schema.OPTIONAL_STRING_SCHEMA;
        case BINARY:
            return Schema.OPTIONAL_BYTES_SCHEMA;
        case ARRAY:
            Iterable<JsonNode> elements = jsonNode::elements;
            Schema arraySchema = StreamSupport.stream(elements.spliterator(), false)
                    .findFirst().map(JsonFileReader::extractSchema)
                    .orElse(SchemaBuilder.struct().build());
            return SchemaBuilder.array(arraySchema).build();
        case OBJECT:
            SchemaBuilder builder = SchemaBuilder.struct();
            jsonNode.fields()
                    .forEachRemaining(field -> builder.field(field.getKey(), extractSchema(field.getValue())));
            return builder.build();
        default:
            return SchemaBuilder.struct().optional().build();
    }
}
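Note that Jackson's standard JSON parser does not produce BINARY nodes from plain JSON text; they typically appear when the tree is built programmatically or read from a binary format. A sketch of the one node type that maps to OPTIONAL_BYTES_SCHEMA above (the class name BinaryNodeSketch is illustrative):

import com.fasterxml.jackson.databind.node.BinaryNode;
import com.fasterxml.jackson.databind.node.JsonNodeFactory;

public class BinaryNodeSketch {
    public static void main(String[] args) {
        // A BinaryNode reports JsonNodeType.BINARY, so extractSchema() would
        // return Schema.OPTIONAL_BYTES_SCHEMA for it.
        BinaryNode binary = JsonNodeFactory.instance.binaryNode("raw bytes".getBytes());
        System.out.println(binary.getNodeType()); // BINARY
    }
}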