Java Code Examples for org.apache.kafka.connect.data.Schema#STRING_SCHEMA
The following examples show how to use org.apache.kafka.connect.data.Schema#STRING_SCHEMA.
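Before the examples, a quick orientation: Schema.STRING_SCHEMA is the shared, non-optional string schema constant, used both as the key or value schema of a Connect record and as a field type in SchemaBuilder. The sketch below is not taken from any of the projects listed here; the class name, topic, and field names are purely illustrative.

// Minimal sketch, assuming nothing beyond the public Kafka Connect data API.
import java.util.Collections;

import org.apache.kafka.connect.data.Schema;
import org.apache.kafka.connect.data.SchemaBuilder;
import org.apache.kafka.connect.data.Struct;
import org.apache.kafka.connect.source.SourceRecord;

public class StringSchemaSketch {

  public static SourceRecord buildRecord() {
    // STRING_SCHEMA is the non-optional string type; use OPTIONAL_STRING_SCHEMA
    // for fields that must accept null.
    Schema valueSchema = SchemaBuilder.struct()
        .field("id", Schema.STRING_SCHEMA)
        .field("note", Schema.OPTIONAL_STRING_SCHEMA)
        .build();

    Struct value = new Struct(valueSchema)
        .put("id", "42")
        .put("note", null);

    // String key, struct value; source partition/offset maps are left empty for this sketch.
    return new SourceRecord(Collections.emptyMap(), Collections.emptyMap(), "example-topic",
        Schema.STRING_SCHEMA, "record-key", valueSchema, value);
  }
}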
Example 1
Source File: TopicPartitionCounterTest.java From connect-utils with Apache License 2.0
@Test
public void incrementSinkRecord() {
  final TopicPartition topicPartition = new TopicPartition("test", 1);
  final Map<TopicPartition, Long> expected = ImmutableMap.of(
      topicPartition, 123L
  );

  SinkRecord record = new SinkRecord(
      topicPartition.topic(),
      topicPartition.partition(),
      Schema.STRING_SCHEMA,
      "",
      Schema.STRING_SCHEMA,
      "",
      123L
  );

  this.counter.increment(record);
  assertEquals(expected, this.counter.data());
}
Example 2
Source File: CamelTypeConverterTransformTest.java From camel-kafka-connector with Apache License 2.0
@Test
public void testIfItConvertsConnectRecordCorrectly() {
  final SourceRecord connectRecord = new SourceRecord(Collections.emptyMap(), Collections.emptyMap(), "topic",
      Schema.STRING_SCHEMA, "1234", Schema.STRING_SCHEMA, "TRUE");

  final Map<String, Object> propsForKeySmt = new HashMap<>();
  propsForKeySmt.put(CamelTypeConverterTransform.FIELD_TARGET_TYPE_CONFIG, Integer.class.getName());

  final Map<String, Object> propsForValueSmt = new HashMap<>();
  propsForValueSmt.put(CamelTypeConverterTransform.FIELD_TARGET_TYPE_CONFIG, "java.lang.Boolean");

  final Transformation<SourceRecord> transformationKey = new CamelTypeConverterTransform.Key<>();
  final Transformation<SourceRecord> transformationValue = new CamelTypeConverterTransform.Value<>();

  transformationKey.configure(propsForKeySmt);
  transformationValue.configure(propsForValueSmt);

  final SourceRecord transformedKeySourceRecord = transformationKey.apply(connectRecord);
  final SourceRecord transformedValueSourceRecord = transformationValue.apply(connectRecord);

  assertEquals(1234, transformedKeySourceRecord.key());
  assertEquals(Schema.INT32_SCHEMA, transformedKeySourceRecord.keySchema());

  assertEquals(true, transformedValueSourceRecord.value());
  assertEquals(Schema.BOOLEAN_SCHEMA, transformedValueSourceRecord.valueSchema());
}
Example 3
Source File: SerDeUtil.java From ksql-fork-with-deep-learning-function with Apache License 2.0
private static Schema getKsqlSchemaForAvroSchema(org.apache.avro.Schema avroSchema) {
  switch (avroSchema.getType()) {
    case INT:
      return Schema.INT32_SCHEMA;
    case LONG:
      return Schema.INT64_SCHEMA;
    case DOUBLE:
    case FLOAT:
      return Schema.FLOAT64_SCHEMA;
    case BOOLEAN:
      return Schema.BOOLEAN_SCHEMA;
    case STRING:
      return Schema.STRING_SCHEMA;
    case ARRAY:
      return SchemaBuilder.array(getKsqlSchemaForAvroSchema(avroSchema.getElementType()));
    case MAP:
      return SchemaBuilder.map(Schema.STRING_SCHEMA,
                               getKsqlSchemaForAvroSchema(avroSchema.getValueType()));
    case UNION:
      return handleUnion(avroSchema);
    default:
      throw new KsqlException(
          String.format("KSQL doesn't currently support Avro type: %s", avroSchema.getFullName()));
  }
}
Example 4
Source File: SqlToJavaVisitor.java From ksql-fork-with-deep-learning-function with Apache License 2.0
@Override
protected Pair<String, Schema> visitStringLiteral(
    final StringLiteral node,
    final Boolean unmangleNames
) {
  return new Pair<>("\"" + node.getValue() + "\"", Schema.STRING_SCHEMA);
}
Example 5
Source File: BaseKeyValueTransformationTest.java From connect-utils with Apache License 2.0
@Test
public void test() {
  StringTransformation transformation = new StringTransformation();
  SinkRecord record = new SinkRecord(
      "testing",
      1,
      Schema.STRING_SCHEMA,
      "foo",
      null,
      null,
      123451L
  );
  transformation.apply(record);
}
Example 6
Source File: DataWriterParquetTest.java From streamx with Apache License 2.0
@Test
public void testWriteRecord() throws Exception {
  DataWriter hdfsWriter = new DataWriter(connectorConfig, context, avroData);
  Partitioner partitioner = hdfsWriter.getPartitioner();
  hdfsWriter.recover(TOPIC_PARTITION);

  String key = "key";
  Schema schema = createSchema();
  Struct record = createRecord(schema);

  Collection<SinkRecord> sinkRecords = new ArrayList<>();
  for (long offset = 0; offset < 7; offset++) {
    SinkRecord sinkRecord =
        new SinkRecord(TOPIC, PARTITION, Schema.STRING_SCHEMA, key, schema, record, offset);
    sinkRecords.add(sinkRecord);
  }

  hdfsWriter.write(sinkRecords);
  hdfsWriter.close(assignment);
  hdfsWriter.stop();

  String encodedPartition = "partition=" + String.valueOf(PARTITION);
  String directory = partitioner.generatePartitionedPath(TOPIC, encodedPartition);

  // Last file (offset 6) doesn't satisfy size requirement and gets discarded on close
  long[] validOffsets = {-1, 2, 5};
  for (int i = 1; i < validOffsets.length; i++) {
    long startOffset = validOffsets[i - 1] + 1;
    long endOffset = validOffsets[i];
    Path path = new Path(
        FileUtils.committedFileName(url, topicsDir, directory, TOPIC_PARTITION,
                                    startOffset, endOffset, extension, ZERO_PAD_FMT));
    Collection<Object> records = schemaFileReader.readData(conf, path);
    long size = endOffset - startOffset + 1;
    assertEquals(size, records.size());
    for (Object avroRecord : records) {
      assertEquals(avroData.fromConnectData(schema, record), avroRecord);
    }
  }
}
Example 7
Source File: MongoDbSinkTaskTest.java From MongoDb-Sink-Connector with Apache License 2.0
@Test
public void start() {
  MongoDbSinkTask sinkTask = spy(MongoDbSinkTask.class);
  MongoDbWriter writer = mock(MongoDbWriter.class);
  CreateMongoDbAnswer answer = new CreateMongoDbAnswer(writer);

  //noinspection unchecked
  doAnswer(answer).when(sinkTask)
      .createMongoDbWriter(any(), any(), anyInt(), anyLong(), any(), any());

  Map<String, String> config = new HashMap<>();
  config.put(MONGO_DATABASE, "db");
  config.put(MONGO_HOST, "localhost");
  config.put(TOPICS_CONFIG, "t");
  config.put(BUFFER_CAPACITY, String.valueOf(10));
  config.put(RECORD_CONVERTER, RecordConverterFactory.class.getName());
  sinkTask.start(config);

  assertEquals(1, answer.timesCalled);
  assertEquals(10, answer.foundBuffer.remainingCapacity());
  assertEquals(RecordConverterFactory.class, answer.foundFactory.getClass());

  SinkRecord record = new SinkRecord("t", 1, Schema.STRING_SCHEMA, "k",
      Schema.STRING_SCHEMA, "v", 1000L);
  sinkTask.put(Collections.singleton(record));
  assertEquals(9, answer.foundBuffer.remainingCapacity());

  SinkRecord foundRecord = answer.foundBuffer.poll();
  assertEquals(record, foundRecord);
  assertEquals(10, answer.foundBuffer.remainingCapacity());

  sinkTask.stop();
}
Example 8
Source File: CamelTypeConverterTransformTest.java From camel-kafka-connector with Apache License 2.0
@Test
public void testIfHandlesTypeConvertersFromCamelComponents() {
  // we know we have a type converter from struct to map in dbz component, so we use this for testing
  final Schema schema = SchemaBuilder.struct()
      .field("id", Schema.INT32_SCHEMA)
      .field("name", Schema.STRING_SCHEMA)
      .field("valid", Schema.BOOLEAN_SCHEMA)
      .field("extra", Schema.STRING_SCHEMA)
      .build();

  final Struct value = new Struct(schema);
  value.put("id", 12);
  value.put("name", "test-name");
  value.put("valid", true);

  final SourceRecord connectRecord = new SourceRecord(Collections.emptyMap(), Collections.emptyMap(), "topic",
      Schema.STRING_SCHEMA, "1234", schema, value);

  final Map<String, Object> props = new HashMap<>();
  props.put(CamelTypeConverterTransform.FIELD_TARGET_TYPE_CONFIG, Map.class.getName());

  final Transformation<SourceRecord> transformationValue = new CamelTypeConverterTransform.Value<>();
  transformationValue.configure(props);

  final SourceRecord transformedValueSourceRecord = transformationValue.apply(connectRecord);

  // assert
  assertNotNull(transformedValueSourceRecord);

  final Map<String, Object> outputValue = (Map<String, Object>) transformedValueSourceRecord.value();

  assertEquals(12, outputValue.get("id"));
  assertEquals("test-name", outputValue.get("name"));
  assertNull(outputValue.get("extra"));
  assertTrue((boolean) outputValue.get("valid"));
  assertEquals(Schema.Type.MAP, transformedValueSourceRecord.valueSchema().type());
}
Example 9
Source File: FailureRecoveryTest.java From streamx with Apache License 2.0
@Test
public void testCommitFailure() throws Exception {
  Map<String, String> props = createProps();
  HdfsSinkConnectorConfig connectorConfig = new HdfsSinkConnectorConfig(props);

  String key = "key";
  Schema schema = createSchema();
  Struct record = createRecord(schema);

  Collection<SinkRecord> sinkRecords = new ArrayList<>();
  for (long offset = 0; offset < 7; offset++) {
    SinkRecord sinkRecord =
        new SinkRecord(TOPIC, PARTITION, Schema.STRING_SCHEMA, key, schema, record, offset);
    sinkRecords.add(sinkRecord);
  }

  DataWriter hdfsWriter = new DataWriter(connectorConfig, context, avroData);
  MemoryStorage storage = (MemoryStorage) hdfsWriter.getStorage();
  storage.setFailure(MemoryStorage.Failure.appendFailure);

  hdfsWriter.write(sinkRecords);
  assertEquals(context.timeout(),
      (long) connectorConfig.getLong(HdfsSinkConnectorConfig.RETRY_BACKOFF_CONFIG));

  Map<String, List<Object>> data = Data.getData();

  String logFile = FileUtils.logFileName(url, logsDir, TOPIC_PARTITION);
  List<Object> content = data.get(logFile);
  assertEquals(null, content);

  hdfsWriter.write(new ArrayList<SinkRecord>());
  content = data.get(logFile);
  assertEquals(null, content);

  Thread.sleep(context.timeout());

  hdfsWriter.write(new ArrayList<SinkRecord>());
  content = data.get(logFile);
  assertEquals(6, content.size());

  hdfsWriter.close(assignment);
  hdfsWriter.stop();
}
Example 10
Source File: StringFieldConverter.java From kafka-connect-mongodb with Apache License 2.0
public StringFieldConverter() {
  super(Schema.STRING_SCHEMA);
}
Example 11
Source File: DataWriterAvroTest.java From streamx with Apache License 2.0
@Test
public void testWriteRecord() throws Exception {
  DataWriter hdfsWriter = new DataWriter(connectorConfig, context, avroData);
  Partitioner partitioner = hdfsWriter.getPartitioner();
  hdfsWriter.recover(TOPIC_PARTITION);

  String key = "key";
  Schema schema = createSchema();
  Struct record = createRecord(schema);

  Collection<SinkRecord> sinkRecords = new ArrayList<>();
  for (long offset = 0; offset < 7; offset++) {
    SinkRecord sinkRecord =
        new SinkRecord(TOPIC, PARTITION, Schema.STRING_SCHEMA, key, schema, record, offset);
    sinkRecords.add(sinkRecord);
  }

  hdfsWriter.write(sinkRecords);
  hdfsWriter.close(assignment);
  hdfsWriter.stop();

  String encodedPartition = "partition=" + String.valueOf(PARTITION);
  String directory = partitioner.generatePartitionedPath(TOPIC, encodedPartition);

  // Last file (offset 6) doesn't satisfy size requirement and gets discarded on close
  long[] validOffsets = {-1, 2, 5};
  for (int i = 1; i < validOffsets.length; i++) {
    long startOffset = validOffsets[i - 1] + 1;
    long endOffset = validOffsets[i];
    Path path = new Path(
        FileUtils.committedFileName(url, topicsDir, directory, TOPIC_PARTITION,
                                    startOffset, endOffset, extension, ZERO_PAD_FMT));
    Collection<Object> records = schemaFileReader.readData(conf, path);
    long size = endOffset - startOffset + 1;
    assertEquals(size, records.size());
    for (Object avroRecord : records) {
      assertEquals(avroData.fromConnectData(schema, record), avroRecord);
    }
  }
}
Example 12
Source File: HiveIntegrationAvroTest.java From streamx with Apache License 2.0
@Test
public void testHiveIntegrationFieldPartitionerAvro() throws Exception {
  Map<String, String> props = createProps();
  props.put(HdfsSinkConnectorConfig.HIVE_INTEGRATION_CONFIG, "true");
  props.put(HdfsSinkConnectorConfig.PARTITIONER_CLASS_CONFIG, FieldPartitioner.class.getName());
  props.put(HdfsSinkConnectorConfig.PARTITION_FIELD_NAME_CONFIG, "int");

  HdfsSinkConnectorConfig config = new HdfsSinkConnectorConfig(props);
  DataWriter hdfsWriter = new DataWriter(config, context, avroData);

  String key = "key";
  Schema schema = createSchema();
  Struct[] records = createRecords(schema);
  ArrayList<SinkRecord> sinkRecords = new ArrayList<>();
  long offset = 0;
  for (Struct record : records) {
    for (long count = 0; count < 3; count++) {
      SinkRecord sinkRecord = new SinkRecord(TOPIC, PARTITION, Schema.STRING_SCHEMA, key, schema,
                                             record, offset + count);
      sinkRecords.add(sinkRecord);
    }
    offset = offset + 3;
  }

  hdfsWriter.write(sinkRecords);
  hdfsWriter.close(assignment);
  hdfsWriter.stop();

  Table table = hiveMetaStore.getTable(hiveDatabase, TOPIC);
  List<String> expectedColumnNames = new ArrayList<>();
  for (Field field : schema.fields()) {
    expectedColumnNames.add(field.name());
  }
  List<String> actualColumnNames = new ArrayList<>();
  for (FieldSchema column : table.getSd().getCols()) {
    actualColumnNames.add(column.getName());
  }
  assertEquals(expectedColumnNames, actualColumnNames);

  String partitionFieldName = config.getString(HdfsSinkConnectorConfig.PARTITION_FIELD_NAME_CONFIG);
  String directory1 = TOPIC + "/" + partitionFieldName + "=" + String.valueOf(16);
  String directory2 = TOPIC + "/" + partitionFieldName + "=" + String.valueOf(17);
  String directory3 = TOPIC + "/" + partitionFieldName + "=" + String.valueOf(18);

  List<String> expectedPartitions = new ArrayList<>();
  expectedPartitions.add(FileUtils.directoryName(url, topicsDir, directory1));
  expectedPartitions.add(FileUtils.directoryName(url, topicsDir, directory2));
  expectedPartitions.add(FileUtils.directoryName(url, topicsDir, directory3));

  List<String> partitions = hiveMetaStore.listPartitions(hiveDatabase, TOPIC, (short) -1);
  assertEquals(expectedPartitions, partitions);

  ArrayList<String[]> expectedResult = new ArrayList<>();
  for (int i = 16; i <= 18; ++i) {
    String[] part = {"true", String.valueOf(i), "12", "12.2", "12.2"};
    for (int j = 0; j < 3; ++j) {
      expectedResult.add(part);
    }
  }

  String result = HiveTestUtils.runHive(hiveExec, "SELECT * FROM " + TOPIC);
  String[] rows = result.split("\n");
  assertEquals(9, rows.length);
  for (int i = 0; i < rows.length; ++i) {
    String[] parts = HiveTestUtils.parseOutput(rows[i]);
    for (int j = 0; j < expectedResult.get(i).length; ++j) {
      assertEquals(expectedResult.get(i)[j], parts[j]);
    }
  }
}
Example 13
Source File: DumbProcessor.java From kafka-connect-mqtt with MIT License
@Override
public SourceRecord[] getRecords(String kafkaTopic) {
  return new SourceRecord[]{
      new SourceRecord(null, null, kafkaTopic, null,
          Schema.STRING_SCHEMA, mTopic,
          Schema.BYTES_SCHEMA, mMessage.getPayload())};
}
Example 14
Source File: ProcessRecordTest.java From snowflake-kafka-connector with Apache License 2.0
public static SchemaAndValue getNull() {
  return new SchemaAndValue(Schema.STRING_SCHEMA, null);
}
Example 15
Source File: DataWriterAvroTest.java From streamx with Apache License 2.0
@Test
public void testFlushPartialFile() throws Exception {
  String ROTATE_INTERVAL_MS_CONFIG = "1000";
  // wait for 2 * ROTATE_INTERVAL_MS_CONFIG
  long WAIT_TIME = Long.valueOf(ROTATE_INTERVAL_MS_CONFIG) * 2;

  String FLUSH_SIZE_CONFIG = "10";
  // send 1.5 * FLUSH_SIZE_CONFIG records
  long NUMBER_OF_RECORD = Long.valueOf(FLUSH_SIZE_CONFIG) + Long.valueOf(FLUSH_SIZE_CONFIG) / 2;

  Map<String, String> props = createProps();
  props.put(HdfsSinkConnectorConfig.FLUSH_SIZE_CONFIG, FLUSH_SIZE_CONFIG);
  props.put(HdfsSinkConnectorConfig.ROTATE_INTERVAL_MS_CONFIG, ROTATE_INTERVAL_MS_CONFIG);
  HdfsSinkConnectorConfig connectorConfig = new HdfsSinkConnectorConfig(props);

  assignment = new HashSet<>();
  assignment.add(TOPIC_PARTITION);

  DataWriter hdfsWriter = new DataWriter(connectorConfig, context, avroData);
  hdfsWriter.recover(TOPIC_PARTITION);

  String key = "key";
  Schema schema = createSchema();
  Struct record = createRecord(schema);

  Collection<SinkRecord> sinkRecords = new ArrayList<>();
  for (long offset = 0; offset < NUMBER_OF_RECORD; offset++) {
    SinkRecord sinkRecord =
        new SinkRecord(TOPIC, PARTITION, Schema.STRING_SCHEMA, key, schema, record, offset);
    sinkRecords.add(sinkRecord);
  }
  hdfsWriter.write(sinkRecords);

  // wait for rotation to happen
  long start = System.currentTimeMillis();
  long end = start + WAIT_TIME;
  while (System.currentTimeMillis() < end) {
    List<SinkRecord> messageBatch = new ArrayList<>();
    hdfsWriter.write(messageBatch);
  }

  Map<TopicPartition, Long> committedOffsets = hdfsWriter.getCommittedOffsets();
  assertTrue(committedOffsets.containsKey(TOPIC_PARTITION));
  long previousOffset = committedOffsets.get(TOPIC_PARTITION);
  assertEquals(NUMBER_OF_RECORD, previousOffset);

  hdfsWriter.close(assignment);
  hdfsWriter.stop();
}
Example 16
Source File: HiveIntegrationParquetTest.java From streamx with Apache License 2.0
@Test
public void testHiveIntegrationFieldPartitionerParquet() throws Exception {
  Map<String, String> props = createProps();
  props.put(HdfsSinkConnectorConfig.HIVE_INTEGRATION_CONFIG, "true");
  props.put(HdfsSinkConnectorConfig.PARTITIONER_CLASS_CONFIG, FieldPartitioner.class.getName());
  props.put(HdfsSinkConnectorConfig.PARTITION_FIELD_NAME_CONFIG, "int");

  HdfsSinkConnectorConfig config = new HdfsSinkConnectorConfig(props);
  DataWriter hdfsWriter = new DataWriter(config, context, avroData);

  String key = "key";
  Schema schema = createSchema();
  Struct[] records = createRecords(schema);
  ArrayList<SinkRecord> sinkRecords = new ArrayList<>();
  long offset = 0;
  for (Struct record : records) {
    for (long count = 0; count < 3; count++) {
      SinkRecord sinkRecord = new SinkRecord(TOPIC, PARTITION, Schema.STRING_SCHEMA, key, schema,
                                             record, offset + count);
      sinkRecords.add(sinkRecord);
    }
    offset = offset + 3;
  }

  hdfsWriter.write(sinkRecords);
  hdfsWriter.close(assignment);
  hdfsWriter.stop();

  Table table = hiveMetaStore.getTable(hiveDatabase, TOPIC);
  List<String> expectedColumnNames = new ArrayList<>();
  for (Field field : schema.fields()) {
    expectedColumnNames.add(field.name());
  }
  List<String> actualColumnNames = new ArrayList<>();
  for (FieldSchema column : table.getSd().getCols()) {
    actualColumnNames.add(column.getName());
  }
  assertEquals(expectedColumnNames, actualColumnNames);

  String partitionFieldName = config.getString(HdfsSinkConnectorConfig.PARTITION_FIELD_NAME_CONFIG);
  String directory1 = TOPIC + "/" + partitionFieldName + "=" + String.valueOf(16);
  String directory2 = TOPIC + "/" + partitionFieldName + "=" + String.valueOf(17);
  String directory3 = TOPIC + "/" + partitionFieldName + "=" + String.valueOf(18);

  List<String> expectedPartitions = new ArrayList<>();
  expectedPartitions.add(FileUtils.directoryName(url, topicsDir, directory1));
  expectedPartitions.add(FileUtils.directoryName(url, topicsDir, directory2));
  expectedPartitions.add(FileUtils.directoryName(url, topicsDir, directory3));

  List<String> partitions = hiveMetaStore.listPartitions(hiveDatabase, TOPIC, (short) -1);
  assertEquals(expectedPartitions, partitions);

  ArrayList<String[]> expectedResult = new ArrayList<>();
  for (int i = 16; i <= 18; ++i) {
    String[] part = {"true", String.valueOf(i), "12", "12.2", "12.2"};
    for (int j = 0; j < 3; ++j) {
      expectedResult.add(part);
    }
  }

  String result = HiveTestUtils.runHive(hiveExec, "SELECT * FROM " + TOPIC);
  String[] rows = result.split("\n");
  assertEquals(9, rows.length);
  for (int i = 0; i < rows.length; ++i) {
    String[] parts = HiveTestUtils.parseOutput(rows[i]);
    for (int j = 0; j < expectedResult.get(i).length; ++j) {
      assertEquals(expectedResult.get(i)[j], parts[j]);
    }
  }
}
Example 17
Source File: BytesToString.java From kafka-connect-transform-common with Apache License 2.0
@Override
protected SchemaAndValue processBytes(R record, Schema inputSchema, byte[] input) {
  final Schema outputSchema = inputSchema.isOptional() ? Schema.OPTIONAL_STRING_SCHEMA : Schema.STRING_SCHEMA;
  final String output = new String(input, this.config.charset);
  return new SchemaAndValue(outputSchema, output);
}
Example 18
Source File: KafkaSinkTaskTest.java From common-kafka with Apache License 2.0
@Test(expected = IllegalStateException.class)
public void put_recordKeyIsNotNullOrBytes() {
  sinkRecord = new SinkRecord("topic", 0, Schema.STRING_SCHEMA, "key",
      Schema.OPTIONAL_BYTES_SCHEMA, "value".getBytes(), 0L);
  task.put(Collections.singletonList(sinkRecord));
}
Example 19
Source File: HiveIntegrationParquetTest.java From streamx with Apache License 2.0
@Test
public void testHiveIntegrationParquet() throws Exception {
  Map<String, String> props = createProps();
  props.put(HdfsSinkConnectorConfig.HIVE_INTEGRATION_CONFIG, "true");
  HdfsSinkConnectorConfig config = new HdfsSinkConnectorConfig(props);

  DataWriter hdfsWriter = new DataWriter(config, context, avroData);
  hdfsWriter.recover(TOPIC_PARTITION);

  String key = "key";
  Schema schema = createSchema();
  Struct record = createRecord(schema);

  Collection<SinkRecord> sinkRecords = new ArrayList<>();
  for (long offset = 0; offset < 7; offset++) {
    SinkRecord sinkRecord =
        new SinkRecord(TOPIC, PARTITION, Schema.STRING_SCHEMA, key, schema, record, offset);
    sinkRecords.add(sinkRecord);
  }

  hdfsWriter.write(sinkRecords);
  hdfsWriter.close(assignment);
  hdfsWriter.stop();

  Table table = hiveMetaStore.getTable(hiveDatabase, TOPIC);
  List<String> expectedColumnNames = new ArrayList<>();
  for (Field field : schema.fields()) {
    expectedColumnNames.add(field.name());
  }
  List<String> actualColumnNames = new ArrayList<>();
  for (FieldSchema column : table.getSd().getCols()) {
    actualColumnNames.add(column.getName());
  }
  assertEquals(expectedColumnNames, actualColumnNames);

  List<String> expectedPartitions = new ArrayList<>();
  String directory = TOPIC + "/" + "partition=" + String.valueOf(PARTITION);
  expectedPartitions.add(FileUtils.directoryName(url, topicsDir, directory));

  List<String> partitions = hiveMetaStore.listPartitions(hiveDatabase, TOPIC, (short) -1);
  assertEquals(expectedPartitions, partitions);
}
Example 20
Source File: HiveIntegrationAvroTest.java From streamx with Apache License 2.0
@Test
public void testHiveIntegrationAvro() throws Exception {
  Map<String, String> props = createProps();
  props.put(HdfsSinkConnectorConfig.HIVE_INTEGRATION_CONFIG, "true");
  HdfsSinkConnectorConfig config = new HdfsSinkConnectorConfig(props);

  DataWriter hdfsWriter = new DataWriter(config, context, avroData);
  hdfsWriter.recover(TOPIC_PARTITION);

  String key = "key";
  Schema schema = createSchema();
  Struct record = createRecord(schema);

  Collection<SinkRecord> sinkRecords = new ArrayList<>();
  for (long offset = 0; offset < 7; offset++) {
    SinkRecord sinkRecord =
        new SinkRecord(TOPIC, PARTITION, Schema.STRING_SCHEMA, key, schema, record, offset);
    sinkRecords.add(sinkRecord);
  }

  hdfsWriter.write(sinkRecords);
  hdfsWriter.close(assignment);
  hdfsWriter.stop();

  Table table = hiveMetaStore.getTable(hiveDatabase, TOPIC);
  List<String> expectedColumnNames = new ArrayList<>();
  for (Field field : schema.fields()) {
    expectedColumnNames.add(field.name());
  }
  List<String> actualColumnNames = new ArrayList<>();
  for (FieldSchema column : table.getSd().getCols()) {
    actualColumnNames.add(column.getName());
  }
  assertEquals(expectedColumnNames, actualColumnNames);

  List<String> expectedPartitions = new ArrayList<>();
  String directory = TOPIC + "/" + "partition=" + String.valueOf(PARTITION);
  expectedPartitions.add(FileUtils.directoryName(url, topicsDir, directory));

  List<String> partitions = hiveMetaStore.listPartitions(hiveDatabase, TOPIC, (short) -1);
  assertEquals(expectedPartitions, partitions);
}