org.apache.kafka.connect.sink.SinkRecord Java Examples
The following examples show how to use
org.apache.kafka.connect.sink.SinkRecord.
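Before the project examples, here is a minimal, self-contained sketch (not taken from any of the projects below; all names and values are illustrative) of the two SinkRecord constructors that recur throughout this page: the short form that ends with the Kafka offset, and the extended form that also carries a record timestamp and its TimestampType.

import org.apache.kafka.common.record.TimestampType;
import org.apache.kafka.connect.data.Schema;
import org.apache.kafka.connect.sink.SinkRecord;

public class SinkRecordSketch {
  public static void main(String[] args) {
    // Short constructor: topic, partition, key schema/key, value schema/value, offset.
    SinkRecord simple = new SinkRecord(
        "example-topic", 0,
        Schema.STRING_SCHEMA, "example-key",
        Schema.STRING_SCHEMA, "example-value",
        42L);

    // Extended constructor: additionally carries the record timestamp and its type.
    SinkRecord timestamped = new SinkRecord(
        "example-topic", 0,
        Schema.STRING_SCHEMA, "example-key",
        Schema.STRING_SCHEMA, "example-value",
        42L, 1472256858924L, TimestampType.CREATE_TIME);

    // Accessors used by the sink tasks and transforms in the examples below.
    System.out.println(simple.topic());           // "example-topic"
    System.out.println(simple.kafkaPartition());  // 0
    System.out.println(simple.kafkaOffset());     // 42
    System.out.println(timestamped.timestamp());  // 1472256858924
  }
}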
Example #1
Source File: NormalizeSchemaTest.java From kafka-connect-transform-common with Apache License 2.0
SinkRecord exampleRecord(Schema schema) {
  Struct struct = new Struct(schema);

  for (Field field : schema.fields()) {
    struct.put(field, Strings.repeat("x", 50));
  }

  return new SinkRecord(
      "test",
      0,
      null,
      null,
      schema,
      struct,
      1234L
  );
}
Example #2
Source File: CamelSinkTaskTest.java From camel-kafka-connector with Apache License 2.0
@Test
public void testIfExchangeFailsShouldThrowConnectException() {
  Map<String, String> props = new HashMap<>();
  props.put(CamelSinkConnectorConfig.TOPIC_CONF, TOPIC_NAME);

  // we use a dummy component sink in order to fail the exchange delivery
  props.put(CamelSinkConnectorConfig.CAMEL_SINK_COMPONENT_CONF, "direct");
  props.put(CamelSinkTask.getCamelSinkPathConfigPrefix() + "name", "test");

  CamelSinkTask sinkTask = new CamelSinkTask();
  sinkTask.start(props);

  List<SinkRecord> records = new ArrayList<SinkRecord>();
  SinkRecord record = new SinkRecord(TOPIC_NAME, 1, null, "test", null, "camel", 42);
  records.add(record);

  assertThrows(ConnectException.class, () -> sinkTask.put(records));

  sinkTask.stop();
}
Example #3
Source File: ProcessRecordTest.java From snowflake-kafka-connector with Apache License 2.0
@Test
public void test() throws IOException {
  RecordService service = new RecordService();

  SinkRecord record = new SinkRecord(
      topic,
      partition,
      testCase.key.schema(),
      testCase.key.value(),
      testCase.value.schema(),
      testCase.value.value(),
      partition
  );

  String got = service.processRecord(record);

  assertEquals(testCase.expected, mapper.readTree(got));
}
Example #4
Source File: SplunkHttpSinkTaskTest.java From kafka-connect-splunk with Apache License 2.0
@Test
public void normal() throws IOException {
  Collection<SinkRecord> sinkRecords = new ArrayList<>();
  SinkRecordContentTest.addRecord(sinkRecords, ImmutableMap.of("host", "hostname.example.com"));
  SinkRecordContentTest.addRecord(sinkRecords, ImmutableMap.of("host", "hostname.example.com", "time", new Date(1472256858924L), "source", "testapp"));
  SinkRecordContentTest.addRecord(sinkRecords, ImmutableMap.of("host", "hostname.example.com", "time", new Date(1472256858924L), "source", "testapp", "sourcetype", "txt", "index", "main"));

  final LowLevelHttpRequest httpRequest = mock(LowLevelHttpRequest.class, CALLS_REAL_METHODS);
  LowLevelHttpResponse httpResponse = getResponse(200);
  when(httpRequest.execute()).thenReturn(httpResponse);

  this.task.transport = new MockHttpTransport() {
    @Override
    public LowLevelHttpRequest buildRequest(String method, String url) throws IOException {
      return httpRequest;
    }
  };
  this.task.httpRequestFactory = this.task.transport.createRequestFactory(this.task.httpRequestInitializer);

  this.task.put(sinkRecords);
}
Example #5
Source File: TimestampNowFieldTest.java From kafka-connect-transform-common with Apache License 2.0
@Test
public void mapFieldMissing() {
  final Map<String, Object> expected = ImmutableMap.of(
      "firstName", "example",
      "lastName", "user",
      "timestamp", timestamp
  );
  final SinkRecord input = new SinkRecord(
      "test",
      1,
      null,
      null,
      null,
      ImmutableMap.of("firstName", "example", "lastName", "user"),
      1234L
  );
  final SinkRecord output = this.transformation.apply(input);
  assertNotNull(output, "output should not be null.");
  assertTrue(output.value() instanceof Map, "value should be a struct");
  final Map<String, Object> actual = (Map<String, Object>) output.value();
  assertEquals(expected, actual);
}
Example #6
Source File: GcsSinkTaskTest.java From aiven-kafka-connect-gcs with GNU Affero General Public License v3.0
private SinkRecord createRecordStringKey(final String topic,
                                         final int partition,
                                         final String key,
                                         final String value,
                                         final int offset,
                                         final long timestamp) {
  return new SinkRecord(
      topic,
      partition,
      Schema.OPTIONAL_STRING_SCHEMA,
      key,
      Schema.BYTES_SCHEMA,
      value.getBytes(StandardCharsets.UTF_8),
      offset,
      timestamp,
      TimestampType.CREATE_TIME);
}
Example #7
Source File: FileStreamSinkTaskTest.java From kafka-connector-skeleton with Apache License 2.0
@Test
public void testPutFlush() {
  HashMap<TopicPartition, OffsetAndMetadata> offsets = new HashMap<>();

  // We do not call task.start() since it would override the output stream

  task.put(Arrays.asList(
      new SinkRecord("topic1", 0, null, null, Schema.STRING_SCHEMA, "line1", 1)
  ));
  offsets.put(new TopicPartition("topic1", 0), new OffsetAndMetadata(1L));
  task.flush(offsets);
  assertEquals("line1\n", os.toString());

  task.put(Arrays.asList(
      new SinkRecord("topic1", 0, null, null, Schema.STRING_SCHEMA, "line2", 2),
      new SinkRecord("topic2", 0, null, null, Schema.STRING_SCHEMA, "line3", 1)
  ));
  offsets.put(new TopicPartition("topic1", 0), new OffsetAndMetadata(2L));
  offsets.put(new TopicPartition("topic2", 0), new OffsetAndMetadata(1L));
  task.flush(offsets);
  assertEquals("line1\nline2\nline3\n", os.toString());
}
Example #8
Source File: TopicPartitionRecordGrouper.java From aiven-kafka-connect-gcs with GNU Affero General Public License v3.0
@Override
public void put(final SinkRecord record) {
  Objects.requireNonNull(record, "record cannot be null");

  final TopicPartition tp = new TopicPartition(record.topic(), record.kafkaPartition());
  final SinkRecord currentHeadRecord = currentHeadRecords.computeIfAbsent(tp, ignored -> record);
  final String recordKey = generateRecordKey(tp, currentHeadRecord);

  if (shouldCreateNewFile(recordKey)) {
    // Create new file using this record as the head record.
    currentHeadRecords.put(tp, record);
    final String newRecordKey = generateRecordKey(tp, record);
    fileBuffers.computeIfAbsent(newRecordKey, ignored -> new ArrayList<>()).add(record);
  } else {
    fileBuffers.computeIfAbsent(recordKey, ignored -> new ArrayList<>()).add(record);
  }
}
Example #9
Source File: IgniteSinkTask.java From ignite with Apache License 2.0
/**
 * Buffers records.
 *
 * @param records Records to inject into grid.
 */
@SuppressWarnings("unchecked")
@Override
public void put(Collection<SinkRecord> records) {
  try {
    for (SinkRecord record : records) {
      // Data is flushed asynchronously when CACHE_PER_NODE_DATA_SIZE is reached.
      if (extractor != null) {
        Map.Entry<Object, Object> entry = extractor.extract(record);
        StreamerContext.getStreamer().addData(entry.getKey(), entry.getValue());
      } else {
        if (record.key() != null) {
          StreamerContext.getStreamer().addData(record.key(), record.value());
        } else {
          log.error("Failed to stream a record with null key!");
        }
      }
    }
  } catch (ConnectException e) {
    log.error("Failed adding record", e);
    throw new ConnectException(e);
  }
}
Example #10
Source File: JsonPayloadFormatterTest.java From kafka-connect-lambda with Apache License 2.0
@Test
public void testAvroAvroSinkRecordValueSchemaVisibilityAll() throws IOException {
  final SinkRecord record = createSinkRecord(keySchema, keyStruct, valueSchema, valueStruct);

  Map<String, String> map = new HashMap<>();
  map.put(VALUE_SCHEMA_VISIBILITY_CONFIG, "all");
  formatter.configure(map);

  final String result = formatter.format(record);
  debugShow(record, result);

  Payload payload = new Payload<>();
  payload = mapper.readValue(result, payload.getClass());

  // Only asserting on things that are addressed by the test.
  assertTrue(payload.getKey() instanceof HashMap);
  assertEquals(TEST_KEY_CLASS, payload.getKeySchemaName());
  assertEquals(1234, Integer.parseInt(payload.getKeySchemaVersion()));

  assertTrue(payload.getValue() instanceof HashMap);
  assertEquals(TEST_VALUE_CLASS, payload.getValueSchemaName());
  assertEquals(5678, Integer.parseInt(payload.getValueSchemaVersion()));
}
Example #11
Source File: PlainPayloadFormatterTest.java From kafka-connect-lambda with Apache License 2.0
@Test
public void testFormatBatchOfRecords() throws IOException {
  List<SinkRecord> records = Arrays.asList(
      new SinkRecord("test-topic", 1, null, "test-key1", null, "test-value1", 0),
      new SinkRecord("test-topic", 1, null, "test-key2", null, "test-value2", 1),
      new SinkRecord("test-topic", 1, null, "test-key3", null, "test-value3", 2)
  );

  String result = formatter.format(records);
  PlainPayload[] payloads = mapper.readValue(result, PlainPayload[].class);

  assertEquals(3, payloads.length);
  for (int i = 0; i < payloads.length; i++) {
    assertEquals(i, payloads[i].getOffset());
  }
}
Example #12
Source File: SetNullTest.java From kafka-connect-transform-common with Apache License 2.0
@Test
public void test() {
  final SinkRecord input = new SinkRecord(
      "test",
      1,
      Schema.STRING_SCHEMA,
      "key",
      null,
      "",
      1234123L,
      12341312L,
      TimestampType.NO_TIMESTAMP_TYPE
  );
  final Long expectedTimestamp = 1537808219123L;
  SetNull<SinkRecord> transform = new SetNull.Key<>();
  final SinkRecord actual = transform.apply(input);
  assertNull(actual.key(), "key should be null.");
  assertNull(actual.keySchema(), "keySchema should be null.");
}
Example #13
Source File: CloudPubSubSinkTaskTest.java From pubsub with Apache License 2.0
/**
 * Tests that if a Future that is being processed in flush() failed with an exception and then a
 * second Future is processed successfully in a subsequent flush, then the subsequent flush
 * succeeds.
 */
@Test
public void testFlushExceptionThenNoExceptionCase() throws Exception {
  task.start(props);
  Map<TopicPartition, OffsetAndMetadata> partitionOffsets = new HashMap<>();
  partitionOffsets.put(new TopicPartition(KAFKA_TOPIC, 0), null);
  List<SinkRecord> records = getSampleRecords();
  ApiFuture<String> badFuture = getFailedPublishFuture();
  ApiFuture<String> goodFuture = getSuccessfulPublishFuture();
  when(publisher.publish(any(PubsubMessage.class)))
      .thenReturn(badFuture)
      .thenReturn(badFuture)
      .thenReturn(goodFuture);
  task.put(records);
  try {
    task.flush(partitionOffsets);
  } catch (RuntimeException e) {
  }
  records = getSampleRecords();
  task.put(records);
  task.flush(partitionOffsets);
  verify(publisher, times(4)).publish(any(PubsubMessage.class));
  verify(badFuture, times(2)).addListener(any(Runnable.class), any(Executor.class));
  verify(goodFuture, times(2)).addListener(any(Runnable.class), any(Executor.class));
}
Example #14
Source File: TimestampNowTest.java From kafka-connect-transform-common with Apache License 2.0
@Test
public void test() {
  final SinkRecord input = new SinkRecord(
      "test",
      1,
      null,
      "",
      null,
      "",
      1234123L,
      12341312L,
      TimestampType.NO_TIMESTAMP_TYPE
  );
  final Long expectedTimestamp = 1537808219123L;
  TimestampNow<SinkRecord> transform = new TimestampNow<>();
  transform.time = mock(Time.class);
  when(transform.time.milliseconds()).thenReturn(expectedTimestamp);
  final SinkRecord actual = transform.apply(input);
  assertEquals(expectedTimestamp, actual.timestamp(), "Timestamp should match.");
  verify(transform.time, times(1)).milliseconds();
}
Example #15
Source File: AbstractValueWriter.java From aiven-kafka-connect-gcs with GNU Affero General Public License v3.0
/**
 * Takes the {@link SinkRecord}'s value as a byte array.
 *
 * <p>If the value is {@code null}, it outputs nothing.
 *
 * <p>If the value is not {@code null}, it assumes the value <b>is</b> a byte array.
 *
 * @param record       the record to get the value from
 * @param outputStream the stream to write to
 * @throws DataException when the value is not actually a byte array
 */
@Override
public void write(final SinkRecord record, final OutputStream outputStream) throws IOException {
  Objects.requireNonNull(record, "record cannot be null");
  Objects.requireNonNull(record.valueSchema(), "value schema cannot be null");
  Objects.requireNonNull(outputStream, "outputStream cannot be null");

  if (record.valueSchema().type() != Schema.Type.BYTES) {
    final String msg = String.format("Record value schema type must be %s, %s given",
        Schema.Type.BYTES, record.valueSchema().type());
    throw new DataException(msg);
  }

  // Do nothing if the value is null.
  if (record.value() == null) {
    return;
  }

  if (!(record.value() instanceof byte[])) {
    throw new DataException("Value is not a byte array");
  }

  outputStream.write(getOutputBytes((byte[]) record.value()));
}
Example #16
Source File: HttpApiWriterTest.java From kafka-connect-http with Apache License 2.0
@Test
public void headerSeparator() throws Exception {
  Map<String, String> properties = new HashMap<>();
  int port = restHelper.getPort();
  String endPoint = "/test";
  String testUrl = "http://localhost:" + port + endPoint;
  properties.put(HttpSinkConfig.HTTP_API_URL, testUrl);
  properties.put(HttpSinkConfig.REQUEST_METHOD, HttpSinkConfig.RequestMethod.DELETE.toString());
  properties.put(HttpSinkConfig.HEADERS, "Content-Type:application/json=Cache-Control:no-cache");
  properties.put(HttpSinkConfig.HEADER_SEPERATOR, "=");

  HttpSinkConfig config = new HttpSinkConfig(properties);
  HttpApiWriter writer = new HttpApiWriter(config);

  List<SinkRecord> sinkRecords = new ArrayList<>();
  String payload = "someValue";
  sinkRecords.add(new SinkRecord("someTopic", 0, null, "someKey", null, payload, 0));
  writer.write(sinkRecords);

  Assert.assertEquals(1, restHelper.getCapturedRequests().size());
  for (RequestInfo requestInfo : restHelper.getCapturedRequests()) {
    Assert.assertThat(requestInfo.getHeaders(), hasItems("Content-Type:application/json"));
    Assert.assertThat(requestInfo.getHeaders(), hasItems("Cache-Control:no-cache"));
  }
}
Example #17
Source File: GcsSinkTaskGroupByTopicPartitionPropertiesTest.java From aiven-kafka-connect-gcs with GNU Affero General Public License v3.0
private void genericTry(final List<List<SinkRecord>> recordBatches, final Integer maxRecordsPerFile) {
  final Storage storage = LocalStorageHelper.getOptions().getService();
  final BucketAccessor testBucketAccessor = new BucketAccessor(storage, TEST_BUCKET, true);

  final Map<String, String> taskProps = basicTaskProps();
  if (maxRecordsPerFile != null) {
    taskProps.put(GcsSinkConfig.FILE_MAX_RECORDS, Integer.toString(maxRecordsPerFile));
  }
  final GcsSinkTask task = new GcsSinkTask(taskProps, storage);

  for (final List<SinkRecord> recordBatch : recordBatches) {
    task.put(recordBatch);
    task.flush(null);
  }

  checkExpectedFileNames(recordBatches, maxRecordsPerFile, testBucketAccessor);
  checkFileSizes(testBucketAccessor, maxRecordsPerFile);
  final int expectedRecordCount = recordBatches.stream().mapToInt(List::size).sum();
  checkTotalRecordCountAndNoMultipleWrites(expectedRecordCount, testBucketAccessor);
  checkTopicPartitionPartInFileNames(testBucketAccessor);
  checkOffsetOrderInFiles(testBucketAccessor);
}
Example #18
Source File: TopicPartitionWriter.java From streamx with Apache License 2.0
private void writeRecord(SinkRecord record) throws IOException {
  long expectedOffset = offset + recordCounter;
  if (offset == -1) {
    offset = record.kafkaOffset();
  } else if (record.kafkaOffset() != expectedOffset) {
    // Currently it's possible to see stale data with the wrong offset after a rebalance when you
    // rewind, which we do since we manage our own offsets. See KAFKA-2894.
    if (!sawInvalidOffset) {
      log.info(
          "Ignoring stale out-of-order record in {}-{}. Has offset {} instead of expected offset {}",
          record.topic(), record.kafkaPartition(), record.kafkaOffset(), expectedOffset);
    }
    sawInvalidOffset = true;
    return;
  }

  if (sawInvalidOffset) {
    log.info(
        "Recovered from stale out-of-order records in {}-{} with offset {}",
        record.topic(), record.kafkaPartition(), expectedOffset);
    sawInvalidOffset = false;
  }

  String encodedPartition = partitioner.encodePartition(record);
  RecordWriter<SinkRecord> writer = getWriter(record, encodedPartition);
  writer.write(record);

  if (!startOffsets.containsKey(encodedPartition)) {
    startOffsets.put(encodedPartition, record.kafkaOffset());
    offsets.put(encodedPartition, record.kafkaOffset());
  } else {
    offsets.put(encodedPartition, record.kafkaOffset());
  }
  recordCounter++;
}
Example #19
Source File: JsonPayloadFormatterTest.java From kafka-connect-lambda with Apache License 2.0
@Test
public void testNullStringSinkRecord() throws IOException {
  final SinkRecord record = createSinkRecord(null, null, null, TEST_VALUE);
  final String result = formatter.format(record);
  debugShow(record, result);

  Payload payload = new Payload<>();
  payload = mapper.readValue(result, payload.getClass());

  assertNull(payload.getKey());
  assertNull(payload.getKeySchemaName());
  assertEquals(TEST_VALUE, payload.getValue());
  assertNull(payload.getValueSchemaName());
}
Example #20
Source File: KafkaMetaAdder.java From kafka-connect-mongodb with Apache License 2.0
@Override
public void process(SinkDocument doc, SinkRecord orig) {
  doc.getValueDoc().ifPresent(vd -> {
    vd.put(KAFKA_META_DATA, new BsonString(orig.topic() + "-" + orig.kafkaPartition() + "-" + orig.kafkaOffset()));
    vd.put(orig.timestampType().name(), new BsonInt64(orig.timestamp()));
  });
  getNext().ifPresent(pp -> pp.process(doc, orig));
}
Example #21
Source File: JsonPayloadFormatterTest.java From kafka-connect-lambda with Apache License 2.0
@Test
public void testLongJsonSinkRecord() throws IOException {
  final SinkRecord record = createSinkRecord(null, 123L, null, TEST_VALUE_JSON);
  final String result = formatter.format(record);
  debugShow(record, result);

  Payload payload = new Payload<>();
  payload = mapper.readValue(result, payload.getClass());

  assertEquals(123L, Long.parseLong(payload.getKey().toString()));
  assertNull(payload.getKeySchemaName());
  assertEquals(TEST_VALUE_JSON, payload.getValue());
  assertNull(payload.getValueSchemaName());
}
Example #22
Source File: HttpApiWriterTest.java From kafka-connect-http with Apache License 2.0
@Test
public void batchSentAtMaxSize() throws Exception {
  Map<String, String> properties = new HashMap<>();
  int port = restHelper.getPort();
  String endPoint = "/test";
  String testUrl = "http://localhost:" + port + endPoint;
  properties.put(HttpSinkConfig.HTTP_API_URL, testUrl);
  properties.put(HttpSinkConfig.REQUEST_METHOD, HttpSinkConfig.RequestMethod.POST.toString());
  properties.put(HttpSinkConfig.HEADERS, "Content-Type:application/json=Cache-Control:no-cache");
  properties.put(HttpSinkConfig.HEADER_SEPERATOR, "=");
  properties.put(HttpSinkConfig.REGEX_PATTERNS, "^~$");
  properties.put(HttpSinkConfig.REGEX_REPLACEMENTS, "${key}~${topic}");
  properties.put(HttpSinkConfig.REGEX_SEPARATOR, "~");
  properties.put(HttpSinkConfig.BATCH_MAX_SIZE, "2");

  HttpSinkConfig config = new HttpSinkConfig(properties);
  HttpApiWriter writer = new HttpApiWriter(config);

  List<SinkRecord> sinkRecords = new ArrayList<>();
  String payload1 = "someValue1";
  String payload2 = "someValue2";
  sinkRecords.add(new SinkRecord("someTopic", 0, null, "someKey", null, payload1, 0));
  sinkRecords.add(new SinkRecord("someTopic", 0, null, "someKey", null, payload2, 0));
  writer.write(sinkRecords);

  Assert.assertEquals(1, restHelper.getCapturedRequests().size());
  RequestInfo request1 = restHelper.getCapturedRequests().get(0);
  Assert.assertEquals(HttpSinkConfig.RequestMethod.POST.toString(), request1.getMethod());
  Assert.assertEquals("/test", request1.getUrl());
  Assert.assertEquals("someKeysomeValue1someTopic,someKeysomeValue2someTopic", request1.getBody());
}
Example #23
Source File: HdfsSinkTask.java From streamx with Apache License 2.0
@Override
public void put(Collection<SinkRecord> records) throws ConnectException {
  try {
    hdfsWriter.write(records);
  } catch (ConnectException e) {
    throw new ConnectException(e);
  }
}
Example #24
Source File: MongoDbSinkRecordBatchesTest.java From kafka-connect-mongodb with Apache License 2.0
private static List<List<SinkRecord>> createBatchedSinkRecordList(List<SinkRecord> sinkRecordList, int batchSize) {
  if (batchSize > 0) {
    return Lists.partition(sinkRecordList, batchSize);
  }
  List<List<SinkRecord>> batchedList = new ArrayList<>();
  batchedList.add(sinkRecordList);
  return batchedList;
}
Example #25
Source File: PlainPayloadFormatterTest.java From kafka-connect-lambda with Apache License 2.0
@Test
public void testNullStringSinkRecord() throws IOException {
  final SinkRecord record = createSinkRecord(null, null, null, TEST_VALUE);
  final PlainPayload payload = derivePayload(record);

  assertEquals("", payload.getKey());
  assertNull(payload.getKeySchemaName());
  assertEquals(TEST_VALUE, payload.getValue());
  assertNull(payload.getValueSchemaName());
}
Example #26
Source File: MongoDbSinkTask.java From kafka-connect-mongodb with Apache License 2.0
List<? extends WriteModel<BsonDocument>> buildWriteModel(Collection<SinkRecord> records, String collectionName) {
  List<WriteModel<BsonDocument>> docsToWrite = new ArrayList<>(records.size());
  LOGGER.debug("building write model for {} record(s)", records.size());
  records.forEach(record -> {
        SinkDocument doc = sinkConverter.convert(record);
        processorChains.getOrDefault(collectionName,
            processorChains.get(MongoDbSinkConnectorConfig.TOPIC_AGNOSTIC_KEY_NAME))
            .process(doc, record);
        if (doc.getValueDoc().isPresent()) {
          docsToWrite.add(writeModelStrategies.getOrDefault(
              collectionName,
              writeModelStrategies.get(MongoDbSinkConnectorConfig.TOPIC_AGNOSTIC_KEY_NAME)
          ).createWriteModel(doc, record));
        } else {
          if (doc.getKeyDoc().isPresent()
              && sinkConfig.isDeleteOnNullValues(record.topic())) {
            docsToWrite.add(deleteOneModelDefaultStrategies.getOrDefault(collectionName,
                deleteOneModelDefaultStrategies.get(MongoDbSinkConnectorConfig.TOPIC_AGNOSTIC_KEY_NAME))
                .createWriteModel(doc));
          } else {
            LOGGER.error("skipping sink record " + record + " for which neither key doc nor value doc were present");
          }
        }
      }
  );
  return docsToWrite;
}
Example #27
Source File: SinkRecordContentTest.java From kafka-connect-splunk with Apache License 2.0
@Test
public void struct002() throws IOException {
  final Struct value = new Struct(EventConverter.VALUE_SCHEMA)
      .put("host", "hostname.example.com")
      .put("time", new Date(1472256858924L))
      .put("source", "testapp")
      .put("sourcetype", "txt")
      .put("index", "main");
  final SinkRecord record = record(value);
  final String expected = "{\"host\":\"hostname.example.com\",\"time\":1472256858.924,\"sourcetype\":\"txt\",\"index\":\"main\",\"source\":\"testapp\"}";
  test(record, expected);
}
Example #28
Source File: HttpApiWriterTest.java From kafka-connect-http with Apache License 2.0
@Test
public void regexReplacement() throws Exception {
  Map<String, String> properties = new HashMap<>();
  int port = restHelper.getPort();
  String endPoint = "/test";
  String testUrl = "http://localhost:" + port + endPoint;
  properties.put(HttpSinkConfig.HTTP_API_URL, testUrl);
  properties.put(HttpSinkConfig.REQUEST_METHOD, HttpSinkConfig.RequestMethod.POST.toString());
  properties.put(HttpSinkConfig.HEADERS, "Content-Type:application/json=Cache-Control:no-cache");
  properties.put(HttpSinkConfig.HEADER_SEPERATOR, "=");
  properties.put(HttpSinkConfig.REGEX_PATTERNS, "^~$");
  properties.put(HttpSinkConfig.REGEX_REPLACEMENTS, "start~end");
  properties.put(HttpSinkConfig.REGEX_SEPARATOR, "~");

  HttpSinkConfig config = new HttpSinkConfig(properties);
  HttpApiWriter writer = new HttpApiWriter(config);

  List<SinkRecord> sinkRecords = new ArrayList<>();
  String payload1 = "someValue1";
  sinkRecords.add(new SinkRecord("someTopic", 0, null, "someKey", null, payload1, 0));
  writer.write(sinkRecords);

  Assert.assertEquals(1, restHelper.getCapturedRequests().size());
  RequestInfo request1 = restHelper.getCapturedRequests().get(0);
  Assert.assertEquals(HttpSinkConfig.RequestMethod.POST.toString(), request1.getMethod());
  Assert.assertEquals("/test", request1.getUrl());
  Assert.assertEquals("start" + payload1 + "end", request1.getBody());
}
Example #29
Source File: HBaseSinkTask.java From kafka-connect-hbase with Apache License 2.0
@Override
public void put(Collection<SinkRecord> records) {
  Map<String, List<SinkRecord>> byTopic = records.stream()
      .collect(groupingBy(SinkRecord::topic));

  Map<String, List<Put>> byTable = byTopic.entrySet().stream()
      .collect(toMap(Map.Entry::getKey,
          (e) -> e.getValue().stream().map(sr -> toPutFunction.apply(sr)).collect(toList())));

  byTable.entrySet().parallelStream().forEach(entry -> {
    hBaseClient.write(entry.getKey(), entry.getValue());
  });
}
Example #30
Source File: GenericRecordConverterTest.java From MongoDb-Sink-Connector with Apache License 2.0
@Test
public void convertIntString() {
  expected.put("_id", "10");
  expected.put("key", new BsonInt32(10));
  expected.put("value", new BsonString("mystring"));

  SinkRecord record = new SinkRecord(null, 0,
      SchemaBuilder.int32().build(), 10,
      SchemaBuilder.string().build(), "mystring", 0L);

  assertEquals(expected, converter.convert(record));
}
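For context on where the records in these examples come from, here is a minimal, hypothetical SinkTask sketch (not taken from any of the projects above; the class name and output format are illustrative only) showing the point at which the Kafka Connect framework hands SinkRecord batches to a connector's put() method.

import java.util.Collection;
import java.util.Map;
import org.apache.kafka.connect.sink.SinkRecord;
import org.apache.kafka.connect.sink.SinkTask;

public class LoggingSinkTask extends SinkTask {

  @Override
  public String version() {
    return "0.0.1";
  }

  @Override
  public void start(Map<String, String> props) {
    // A real task would read its configuration here.
  }

  @Override
  public void put(Collection<SinkRecord> records) {
    // Kafka Connect delivers each polled batch of SinkRecords to put().
    for (SinkRecord record : records) {
      System.out.printf("%s-%d@%d key=%s value=%s%n",
          record.topic(), record.kafkaPartition(), record.kafkaOffset(),
          record.key(), record.value());
    }
  }

  @Override
  public void stop() {
    // Release any resources opened in start().
  }
}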