Java Code Examples for org.apache.kafka.connect.data.Schema#OPTIONAL_STRING_SCHEMA
The following examples show how to use
org.apache.kafka.connect.data.Schema#OPTIONAL_STRING_SCHEMA.
They are taken from open source projects; the source file and license are noted above each example.
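Before the project examples, a minimal self-contained sketch (written for this page, not taken from any project below) of what distinguishes Schema.OPTIONAL_STRING_SCHEMA from Schema.STRING_SCHEMA: an optional schema accepts null values, so it is the usual choice for record keys and fields that may be absent. The class and field names here are illustrative.

    import org.apache.kafka.connect.data.Schema;
    import org.apache.kafka.connect.data.SchemaBuilder;
    import org.apache.kafka.connect.data.Struct;

    public class OptionalStringSchemaDemo {
        public static void main(String[] args) {
            Schema schema = SchemaBuilder.struct().name("demo")
                    .field("id", Schema.STRING_SCHEMA)             // required field
                    .field("note", Schema.OPTIONAL_STRING_SCHEMA)  // nullable field
                    .build();

            Struct struct = new Struct(schema)
                    .put("id", "record-1");   // "note" is deliberately left null

            struct.validate();  // passes: only the optional field is null
            System.out.println(struct);
        }
    }

Swapping "note" to Schema.STRING_SCHEMA would make validate() throw a DataException, since a required string field may not be null.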
Example 1
Source File: StringParserTest.java From connect-utils with Apache License 2.0
@Test
public void nullableTests() {
  final Schema[] schemas = new Schema[]{
      Schema.OPTIONAL_BOOLEAN_SCHEMA,
      Schema.OPTIONAL_FLOAT32_SCHEMA,
      Schema.OPTIONAL_FLOAT64_SCHEMA,
      Schema.OPTIONAL_INT8_SCHEMA,
      Schema.OPTIONAL_INT16_SCHEMA,
      Schema.OPTIONAL_INT32_SCHEMA,
      Schema.OPTIONAL_INT64_SCHEMA,
      Schema.OPTIONAL_STRING_SCHEMA,
      Decimal.builder(1).optional().build(),
      Timestamp.builder().optional().build(),
      Date.builder().optional().build(),
      Time.builder().optional().build(),
  };

  for (Schema schema : schemas) {
    Object actual = this.parser.parseString(schema, null);
    assertNull(actual);
  }
}
Example 2
Source File: GcsSinkTaskTest.java From aiven-kafka-connect-gcs with GNU Affero General Public License v3.0
private SinkRecord createRecordStringKey(final String topic,
                                         final int partition,
                                         final String key,
                                         final String value,
                                         final int offset,
                                         final long timestamp) {
    return new SinkRecord(
        topic,
        partition,
        Schema.OPTIONAL_STRING_SCHEMA,
        key,
        Schema.BYTES_SCHEMA,
        value.getBytes(StandardCharsets.UTF_8),
        offset,
        timestamp,
        TimestampType.CREATE_TIME);
}
Example 3
Source File: CloudPubSubSourceTaskTest.java From pubsub with Apache License 2.0
/** Tests that no partition is assigned when the partition scheme is "kafka_partitioner". */
@Test
public void testPollWithPartitionSchemeKafkaPartitioner() throws Exception {
  props.put(
      CloudPubSubSourceConnector.KAFKA_PARTITION_SCHEME_CONFIG,
      CloudPubSubSourceConnector.PartitionScheme.KAFKA_PARTITIONER.toString());
  task.start(props);
  ReceivedMessage rm = createReceivedMessage(ACK_ID1, CPS_MESSAGE, new HashMap<String, String>());
  PullResponse stubbedPullResponse = PullResponse.newBuilder().addReceivedMessages(rm).build();
  when(subscriber.pull(any(PullRequest.class)).get()).thenReturn(stubbedPullResponse);
  List<SourceRecord> result = task.poll();
  verify(subscriber, never()).ackMessages(any(AcknowledgeRequest.class));
  assertEquals(1, result.size());
  SourceRecord expected =
      new SourceRecord(
          null,
          null,
          KAFKA_TOPIC,
          null,
          Schema.OPTIONAL_STRING_SCHEMA,
          null,
          Schema.BYTES_SCHEMA,
          KAFKA_VALUE);
  assertRecordsEqual(expected, result.get(0));
  assertNull(result.get(0).kafkaPartition());
}
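The Cloud Pub/Sub source tests on this page all pass Schema.OPTIONAL_STRING_SCHEMA as the key schema, so a record built from a message without a key attribute can carry a null key. A hedged sketch of just that construction, where the topic name and payload are placeholders rather than the tests' constants:

    import org.apache.kafka.connect.data.Schema;
    import org.apache.kafka.connect.source.SourceRecord;

    public class NullKeyRecordSketch {
        public static void main(String[] args) {
            SourceRecord record = new SourceRecord(
                    null,                           // sourcePartition: none tracked here
                    null,                           // sourceOffset
                    "my-topic",                     // placeholder Kafka topic
                    null,                           // partition: let the producer decide
                    Schema.OPTIONAL_STRING_SCHEMA,  // key schema that tolerates null
                    null,                           // no key attribute on the message
                    Schema.BYTES_SCHEMA,
                    new byte[]{1, 2, 3});           // placeholder payload
            System.out.println(record);
        }
    }

With Schema.STRING_SCHEMA as the key schema, a null key would fail schema validation downstream, which is why the connector uses the optional variant.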
Example 4
Source File: CloudPubSubSourceTaskTest.java From pubsub with Apache License 2.0
/** Tests that the correct partition is assigned when the partition scheme is "hash_value". */
@Test
public void testPollWithPartitionSchemeHashValue() throws Exception {
  props.put(
      CloudPubSubSourceConnector.KAFKA_PARTITION_SCHEME_CONFIG,
      CloudPubSubSourceConnector.PartitionScheme.HASH_VALUE.toString());
  task.start(props);
  ReceivedMessage rm = createReceivedMessage(ACK_ID1, CPS_MESSAGE, new HashMap<String, String>());
  PullResponse stubbedPullResponse = PullResponse.newBuilder().addReceivedMessages(rm).build();
  when(subscriber.pull(any(PullRequest.class)).get()).thenReturn(stubbedPullResponse);
  List<SourceRecord> result = task.poll();
  verify(subscriber, never()).ackMessages(any(AcknowledgeRequest.class));
  assertEquals(1, result.size());
  SourceRecord expected =
      new SourceRecord(
          null,
          null,
          KAFKA_TOPIC,
          KAFKA_VALUE.hashCode() % Integer.parseInt(KAFKA_PARTITIONS),
          Schema.OPTIONAL_STRING_SCHEMA,
          null,
          Schema.BYTES_SCHEMA,
          KAFKA_VALUE);
  assertRecordsEqual(expected, result.get(0));
}
Example 5
Source File: CloudPubSubSourceTaskTest.java From pubsub with Apache License 2.0
/**
 * Tests when the message(s) retrieved from Cloud Pub/Sub do have an attribute that matches
 * {@link #KAFKA_MESSAGE_TIMESTAMP_ATTRIBUTE} and {@link #KAFKA_MESSAGE_KEY_ATTRIBUTE}.
 */
@Test
public void testPollWithMessageTimestampAttribute() throws Exception {
  task.start(props);
  Map<String, String> attributes = new HashMap<>();
  attributes.put(KAFKA_MESSAGE_KEY_ATTRIBUTE, KAFKA_MESSAGE_KEY_ATTRIBUTE_VALUE);
  attributes.put(KAFKA_MESSAGE_TIMESTAMP_ATTRIBUTE, KAFKA_MESSAGE_TIMESTAMP_ATTRIBUTE_VALUE);
  ReceivedMessage rm = createReceivedMessage(ACK_ID1, CPS_MESSAGE, attributes);
  PullResponse stubbedPullResponse = PullResponse.newBuilder().addReceivedMessages(rm).build();
  when(subscriber.pull(any(PullRequest.class)).get()).thenReturn(stubbedPullResponse);
  List<SourceRecord> result = task.poll();
  verify(subscriber, never()).ackMessages(any(AcknowledgeRequest.class));
  assertEquals(1, result.size());
  SourceRecord expected =
      new SourceRecord(
          null,
          null,
          KAFKA_TOPIC,
          0,
          Schema.OPTIONAL_STRING_SCHEMA,
          KAFKA_MESSAGE_KEY_ATTRIBUTE_VALUE,
          Schema.BYTES_SCHEMA,
          KAFKA_VALUE,
          Long.parseLong(KAFKA_MESSAGE_TIMESTAMP_ATTRIBUTE_VALUE));
  assertRecordsEqual(expected, result.get(0));
}
Example 6
Source File: CloudPubSubSourceTaskTest.java From pubsub with Apache License 2.0
/**
 * Tests when the message(s) retrieved from Cloud Pub/Sub do have an attribute that matches
 * {@link #KAFKA_MESSAGE_KEY_ATTRIBUTE}.
 */
@Test
public void testPollWithMessageKeyAttribute() throws Exception {
  task.start(props);
  Map<String, String> attributes = new HashMap<>();
  attributes.put(KAFKA_MESSAGE_KEY_ATTRIBUTE, KAFKA_MESSAGE_KEY_ATTRIBUTE_VALUE);
  ReceivedMessage rm = createReceivedMessage(ACK_ID1, CPS_MESSAGE, attributes);
  PullResponse stubbedPullResponse = PullResponse.newBuilder().addReceivedMessages(rm).build();
  when(subscriber.pull(any(PullRequest.class)).get()).thenReturn(stubbedPullResponse);
  List<SourceRecord> result = task.poll();
  verify(subscriber, never()).ackMessages(any(AcknowledgeRequest.class));
  assertEquals(1, result.size());
  SourceRecord expected =
      new SourceRecord(
          null,
          null,
          KAFKA_TOPIC,
          0,
          Schema.OPTIONAL_STRING_SCHEMA,
          KAFKA_MESSAGE_KEY_ATTRIBUTE_VALUE,
          Schema.BYTES_SCHEMA,
          KAFKA_VALUE);
  assertRecordsEqual(expected, result.get(0));
}
Example 7
Source File: CloudPubSubSourceTaskTest.java From pubsub with Apache License 2.0
/**
 * Tests when the message(s) retrieved from Cloud Pub/Sub do not have an attribute that matches
 * {@link #KAFKA_MESSAGE_KEY_ATTRIBUTE}.
 */
@Test
public void testPollWithNoMessageKeyAttribute() throws Exception {
  task.start(props);
  ReceivedMessage rm = createReceivedMessage(ACK_ID1, CPS_MESSAGE, new HashMap<String, String>());
  PullResponse stubbedPullResponse = PullResponse.newBuilder().addReceivedMessages(rm).build();
  when(subscriber.pull(any(PullRequest.class)).get()).thenReturn(stubbedPullResponse);
  List<SourceRecord> result = task.poll();
  verify(subscriber, never()).ackMessages(any(AcknowledgeRequest.class));
  assertEquals(1, result.size());
  SourceRecord expected =
      new SourceRecord(
          null,
          null,
          KAFKA_TOPIC,
          0,
          Schema.OPTIONAL_STRING_SCHEMA,
          null,
          Schema.BYTES_SCHEMA,
          KAFKA_VALUE);
  assertRecordsEqual(expected, result.get(0));
}
Example 8
Source File: CloudPubSubSourceTask.java From pubsub with Apache License 2.0
private SourceRecord createRecordWithHeaders(Map<String, String> messageAttributes,
                                             Map<String, String> ack,
                                             String key,
                                             byte[] messageBytes,
                                             Long timestamp) {
  ConnectHeaders headers = new ConnectHeaders();
  for (Entry<String, String> attribute : messageAttributes.entrySet()) {
    if (!attribute.getKey().equals(kafkaMessageKeyAttribute)) {
      headers.addString(attribute.getKey(), attribute.getValue());
    }
  }
  return new SourceRecord(
      null,
      ack,
      kafkaTopic,
      selectPartition(key, messageBytes),
      Schema.OPTIONAL_STRING_SCHEMA,
      key,
      Schema.BYTES_SCHEMA,
      messageBytes,
      timestamp,
      headers);
}
Example 9
Source File: ToJSON.java From kafka-connect-transform-common with Apache License 2.0
SchemaAndValue schemaAndValue(Schema inputSchema, Object input) {
  final byte[] buffer = this.converter.fromConnectData("dummy", inputSchema, input);
  final Schema schema;
  final Object value;

  switch (this.config.outputSchema) {
    case STRING:
      value = new String(buffer, Charsets.UTF_8);
      schema = Schema.OPTIONAL_STRING_SCHEMA;
      break;
    case BYTES:
      value = buffer;
      schema = Schema.OPTIONAL_BYTES_SCHEMA;
      break;
    default:
      throw new UnsupportedOperationException(
          String.format(
              "Schema type (%s)'%s' is not supported.",
              ToJSONConfig.OUTPUT_SCHEMA_CONFIG,
              this.config.outputSchema
          )
      );
  }

  return new SchemaAndValue(schema, value);
}
Example 10
Source File: UnivocityFileReader.java From kafka-connect-fs with Apache License 2.0
private Schema strToSchema(String dataType) {
  switch (DataType.valueOf(dataType.trim().toUpperCase())) {
    case BYTE:
      return dataTypeMappingError && !allowNulls ? Schema.INT8_SCHEMA : Schema.OPTIONAL_INT8_SCHEMA;
    case SHORT:
      return dataTypeMappingError && !allowNulls ? Schema.INT16_SCHEMA : Schema.OPTIONAL_INT16_SCHEMA;
    case INT:
      return dataTypeMappingError && !allowNulls ? Schema.INT32_SCHEMA : Schema.OPTIONAL_INT32_SCHEMA;
    case LONG:
      return dataTypeMappingError && !allowNulls ? Schema.INT64_SCHEMA : Schema.OPTIONAL_INT64_SCHEMA;
    case FLOAT:
      return dataTypeMappingError && !allowNulls ? Schema.FLOAT32_SCHEMA : Schema.OPTIONAL_FLOAT32_SCHEMA;
    case DOUBLE:
      return dataTypeMappingError && !allowNulls ? Schema.FLOAT64_SCHEMA : Schema.OPTIONAL_FLOAT64_SCHEMA;
    case BOOLEAN:
      return dataTypeMappingError && !allowNulls ? Schema.BOOLEAN_SCHEMA : Schema.OPTIONAL_BOOLEAN_SCHEMA;
    case BYTES:
      return dataTypeMappingError && !allowNulls ? Schema.BYTES_SCHEMA : Schema.OPTIONAL_BYTES_SCHEMA;
    case STRING:
    default:
      return dataTypeMappingError && !allowNulls ? Schema.STRING_SCHEMA : Schema.OPTIONAL_STRING_SCHEMA;
  }
}
Example 11
Source File: JsonNodeTest.java From connect-utils with Apache License 2.0
@Test
public void nullableTests() throws IOException {
  final Schema[] schemas = new Schema[]{
      Schema.OPTIONAL_BOOLEAN_SCHEMA,
      Schema.OPTIONAL_FLOAT32_SCHEMA,
      Schema.OPTIONAL_FLOAT64_SCHEMA,
      Schema.OPTIONAL_INT8_SCHEMA,
      Schema.OPTIONAL_INT16_SCHEMA,
      Schema.OPTIONAL_INT32_SCHEMA,
      Schema.OPTIONAL_INT64_SCHEMA,
      Schema.OPTIONAL_STRING_SCHEMA,
      Decimal.builder(1).optional().build(),
      Timestamp.builder().optional().build(),
      Date.builder().optional().build(),
      Time.builder().optional().build(),
  };

  for (Schema schema : schemas) {
    JsonNode inputNode = null;
    Object actual = this.parser.parseJsonNode(schema, inputNode);
    assertNull(actual);

    inputNode = objectMapper.readTree("{\"foo\": null}");
    inputNode = inputNode.findValue("foo");
    actual = this.parser.parseJsonNode(schema, inputNode);
    assertNull(actual);
  }
}
Example 12
Source File: CloudPubSubSourceTaskTest.java From pubsub with Apache License 2.0
/**
 * Tests when the message retrieved from Cloud Pub/Sub has several attributes, including one
 * that matches {@link #KAFKA_MESSAGE_KEY_ATTRIBUTE}, and uses Kafka Record Headers to store them.
 */
@Test
public void testPollWithMultipleAttributesAndRecordHeaders() throws Exception {
  props.put(CloudPubSubSourceConnector.USE_KAFKA_HEADERS, "true");
  task.start(props);
  Map<String, String> attributes = new HashMap<>();
  attributes.put(KAFKA_MESSAGE_KEY_ATTRIBUTE, KAFKA_MESSAGE_KEY_ATTRIBUTE_VALUE);
  attributes.put("attribute1", "attribute_value1");
  attributes.put("attribute2", "attribute_value2");
  ReceivedMessage rm = createReceivedMessage(ACK_ID1, CPS_MESSAGE, attributes);
  PullResponse stubbedPullResponse = PullResponse.newBuilder().addReceivedMessages(rm).build();
  when(subscriber.pull(any(PullRequest.class)).get()).thenReturn(stubbedPullResponse);
  List<SourceRecord> result = task.poll();
  verify(subscriber, never()).ackMessages(any(AcknowledgeRequest.class));
  assertEquals(1, result.size());
  ConnectHeaders headers = new ConnectHeaders();
  headers.addString("attribute1", "attribute_value1");
  headers.addString("attribute2", "attribute_value2");
  SourceRecord expected =
      new SourceRecord(
          null,
          null,
          KAFKA_TOPIC,
          0,
          Schema.OPTIONAL_STRING_SCHEMA,
          KAFKA_MESSAGE_KEY_ATTRIBUTE_VALUE,
          Schema.BYTES_SCHEMA,
          KAFKA_VALUE,
          Long.parseLong(KAFKA_MESSAGE_TIMESTAMP_ATTRIBUTE_VALUE),
          headers);
  assertRecordsEqual(expected, result.get(0));
}
Example 13
Source File: CloudPubSubSourceTaskTest.java From pubsub with Apache License 2.0
/**
 * Tests when the message retrieved from Cloud Pub/Sub has several attributes, including one
 * that matches {@link #KAFKA_MESSAGE_KEY_ATTRIBUTE}.
 */
@Test
public void testPollWithMultipleAttributes() throws Exception {
  task.start(props);
  Map<String, String> attributes = new HashMap<>();
  attributes.put(KAFKA_MESSAGE_KEY_ATTRIBUTE, KAFKA_MESSAGE_KEY_ATTRIBUTE_VALUE);
  attributes.put("attribute1", "attribute_value1");
  attributes.put("attribute2", "attribute_value2");
  ReceivedMessage rm = createReceivedMessage(ACK_ID1, CPS_MESSAGE, attributes);
  PullResponse stubbedPullResponse = PullResponse.newBuilder().addReceivedMessages(rm).build();
  when(subscriber.pull(any(PullRequest.class)).get()).thenReturn(stubbedPullResponse);
  List<SourceRecord> result = task.poll();
  verify(subscriber, never()).ackMessages(any(AcknowledgeRequest.class));
  assertEquals(1, result.size());
  Schema expectedSchema =
      SchemaBuilder.struct()
          .field(ConnectorUtils.KAFKA_MESSAGE_CPS_BODY_FIELD, Schema.BYTES_SCHEMA)
          .field("attribute1", Schema.STRING_SCHEMA)
          .field("attribute2", Schema.STRING_SCHEMA)
          .build();
  Struct expectedValue =
      new Struct(expectedSchema)
          .put(ConnectorUtils.KAFKA_MESSAGE_CPS_BODY_FIELD, KAFKA_VALUE)
          .put("attribute1", "attribute_value1")
          .put("attribute2", "attribute_value2");
  SourceRecord expected =
      new SourceRecord(
          null,
          null,
          KAFKA_TOPIC,
          0,
          Schema.OPTIONAL_STRING_SCHEMA,
          KAFKA_MESSAGE_KEY_ATTRIBUTE_VALUE,
          expectedSchema,
          expectedValue);
  assertRecordsEqual(expected, result.get(0));
}
Example 14
Source File: SchemaTest.java From schema-registry-transfer-smt with Apache License 2.0
@Test
public void testNonByteTypeSchemas() {
  Schema[] schemas = new Schema[]{
      // Boolean
      Schema.BOOLEAN_SCHEMA,
      Schema.OPTIONAL_BOOLEAN_SCHEMA,

      // Integers
      Schema.INT8_SCHEMA,
      Schema.INT16_SCHEMA,
      Schema.INT32_SCHEMA,
      Schema.INT64_SCHEMA,
      Schema.OPTIONAL_INT8_SCHEMA,
      Schema.OPTIONAL_INT16_SCHEMA,
      Schema.OPTIONAL_INT32_SCHEMA,
      Schema.OPTIONAL_INT64_SCHEMA,

      // Floats
      Schema.FLOAT32_SCHEMA,
      Schema.FLOAT64_SCHEMA,
      Schema.OPTIONAL_FLOAT32_SCHEMA,
      Schema.OPTIONAL_FLOAT64_SCHEMA,

      // String
      Schema.STRING_SCHEMA,
      Schema.OPTIONAL_STRING_SCHEMA,

      // Struct with a field of bytes
      SchemaBuilder.struct().name("record")
          .field("foo", Schema.BYTES_SCHEMA)
          .build(),
      SchemaBuilder.struct().name("record")
          .field("foo", Schema.OPTIONAL_BYTES_SCHEMA)
          .build(),

      // map<bytes, bytes>
      SchemaBuilder.map(Schema.BYTES_SCHEMA, Schema.OPTIONAL_BYTES_SCHEMA).build(),

      // array<bytes>
      SchemaBuilder.array(Schema.OPTIONAL_BYTES_SCHEMA).build()
  };

  for (Schema s : schemas) {
    assertFalse(ConnectSchemaUtil.isBytesSchema(s));
  }
}
Example 15
Source File: BytesToString.java From kafka-connect-transform-common with Apache License 2.0
@Override
protected SchemaAndValue processBytes(R record, Schema inputSchema, byte[] input) {
  final Schema outputSchema = inputSchema.isOptional()
      ? Schema.OPTIONAL_STRING_SCHEMA
      : Schema.STRING_SCHEMA;
  final String output = new String(input, this.config.charset);
  return new SchemaAndValue(outputSchema, output);
}
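The pattern above, deriving the output schema's optionality from the input schema, matters because Connect's value validation rejects null against a required schema. A short hedged sketch demonstrating the difference, independent of the project above:

    import org.apache.kafka.connect.data.ConnectSchema;
    import org.apache.kafka.connect.data.Schema;
    import org.apache.kafka.connect.errors.DataException;

    public class OptionalityMatters {
        public static void main(String[] args) {
            // null is a valid value for the optional string schema...
            ConnectSchema.validateValue(Schema.OPTIONAL_STRING_SCHEMA, null);
            System.out.println("OPTIONAL_STRING_SCHEMA accepts null");

            // ...but not for the required one.
            try {
                ConnectSchema.validateValue(Schema.STRING_SCHEMA, null);
            } catch (DataException e) {
                System.out.println("STRING_SCHEMA rejects null: " + e.getMessage());
            }
        }
    }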
Example 16
Source File: JsonFileReader.java From kafka-connect-fs with Apache License 2.0
private static Schema extractSchema(JsonNode jsonNode) {
  switch (jsonNode.getNodeType()) {
    case BOOLEAN:
      return Schema.OPTIONAL_BOOLEAN_SCHEMA;
    case NUMBER:
      if (jsonNode.isShort()) {
        return Schema.OPTIONAL_INT8_SCHEMA;
      } else if (jsonNode.isInt()) {
        return Schema.OPTIONAL_INT32_SCHEMA;
      } else if (jsonNode.isLong()) {
        return Schema.OPTIONAL_INT64_SCHEMA;
      } else if (jsonNode.isFloat()) {
        return Schema.OPTIONAL_FLOAT32_SCHEMA;
      } else if (jsonNode.isDouble()) {
        return Schema.OPTIONAL_FLOAT64_SCHEMA;
      } else if (jsonNode.isBigInteger()) {
        return Schema.OPTIONAL_INT64_SCHEMA;
      } else if (jsonNode.isBigDecimal()) {
        return Schema.OPTIONAL_FLOAT64_SCHEMA;
      } else {
        return Schema.OPTIONAL_FLOAT64_SCHEMA;
      }
    case STRING:
      return Schema.OPTIONAL_STRING_SCHEMA;
    case BINARY:
      return Schema.OPTIONAL_BYTES_SCHEMA;
    case ARRAY:
      Iterable<JsonNode> elements = jsonNode::elements;
      Schema arraySchema = StreamSupport.stream(elements.spliterator(), false)
          .findFirst().map(JsonFileReader::extractSchema)
          .orElse(SchemaBuilder.struct().build());
      return SchemaBuilder.array(arraySchema).build();
    case OBJECT:
      SchemaBuilder builder = SchemaBuilder.struct();
      jsonNode.fields()
          .forEachRemaining(field -> builder.field(field.getKey(), extractSchema(field.getValue())));
      return builder.build();
    default:
      return SchemaBuilder.struct().optional().build();
  }
}
Example 17
Source File: BaseRecordBuilder.java From kafka-connect-mq-source with Apache License 2.0
/**
 * Gets the key to use for the Kafka Connect SourceRecord.
 *
 * @param context the JMS context to use for building messages
 * @param topic   the Kafka topic
 * @param message the message
 *
 * @return the Kafka Connect SourceRecord's key
 *
 * @throws JMSException Message could not be converted
 */
public SchemaAndValue getKey(JMSContext context, String topic, Message message) throws JMSException {
  Schema keySchema = null;
  Object key = null;
  String keystr;

  switch (keyheader) {
    case MESSAGE_ID:
      keySchema = Schema.OPTIONAL_STRING_SCHEMA;
      keystr = message.getJMSMessageID();
      if (keystr.startsWith("ID:", 0)) {
        key = keystr.substring(3);
      } else {
        key = keystr;
      }
      break;
    case CORRELATION_ID:
      keySchema = Schema.OPTIONAL_STRING_SCHEMA;
      keystr = message.getJMSCorrelationID();
      if (keystr.startsWith("ID:", 0)) {
        key = keystr.substring(3);
      } else {
        key = keystr;
      }
      break;
    case CORRELATION_ID_AS_BYTES:
      keySchema = Schema.OPTIONAL_BYTES_SCHEMA;
      key = message.getJMSCorrelationIDAsBytes();
      break;
    case DESTINATION:
      keySchema = Schema.OPTIONAL_STRING_SCHEMA;
      key = message.getJMSDestination().toString();
      break;
    default:
      break;
  }

  return new SchemaAndValue(keySchema, key);
}
Example 18
Source File: CloudPubSubSourceTaskTest.java From pubsub with Apache License 2.0
/**
 * Tests that the correct partition is assigned when the partition scheme is "hash_key". The test
 * has two cases, one where a key does exist and one where it does not.
 */
@Test
public void testPollWithPartitionSchemeHashKey() throws Exception {
  props.put(
      CloudPubSubSourceConnector.KAFKA_PARTITION_SCHEME_CONFIG,
      CloudPubSubSourceConnector.PartitionScheme.HASH_KEY.toString());
  task.start(props);
  Map<String, String> attributes = new HashMap<>();
  attributes.put(KAFKA_MESSAGE_KEY_ATTRIBUTE, KAFKA_MESSAGE_KEY_ATTRIBUTE_VALUE);
  ReceivedMessage withoutKey = createReceivedMessage(ACK_ID1, CPS_MESSAGE, new HashMap<String, String>());
  ReceivedMessage withKey = createReceivedMessage(ACK_ID2, CPS_MESSAGE, attributes);
  PullResponse stubbedPullResponse =
      PullResponse.newBuilder()
          .addReceivedMessages(0, withKey)
          .addReceivedMessages(1, withoutKey)
          .build();
  when(subscriber.pull(any(PullRequest.class)).get()).thenReturn(stubbedPullResponse);
  List<SourceRecord> result = task.poll();
  verify(subscriber, never()).ackMessages(any(AcknowledgeRequest.class));
  assertEquals(2, result.size());
  SourceRecord expectedForMessageWithKey =
      new SourceRecord(
          null,
          null,
          KAFKA_TOPIC,
          KAFKA_MESSAGE_KEY_ATTRIBUTE_VALUE.hashCode() % Integer.parseInt(KAFKA_PARTITIONS),
          Schema.OPTIONAL_STRING_SCHEMA,
          KAFKA_MESSAGE_KEY_ATTRIBUTE_VALUE,
          Schema.BYTES_SCHEMA,
          KAFKA_VALUE);
  SourceRecord expectedForMessageWithoutKey =
      new SourceRecord(
          null,
          null,
          KAFKA_TOPIC,
          0,
          Schema.OPTIONAL_STRING_SCHEMA,
          null,
          Schema.BYTES_SCHEMA,
          KAFKA_VALUE);
  assertRecordsEqual(expectedForMessageWithKey, result.get(0));
  assertArrayEquals((byte[]) expectedForMessageWithoutKey.value(), (byte[]) result.get(1).value());
}
Example 19
Source File: OracleSourceConnectorUtils.java From kafka-connect-oracle with Apache License 2.0
protected void loadTable(String owner, String tableName, String operation) throws SQLException {
  log.info("Getting dictionary details for table : {}", tableName);
  //SchemaBuilder dataSchemaBuiler = SchemaBuilder.struct().name((config.getDbNameAlias()+DOT+owner+DOT+tableName+DOT+"Value").toLowerCase());
  SchemaBuilder dataSchemaBuiler = SchemaBuilder.struct().name("value");
  String mineTableColsSql = OracleConnectorSQL.TABLE_WITH_COLS;
  if (config.getMultitenant()) {
    mineTableColsSql = OracleConnectorSQL.TABLE_WITH_COLS_CDB;
  }
  mineTableColsSql = mineTableColsSql.replace("$TABLE_OWNER$", owner).replace("$TABLE_NAME$", tableName);
  /*if (config.getMultitenant()) {
    mineTableCols = dbConn.prepareCall(sql.getContainerDictionarySQL());
  } else {
    mineTableCols = dbConn.prepareCall(sql.getDictionarySQL());
  }
  mineTableCols.setString(ConnectorSQL.PARAMETER_OWNER, owner);
  mineTableCols.setString(ConnectorSQL.PARAMETER_TABLE_NAME, tableName);*/
  mineTableCols = dbConn.prepareCall(mineTableColsSql);
  mineTableColsResultSet = mineTableCols.executeQuery();
  if (!mineTableColsResultSet.isBeforeFirst()) {
    // TODO: consider throwing up here, or an NPE will be thrown in OracleSourceTask.poll()
    log.warn("mineTableCols has no results for {}.{}", owner, tableName);
  }
  while (mineTableColsResultSet.next()) {
    String columnName = mineTableColsResultSet.getString(COLUMN_NAME_FIELD);
    Boolean nullable = mineTableColsResultSet.getString(NULLABLE_FIELD).equals("Y") ? true : false;
    String dataType = mineTableColsResultSet.getString(DATA_TYPE_FIELD);
    if (dataType.contains(TIMESTAMP_TYPE)) dataType = TIMESTAMP_TYPE;
    int dataLength = mineTableColsResultSet.getInt(DATA_LENGTH_FIELD);
    int dataScale = mineTableColsResultSet.getInt(DATA_SCALE_FIELD);
    int dataPrecision = mineTableColsResultSet.getInt(DATA_PRECISION_FIELD);
    Boolean pkColumn = mineTableColsResultSet.getInt(PK_COLUMN_FIELD) == 1 ? true : false;
    Boolean uqColumn = mineTableColsResultSet.getInt(UQ_COLUMN_FIELD) == 1 ? true : false;
    Schema columnSchema = null;

    switch (dataType) {
      case NUMBER_TYPE: {
        if (dataScale > 0 || dataPrecision == 0) {
          columnSchema = nullable ? Schema.OPTIONAL_FLOAT64_SCHEMA : Schema.FLOAT64_SCHEMA;
        } else {
          switch (dataPrecision) {
            case 1:
            case 2:
              columnSchema = nullable ? Schema.OPTIONAL_INT8_SCHEMA : Schema.INT8_SCHEMA;
              break;
            case 3:
            case 4:
              columnSchema = nullable ? Schema.OPTIONAL_INT16_SCHEMA : Schema.INT16_SCHEMA;
              break;
            case 5:
            case 6:
            case 7:
            case 8:
            case 9:
              columnSchema = nullable ? Schema.OPTIONAL_INT32_SCHEMA : Schema.INT32_SCHEMA;
              break;
            default:
              columnSchema = nullable ? Schema.OPTIONAL_INT64_SCHEMA : Schema.INT64_SCHEMA;
              break;
          }
        }
        break;
      }
      case "CHAR":
      case "VARCHAR":
      case "VARCHAR2":
      case "NCHAR":
      case "NVARCHAR":
      case "NVARCHAR2":
      case "LONG":
      case "CLOB": {
        columnSchema = nullable ? Schema.OPTIONAL_STRING_SCHEMA : Schema.STRING_SCHEMA;
        break;
      }
      case DATE_TYPE:
      case TIMESTAMP_TYPE: {
        columnSchema = nullable ? OPTIONAL_TIMESTAMP_SCHEMA : TIMESTAMP_SCHEMA;
        break;
      }
      default:
        columnSchema = nullable ? Schema.OPTIONAL_STRING_SCHEMA : Schema.STRING_SCHEMA;
        break;
    }

    dataSchemaBuiler.field(columnName, columnSchema);
    com.ecer.kafka.connect.oracle.models.Column column =
        new com.ecer.kafka.connect.oracle.models.Column(owner, tableName, columnName, nullable,
            dataType, dataLength, dataScale, pkColumn, uqColumn, columnSchema);
    String keyTabCols = owner + DOT + tableName + DOT + columnName;
    tabColsMap.put(keyTabCols, column);
    log.debug("tabColsMap entry added: {} = {}", keyTabCols, column.toString());
  }
  Schema tSchema = dataSchemaBuiler.optional().build();
  tableSchema.put(owner + DOT + tableName, tSchema);
  mineTableColsResultSet.close();
  mineTableCols.close();
}
Example 20
Source File: CloudPubSubSourceTaskTest.java From pubsub with Apache License 2.0
/**
 * Tests that the correct partition is assigned when the partition scheme is "round_robin". The
 * test makes sure to submit an appropriate number of messages to poll() so that all partitions
 * in the round robin are hit once.
 */
@Test
public void testPollWithPartitionSchemeRoundRobin() throws Exception {
  task.start(props);
  ReceivedMessage rm1 = createReceivedMessage(ACK_ID1, CPS_MESSAGE, new HashMap<String, String>());
  ReceivedMessage rm2 = createReceivedMessage(ACK_ID2, CPS_MESSAGE, new HashMap<String, String>());
  ReceivedMessage rm3 = createReceivedMessage(ACK_ID3, CPS_MESSAGE, new HashMap<String, String>());
  ReceivedMessage rm4 = createReceivedMessage(ACK_ID4, CPS_MESSAGE, new HashMap<String, String>());
  PullResponse stubbedPullResponse =
      PullResponse.newBuilder()
          .addReceivedMessages(0, rm1)
          .addReceivedMessages(1, rm2)
          .addReceivedMessages(2, rm3)
          .addReceivedMessages(3, rm4)
          .build();
  when(subscriber.pull(any(PullRequest.class)).get()).thenReturn(stubbedPullResponse);
  List<SourceRecord> result = task.poll();
  verify(subscriber, never()).ackMessages(any(AcknowledgeRequest.class));
  assertEquals(4, result.size());
  SourceRecord expected1 =
      new SourceRecord(null, null, KAFKA_TOPIC, 0, Schema.OPTIONAL_STRING_SCHEMA, null, Schema.BYTES_SCHEMA, KAFKA_VALUE);
  SourceRecord expected2 =
      new SourceRecord(null, null, KAFKA_TOPIC, 1, Schema.OPTIONAL_STRING_SCHEMA, null, Schema.BYTES_SCHEMA, KAFKA_VALUE);
  SourceRecord expected3 =
      new SourceRecord(null, null, KAFKA_TOPIC, 2, Schema.OPTIONAL_STRING_SCHEMA, null, Schema.BYTES_SCHEMA, KAFKA_VALUE);
  SourceRecord expected4 =
      new SourceRecord(null, null, KAFKA_TOPIC, 0, Schema.OPTIONAL_STRING_SCHEMA, null, Schema.BYTES_SCHEMA, KAFKA_VALUE);
  assertRecordsEqual(expected1, result.get(0));
  assertRecordsEqual(expected2, result.get(1));
  assertRecordsEqual(expected3, result.get(2));
  assertRecordsEqual(expected4, result.get(3));
}