io.debezium.util.Testing Java Examples

The following examples show how to use io.debezium.util.Testing. Each snippet is taken from an open-source project; the source file, project, and license are noted above it.
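Across these examples, a small set of Testing members does most of the work: Testing.Files.createTestingDirectory and Testing.Files.delete manage scratch directories for embedded Kafka clusters and database history files, Testing.Print.enable and Testing.Debug.enable switch on diagnostic output for a test run, and Testing.debug logs a message only while debugging is enabled. The minimal sketch below pulls those calls together; the class name and directory name are illustrative, not taken from any of the projects that follow.

import java.io.File;

import io.debezium.util.Testing;

public class TestingQuickstart {

    public static void main(String[] args) {
        // Create (or re-create) a scratch directory under Debezium's test data root
        File dataDir = Testing.Files.createTestingDirectory("quickstart-data", true);

        // Turn on console output for the print/debug helpers during the test
        Testing.Print.enable();
        Testing.Debug.enable();
        Testing.debug("Scratch directory created at " + dataDir.getAbsolutePath());

        // Remove the directory once the test is done
        Testing.Files.delete(dataDir);
    }
}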
Example #1
Source File: SimpleAclOperatorIT.java    From strimzi-kafka-operator with Apache License 2.0
@BeforeAll
public static void beforeAll() {
    vertx = Vertx.vertx();

    try {
        kafkaCluster =
                new KafkaCluster()
                        .usingDirectory(Testing.Files.createTestingDirectory("simple-acl-operator-integration-test"))
                        .deleteDataPriorToStartup(true)
                        .deleteDataUponShutdown(true)
                        .addBrokers(1)
                        .withKafkaConfiguration(kafkaClusterConfig())
                        .startup();
    } catch (IOException e) {
        // Fail the test if the embedded Kafka cluster cannot start
        assertThat(false, is(true));
    }

    simpleAclOperator = new SimpleAclOperator(vertx,
            new DefaultAdminClientProvider().createAdminClient(kafkaCluster.brokerList(), null, null, null));
}
 
Example #2
Source File: KafkaTestResource.java    From quarkus with Apache License 2.0
@Override
public Map<String, String> start() {
    try {
        Properties props = new Properties();
        props.setProperty("zookeeper.connection.timeout.ms", "45000");
        File directory = Testing.Files.createTestingDirectory("kafka-data", true);
        kafka = new KafkaCluster().withPorts(2182, 19092)
                .addBrokers(1)
                .usingDirectory(directory)
                .deleteDataUponShutdown(true)
                .withKafkaConfiguration(props)
                .deleteDataPriorToStartup(true)
                .startup();
    } catch (Exception e) {
        throw new RuntimeException(e);
    }
    return Collections.emptyMap();
}
 
Example #3
Source File: KafkaFacade.java    From strimzi-kafka-bridge with Apache License 2.0
private static KafkaCluster kafkaCluster() {
    if (kafkaCluster != null) {
        throw new IllegalStateException();
    }
    dataDir = Testing.Files.createTestingDirectory(DATA_DIR);

    Properties props = new Properties();
    props.put("auto.create.topics.enable", "false");

    kafkaCluster =
        new KafkaCluster()
            .usingDirectory(dataDir)
            .withPorts(ZOOKEEPER_PORT, KAFKA_PORT)
            .withKafkaConfiguration(props);
    return kafkaCluster;
}
 
Example #4
Source File: StreamingDatatypesIT.java    From debezium-incubator with Apache License 2.0
@Before
public void before() throws Exception {
    setConsumeTimeout(TestHelper.defaultMessageConsumerPollTimeout(), TimeUnit.SECONDS);
    dropTables();
    initializeConnectorTestFramework();
    Testing.Files.delete(TestHelper.DB_HISTORY_PATH);

    Configuration config = connectorConfig()
            .build();

    start(OracleConnector.class, config);
    assertConnectorIsRunning();

    waitForSnapshotToBeCompleted(TestHelper.CONNECTOR_NAME, TestHelper.SERVER_NAME);
    createTables();
}
 
Example #5
Source File: Db2ConnectorIT.java    From debezium-incubator with Apache License 2.0
@Before
public void before() throws SQLException {
    connection = TestHelper.testConnection();
    connection.execute("DELETE FROM ASNCDC.IBMSNAP_REGISTER");
    connection.execute(
            "CREATE TABLE tablea (id int not null, cola varchar(30), primary key (id))",
            "CREATE TABLE tableb (id int not null, colb varchar(30), primary key (id))",
            "CREATE TABLE masked_hashed_column_table (id int not null, name varchar(255), name2 varchar(255), name3 varchar(20), primary key (id))",
            "CREATE TABLE truncated_column_table (id int not null, name varchar(20), primary key (id))",
            "CREATE TABLE dt_table (id int not null, c1 int, c2 int, c3a numeric(5,2), c3b varchar(128), f1 float(10), f2 decimal(8,4), primary key(id))",
            "INSERT INTO tablea VALUES(1, 'a')");
    TestHelper.enableTableCdc(connection, "TABLEA");
    TestHelper.enableTableCdc(connection, "TABLEB");
    TestHelper.enableTableCdc(connection, "MASKED_HASHED_COLUMN_TABLE");
    TestHelper.enableTableCdc(connection, "TRUNCATED_COLUMN_TABLE");
    TestHelper.enableTableCdc(connection, "DT_TABLE");
    initializeConnectorTestFramework();
    Testing.Files.delete(TestHelper.DB_HISTORY_PATH);
    Testing.Print.enable();
}
 
Example #6
Source File: SchemaHistoryTopicIT.java    From debezium-incubator with Apache License 2.0
@Before
public void before() throws SQLException {
    connection = TestHelper.testConnection();
    TestHelper.dropTable(connection, "debezium.tablea");
    TestHelper.dropTable(connection, "debezium.tableb");
    TestHelper.dropTable(connection, "debezium.tablec");
    connection.execute(
            "CREATE TABLE debezium.tablea (id numeric(9,0) not null, cola varchar2(30), primary key(id))",
            "CREATE TABLE debezium.tableb (id numeric(9,0) not null, colb varchar2(30), primary key(id))",
            "CREATE TABLE debezium.tablec (id numeric(9,0) not null, colc varchar2(30), primary key(id))");

    connection.execute("GRANT SELECT ON debezium.tablea to  " + TestHelper.CONNECTOR_USER);
    connection.execute("ALTER TABLE debezium.tablea ADD SUPPLEMENTAL LOG DATA (ALL) COLUMNS");
    connection.execute("GRANT SELECT ON debezium.tableb to  " + TestHelper.CONNECTOR_USER);
    connection.execute("ALTER TABLE debezium.tableb ADD SUPPLEMENTAL LOG DATA (ALL) COLUMNS");
    connection.execute("GRANT SELECT ON debezium.tablec to  " + TestHelper.CONNECTOR_USER);
    connection.execute("ALTER TABLE debezium.tablec ADD SUPPLEMENTAL LOG DATA (ALL) COLUMNS");

    initializeConnectorTestFramework();
    Testing.Files.delete(TestHelper.DB_HISTORY_PATH);
}
 
Example #7
Source File: SnapshotDatatypesIT.java    From debezium-incubator with Apache License 2.0
@Before
public void before() throws Exception {
    setConsumeTimeout(TestHelper.defaultMessageConsumerPollTimeout(), TimeUnit.SECONDS);
    initializeConnectorTestFramework();
    Testing.Debug.enable();
    Testing.Files.delete(TestHelper.DB_HISTORY_PATH);

    Configuration config = connectorConfig()
            .build();

    start(OracleConnector.class, config);
    assertConnectorIsRunning();

    waitForSnapshotToBeCompleted(TestHelper.CONNECTOR_NAME, TestHelper.SERVER_NAME);
}
 
Example #8
Source File: OracleConnectorIT.java    From debezium-incubator with Apache License 2.0
@Before
public void before() throws SQLException {
    connection.execute("delete from debezium.customer");
    connection.execute("delete from debezium.masked_hashed_column_table");
    connection.execute("delete from debezium.truncated_column_table");
    connection.execute("delete from debezium.dt_table");
    setConsumeTimeout(TestHelper.defaultMessageConsumerPollTimeout(), TimeUnit.SECONDS);
    initializeConnectorTestFramework();
    Testing.Files.delete(TestHelper.DB_HISTORY_PATH);
}
 
Example #9
Source File: AbstractOracleDatatypesTest.java    From debezium-incubator with Apache License 2.0
@Test
public void timeTypes() throws Exception {
    int expectedRecordCount = 0;

    if (insertRecordsDuringTest()) {
        insertTimeTypes();
    }

    Testing.debug("Inserted");
    expectedRecordCount++;

    final SourceRecords records = consumeRecordsByTopic(expectedRecordCount);

    List<SourceRecord> testTableRecords = records.recordsForTopic("server1.DEBEZIUM.TYPE_TIME");
    assertThat(testTableRecords).hasSize(expectedRecordCount);
    SourceRecord record = testTableRecords.get(0);

    VerifyRecord.isValid(record);

    // insert
    if (insertRecordsDuringTest()) {
        VerifyRecord.isValidInsert(record, "ID", 1);
    }
    else {
        VerifyRecord.isValidRead(record, "ID", 1);
    }

    Struct after = (Struct) ((Struct) record.value()).get("after");
    assertRecord(after, EXPECTED_TIME);
}
 
Example #10
Source File: AbstractOracleDatatypesTest.java    From debezium-incubator with Apache License 2.0
@Test
public void intTypes() throws Exception {
    int expectedRecordCount = 0;

    if (insertRecordsDuringTest()) {
        insertIntTypes();
    }

    Testing.debug("Inserted");
    expectedRecordCount++;

    final SourceRecords records = consumeRecordsByTopic(expectedRecordCount);

    List<SourceRecord> testTableRecords = records.recordsForTopic("server1.DEBEZIUM.TYPE_INT");
    assertThat(testTableRecords).hasSize(expectedRecordCount);
    SourceRecord record = testTableRecords.get(0);

    VerifyRecord.isValid(record);

    // insert
    if (insertRecordsDuringTest()) {
        VerifyRecord.isValidInsert(record, "ID", 1);
    }
    else {
        VerifyRecord.isValidRead(record, "ID", 1);
    }

    Struct after = (Struct) ((Struct) record.value()).get("after");
    assertRecord(after, EXPECTED_INT);
}
 
Example #11
Source File: TransactionMetadataIT.java    From debezium-incubator with Apache License 2.0
@Before
public void before() throws SQLException {
    connection.execute("delete from debezium.customer");
    setConsumeTimeout(TestHelper.defaultMessageConsumerPollTimeout(), TimeUnit.SECONDS);
    initializeConnectorTestFramework();
    Testing.Files.delete(TestHelper.DB_HISTORY_PATH);
}
 
Example #12
Source File: KafkaTestBase.java    From smallrye-reactive-messaging with Apache License 2.0
@BeforeClass
public static void startKafkaBroker() throws IOException {
    Properties props = new Properties();
    props.setProperty("zookeeper.connection.timeout.ms", "10000");
    File directory = Testing.Files.createTestingDirectory(System.getProperty("java.io.tmpdir"), true);
    kafka = new KafkaCluster().withPorts(2182, 9092).addBrokers(1)
            .usingDirectory(directory)
            .deleteDataUponShutdown(false)
            .withKafkaConfiguration(props)
            .deleteDataPriorToStartup(true)
            .startup();
}
 
Example #13
Source File: AbstractOracleDatatypesTest.java    From debezium-incubator with Apache License 2.0
@Test
public void fpTypes() throws Exception {
    int expectedRecordCount = 0;

    if (insertRecordsDuringTest()) {
        insertFpTypes();
    }

    Testing.debug("Inserted");
    expectedRecordCount++;

    final SourceRecords records = consumeRecordsByTopic(expectedRecordCount);

    List<SourceRecord> testTableRecords = records.recordsForTopic("server1.DEBEZIUM.TYPE_FP");
    assertThat(testTableRecords).hasSize(expectedRecordCount);
    SourceRecord record = testTableRecords.get(0);

    VerifyRecord.isValid(record);

    // insert
    if (insertRecordsDuringTest()) {
        VerifyRecord.isValidInsert(record, "ID", 1);
    }
    else {
        VerifyRecord.isValidRead(record, "ID", 1);
    }

    Struct after = (Struct) ((Struct) record.value()).get("after");
    assertRecord(after, EXPECTED_FP);
}
 
Example #14
Source File: AbstractOracleDatatypesTest.java    From debezium-incubator with Apache License 2.0
@Test
public void stringTypes() throws Exception {
    int expectedRecordCount = 0;

    if (insertRecordsDuringTest()) {
        insertStringTypes();
    }

    Testing.debug("Inserted");
    expectedRecordCount++;

    final SourceRecords records = consumeRecordsByTopic(expectedRecordCount);

    List<SourceRecord> testTableRecords = records.recordsForTopic("server1.DEBEZIUM.TYPE_STRING");
    assertThat(testTableRecords).hasSize(expectedRecordCount);
    SourceRecord record = testTableRecords.get(0);

    VerifyRecord.isValid(record);

    // insert
    if (insertRecordsDuringTest()) {
        VerifyRecord.isValidInsert(record, "ID", 1);
    }
    else {
        VerifyRecord.isValidRead(record, "ID", 1);
    }

    Struct after = (Struct) ((Struct) record.value()).get("after");
    assertRecord(after, EXPECTED_STRING);
}
 
Example #15
Source File: TransactionMetadataIT.java    From debezium-incubator with Apache License 2.0
@Before
public void before() throws SQLException {
    connection = TestHelper.testConnection();
    connection.execute("DELETE FROM ASNCDC.IBMSNAP_REGISTER");
    connection.execute(
            "CREATE TABLE tablea (id int not null, cola varchar(30), primary key (id))",
            "CREATE TABLE tableb (id int not null, colb varchar(30), primary key (id))",
            "INSERT INTO tablea VALUES(1, 'a')");
    TestHelper.enableTableCdc(connection, "TABLEA");
    TestHelper.enableTableCdc(connection, "TABLEB");
    initializeConnectorTestFramework();
    Testing.Files.delete(TestHelper.DB_HISTORY_PATH);
    Testing.Print.enable();
}
 
Example #16
Source File: KafkaClusterTestBase.java    From vertx-kafka-client with Apache License 2.0
protected static KafkaCluster kafkaCluster() {
  if (kafkaCluster != null) {
    throw new IllegalStateException();
  }
  dataDir = Testing.Files.createTestingDirectory("cluster");
  kafkaCluster = new KafkaCluster().usingDirectory(dataDir).withPorts(2181, 9092);
  return kafkaCluster;
}
 
Example #17
Source File: SchemaHistoryTopicIT.java    From debezium-incubator with Apache License 2.0
@Before
public void before() throws SQLException {
    connection = TestHelper.testConnection();
    connection.execute("DELETE FROM ASNCDC.IBMSNAP_REGISTER");
    connection.execute(
            "CREATE TABLE tablea (id int not null, cola varchar(30), primary key(id))",
            "CREATE TABLE tableb (id int not null, colb varchar(30), primary key(id))",
            "CREATE TABLE tablec (id int not null, colc varchar(30), primary key(id))");
    TestHelper.enableTableCdc(connection, "TABLEA");
    TestHelper.enableTableCdc(connection, "TABLEB");

    initializeConnectorTestFramework();
    Testing.Files.delete(TestHelper.DB_HISTORY_PATH);
}
 
Example #18
Source File: KafkaSASLTestResource.java    From quarkus with Apache License 2.0
@Override
public Map<String, String> start() {
    try {
        File directory = Testing.Files.createTestingDirectory("kafka-data-sasl", true);

        enableServerJaasConf();

        Properties props = new Properties();
        props.setProperty("zookeeper.connection.timeout.ms", "45000");
        props.setProperty("listener.security.protocol.map", "CLIENT:SASL_PLAINTEXT");
        props.setProperty("listeners", "CLIENT://:19094");
        props.setProperty("inter.broker.listener.name", "CLIENT");

        props.setProperty("sasl.enabled.mechanisms", "PLAIN");
        props.setProperty("sasl.mechanism.inter.broker.protocol", "PLAIN");

        kafka = new KafkaCluster()
                .withPorts(2184, 19094)
                .addBrokers(1)
                .usingDirectory(directory)
                .deleteDataUponShutdown(true)
                .withKafkaConfiguration(props)
                .deleteDataPriorToStartup(true)
                .startup();
    } catch (Exception e) {
        throw new RuntimeException(e);
    }

    return Collections.emptyMap();
}
 
Example #19
Source File: Main.java    From redpipe with Apache License 2.0
private static void onStart() {
    System.err.println("Started");

    // Kafka setup for the example
    File dataDir = Testing.Files.createTestingDirectory("cluster");
    dataDir.deleteOnExit();
    KafkaCluster kafkaCluster;
    try {
        kafkaCluster = new KafkaCluster()
                .usingDirectory(dataDir)
                .withPorts(2181, 9092)
                .addBrokers(1)
                .deleteDataPriorToStartup(true)
                .startup();
    } catch (IOException e) {
        throw new RuntimeException(e);
    }

    // Deploy the dashboard
    JsonObject consumerConfig = new JsonObject((Map) kafkaCluster.useTo()
            .getConsumerProperties("the_group", "the_client", OffsetResetStrategy.LATEST));

    AppGlobals globals = AppGlobals.get();

    // Create the consumer
    KafkaConsumer<String, JsonObject> consumer = KafkaConsumer.create(globals.getVertx(), (Map) consumerConfig.getMap(),
            String.class, JsonObject.class);

    BehaviorSubject<JsonObject> consumerReporter = BehaviorSubject.create();
    consumer.toObservable().subscribe(record -> consumerReporter.onNext(record.value()));

    // Subscribe to Kafka
    consumer.subscribe("the_topic");
    globals.setGlobal("consumer", consumerReporter);

    // Deploy the metrics collector: 3 instances
    JsonObject producerConfig = new JsonObject((Map) kafkaCluster.useTo()
            .getProducerProperties("the_producer"));
    globals.getVertx().deployVerticle(
            MetricsVerticle.class.getName(),
            new DeploymentOptions().setConfig(producerConfig).setInstances(3)
    );
}
 
Example #20
Source File: AbstractOracleDatatypesTest.java    From debezium-incubator with Apache License 2.0
@Test
@FixFor("DBZ-1552")
public void fpTypesAsString() throws Exception {
    stopConnector();
    initializeConnectorTestFramework();
    final Configuration config = connectorConfig()
            .with(OracleConnectorConfig.DECIMAL_HANDLING_MODE, DecimalMode.STRING)
            .build();

    start(OracleConnector.class, config);
    assertConnectorIsRunning();

    waitForSnapshotToBeCompleted(TestHelper.CONNECTOR_NAME, TestHelper.SERVER_NAME);

    int expectedRecordCount = 0;

    if (insertRecordsDuringTest()) {
        insertFpTypes();
    }

    Testing.debug("Inserted");
    expectedRecordCount++;

    final SourceRecords records = consumeRecordsByTopic(expectedRecordCount);

    List<SourceRecord> testTableRecords = records.recordsForTopic("server1.DEBEZIUM.TYPE_FP");
    assertThat(testTableRecords).hasSize(expectedRecordCount);
    SourceRecord record = testTableRecords.get(0);

    VerifyRecord.isValid(record);

    // insert
    if (insertRecordsDuringTest()) {
        VerifyRecord.isValidInsert(record, "ID", 1);
    }
    else {
        VerifyRecord.isValidRead(record, "ID", 1);
    }

    Struct after = (Struct) ((Struct) record.value()).get("after");
    assertRecord(after, EXPECTED_FP_AS_STRING);
}
 
Example #21
Source File: OracleConnectorFilterIT.java    From debezium-incubator with Apache License 2.0
@Before
public void before() throws SQLException {
    setConsumeTimeout(TestHelper.defaultMessageConsumerPollTimeout(), TimeUnit.SECONDS);
    TestHelper.dropTable(connection, "debezium.table1");
    TestHelper.dropTable(connection, "debezium.table2");
    TestHelper.dropTable(connection, "debezium.table3");

    try {
        adminConnection.execute("DROP USER debezium2 CASCADE");
    }
    catch (SQLException e) {
        // Ignored: the debezium2 user may not exist yet
    }

    adminConnection.execute(
            "CREATE USER debezium2 IDENTIFIED BY dbz",
            "GRANT CONNECT TO debezium2",
            "GRANT CREATE SESSION TO debezium2",
            "GRANT CREATE TABLE TO debezium2",
            "GRANT CREATE SEQUENCE TO debezium2",
            "ALTER USER debezium2 QUOTA 100M ON users",
            "create table debezium2.table2 (id numeric(9,0) not null,name varchar2(1000),primary key (id))",
            "create table debezium2.nopk (id numeric(9,0) not null)",
            "GRANT ALL PRIVILEGES ON debezium2.table2 to debezium",
            "GRANT SELECT ON debezium2.table2 to " + TestHelper.CONNECTOR_USER,
            "GRANT SELECT ON debezium2.nopk to " + TestHelper.CONNECTOR_USER,
            "ALTER TABLE debezium2.table2 ADD SUPPLEMENTAL LOG DATA (ALL) COLUMNS");
    String ddl = "create table debezium.table1 (" +
            "  id numeric(9,0) not null, " +
            "  name varchar2(1000), " +
            "  primary key (id)" +
            ")";

    connection.execute(ddl);
    connection.execute("GRANT SELECT ON debezium.table1 to " + TestHelper.CONNECTOR_USER);
    connection.execute("ALTER TABLE debezium.table1 ADD SUPPLEMENTAL LOG DATA (ALL) COLUMNS");

    ddl = "create table debezium.table2 (" +
            "  id numeric(9,0) not null, " +
            "  name varchar2(1000), " +
            "  primary key (id)" +
            ")";

    connection.execute(ddl);
    connection.execute("GRANT SELECT ON debezium.table2 to  " + TestHelper.CONNECTOR_USER);
    connection.execute("ALTER TABLE debezium.table2 ADD SUPPLEMENTAL LOG DATA (ALL) COLUMNS");

    initializeConnectorTestFramework();
    Testing.Files.delete(TestHelper.DB_HISTORY_PATH);
}
 
Example #22
Source File: AbstractOracleDatatypesTest.java    From debezium-incubator with Apache License 2.0
@Test
@FixFor("DBZ-1552")
public void fpTypesAsDouble() throws Exception {
    stopConnector();
    initializeConnectorTestFramework();
    final Configuration config = connectorConfig()
            .with(OracleConnectorConfig.DECIMAL_HANDLING_MODE, DecimalMode.DOUBLE)
            .build();

    start(OracleConnector.class, config);
    assertConnectorIsRunning();

    waitForSnapshotToBeCompleted(TestHelper.CONNECTOR_NAME, TestHelper.SERVER_NAME);

    int expectedRecordCount = 0;

    if (insertRecordsDuringTest()) {
        insertFpTypes();
    }

    Testing.debug("Inserted");
    expectedRecordCount++;

    final SourceRecords records = consumeRecordsByTopic(expectedRecordCount);

    List<SourceRecord> testTableRecords = records.recordsForTopic("server1.DEBEZIUM.TYPE_FP");
    assertThat(testTableRecords).hasSize(expectedRecordCount);
    SourceRecord record = testTableRecords.get(0);

    VerifyRecord.isValid(record);

    // insert
    if (insertRecordsDuringTest()) {
        VerifyRecord.isValidInsert(record, "ID", 1);
    }
    else {
        VerifyRecord.isValidRead(record, "ID", 1);
    }

    Struct after = (Struct) ((Struct) record.value()).get("after");
    assertRecord(after, EXPECTED_FP_AS_DOUBLE);
}
 
Example #23
Source File: SchemaHistoryTopicIT.java    From debezium-incubator with Apache License 2.0
@Test
@FixFor("DBZ-1904")
public void snapshotSchemaChanges() throws Exception {
    final int RECORDS_PER_TABLE = 5;
    final int TABLES = 2;
    final int ID_START_1 = 10;
    final Configuration config = TestHelper.defaultConfig()
            .with(OracleConnectorConfig.SNAPSHOT_MODE, SnapshotMode.INITIAL)
            .with(RelationalDatabaseConnectorConfig.TABLE_WHITELIST, "DEBEZIUM\\.TABLE[ABC]")
            .with(RelationalDatabaseConnectorConfig.INCLUDE_SCHEMA_CHANGES, true)
            .build();

    for (int i = 0; i < RECORDS_PER_TABLE; i++) {
        final int id = ID_START_1 + i;
        connection.execute(
                "INSERT INTO debezium.tablea VALUES(" + id + ", 'a')");
        connection.execute(
                "INSERT INTO debezium.tableb VALUES(" + id + ", 'b')");
    }

    start(OracleConnector.class, config);
    assertConnectorIsRunning();
    waitForSnapshotToBeCompleted(TestHelper.CONNECTOR_NAME, TestHelper.SERVER_NAME);

    Testing.Print.enable();

    // DDL for 3 tables
    SourceRecords records = consumeRecordsByTopic(3);
    final List<SourceRecord> schemaRecords = records.allRecordsInOrder();
    Assertions.assertThat(schemaRecords).hasSize(3);
    schemaRecords.forEach(record -> {
        Assertions.assertThat(record.topic()).isEqualTo("server1");
        Assertions.assertThat(((Struct) record.key()).getString("databaseName")).isEqualTo("ORCLPDB1");
        Assertions.assertThat(record.sourceOffset().get("snapshot")).isEqualTo(true);
    });
    Assertions.assertThat(((Struct) schemaRecords.get(0).value()).getStruct("source").getString("snapshot")).isEqualTo("true");
    Assertions.assertThat(((Struct) schemaRecords.get(1).value()).getStruct("source").getString("snapshot")).isEqualTo("true");
    Assertions.assertThat(((Struct) schemaRecords.get(2).value()).getStruct("source").getString("snapshot")).isEqualTo("true");
    Assertions.assertThat(((Struct) schemaRecords.get(0).value()).getStruct("source").getString("schema")).isEqualTo("DEBEZIUM");
    Assertions.assertThat(((Struct) schemaRecords.get(0).value()).getString("ddl")).contains("CREATE TABLE");
    Assertions.assertThat(((Struct) schemaRecords.get(0).value()).getString("schemaName")).isEqualTo("DEBEZIUM");

    final List<Struct> tableChanges = ((Struct) schemaRecords.get(0).value()).getArray("tableChanges");
    Assertions.assertThat(tableChanges).hasSize(1);
    Assertions.assertThat(tableChanges.get(0).get("type")).isEqualTo("CREATE");

    records = consumeRecordsByTopic(RECORDS_PER_TABLE * TABLES);
    Assertions.assertThat(records.recordsForTopic("server1.DEBEZIUM.TABLEA")).hasSize(RECORDS_PER_TABLE);
    Assertions.assertThat(records.recordsForTopic("server1.DEBEZIUM.TABLEB")).hasSize(RECORDS_PER_TABLE);
    records.recordsForTopic("server1.DEBEZIUM.TABLEB").forEach(record -> {
        assertSchemaMatchesStruct(
                (Struct) ((Struct) record.value()).get("after"),
                SchemaBuilder.struct()
                        .optional()
                        .name("server1.DEBEZIUM.TABLEB.Value")
                        .field("ID", Schema.INT32_SCHEMA)
                        .field("COLB", Schema.OPTIONAL_STRING_SCHEMA)
                        .build());
    });
}
 
Example #24
Source File: SchemaHistoryTopicIT.java    From debezium-incubator with Apache License 2.0
@Test
@FixFor("DBZ-1904")
public void snapshotSchemaChanges() throws Exception {
    final int RECORDS_PER_TABLE = 5;
    final int TABLES = 2;
    final int ID_START_1 = 10;
    final Configuration config = TestHelper.defaultConfig()
            .with(Db2ConnectorConfig.SNAPSHOT_MODE, SnapshotMode.INITIAL)
            .with(RelationalDatabaseConnectorConfig.INCLUDE_SCHEMA_CHANGES, true)
            .build();

    for (int i = 0; i < RECORDS_PER_TABLE; i++) {
        final int id = ID_START_1 + i;
        connection.execute(
                "INSERT INTO tablea VALUES(" + id + ", 'a')");
        connection.execute(
                "INSERT INTO tableb VALUES(" + id + ", 'b')");
    }

    start(Db2Connector.class, config);
    assertConnectorIsRunning();
    TestHelper.waitForSnapshotToBeCompleted();

    Testing.Print.enable();

    // DDL for 3 tables
    SourceRecords records = consumeRecordsByTopic(3);
    final List<SourceRecord> schemaRecords = records.allRecordsInOrder();
    Assertions.assertThat(schemaRecords).hasSize(3);
    schemaRecords.forEach(record -> {
        Assertions.assertThat(record.topic()).isEqualTo("testdb");
        Assertions.assertThat(((Struct) record.key()).getString("databaseName")).isEqualTo("TESTDB");
        Assertions.assertThat(record.sourceOffset().get("snapshot")).isEqualTo(true);
    });
    Assertions.assertThat(((Struct) schemaRecords.get(0).value()).getStruct("source").getString("snapshot")).isEqualTo("true");
    Assertions.assertThat(((Struct) schemaRecords.get(1).value()).getStruct("source").getString("snapshot")).isEqualTo("true");
    Assertions.assertThat(((Struct) schemaRecords.get(2).value()).getStruct("source").getString("snapshot")).isEqualTo("true");

    final List<Struct> tableChanges = ((Struct) schemaRecords.get(0).value()).getArray("tableChanges");
    Assertions.assertThat(tableChanges).hasSize(1);
    Assertions.assertThat(tableChanges.get(0).get("type")).isEqualTo("CREATE");

    records = consumeRecordsByTopic(RECORDS_PER_TABLE * TABLES);
    Assertions.assertThat(records.recordsForTopic("testdb.DB2INST1.TABLEA")).hasSize(RECORDS_PER_TABLE);
    Assertions.assertThat(records.recordsForTopic("testdb.DB2INST1.TABLEB")).hasSize(RECORDS_PER_TABLE);
    records.recordsForTopic("testdb.DB2INST1.TABLEB").forEach(record -> {
        assertSchemaMatchesStruct(
                (Struct) ((Struct) record.value()).get("after"),
                SchemaBuilder.struct()
                        .optional()
                        .name("testdb.DB2INST1.TABLEB.Value")
                        .field("ID", Schema.INT32_SCHEMA)
                        .field("COLB", Schema.OPTIONAL_STRING_SCHEMA)
                        .build());
    });
}
 
Example #25
Source File: KafkaSSLTestResource.java    From quarkus with Apache License 2.0
@Override
public Map<String, String> start() {
    try {
        File directory = Testing.Files.createTestingDirectory("kafka-data-ssl", true);
        File sslDir = sslDir(directory, true);

        Path ksPath = new File(sslDir, "kafka-keystore.p12").toPath();
        try (InputStream ksStream = getClass().getResourceAsStream("/kafka-keystore.p12")) {
            Files.copy(
                    ksStream,
                    ksPath,
                    StandardCopyOption.REPLACE_EXISTING);
        }

        Path tsPath = new File(sslDir, "kafka-truststore.p12").toPath();
        try (InputStream tsStream = getClass().getResourceAsStream("/kafka-truststore.p12")) {
            Files.copy(
                    tsStream,
                    tsPath,
                    StandardCopyOption.REPLACE_EXISTING);
        }
        String password = "Z_pkTh9xgZovK4t34cGB2o6afT4zZg0L";
        String type = "PKCS12";

        Properties props = new Properties();
        props.setProperty("zookeeper.connection.timeout.ms", "45000");
        //See http://kafka.apache.org/documentation.html#security_ssl for detail
        props.setProperty("listener.security.protocol.map", "CLIENT:SSL");
        props.setProperty("listeners", "CLIENT://:19093");
        props.setProperty("inter.broker.listener.name", "CLIENT");
        props.setProperty(SslConfigs.SSL_KEYSTORE_LOCATION_CONFIG, ksPath.toString());
        props.setProperty(SslConfigs.SSL_KEYSTORE_PASSWORD_CONFIG, password);
        props.setProperty(SslConfigs.SSL_KEYSTORE_TYPE_CONFIG, type);
        props.setProperty(SslConfigs.SSL_KEY_PASSWORD_CONFIG, password);
        props.setProperty(SslConfigs.SSL_TRUSTSTORE_LOCATION_CONFIG, tsPath.toString());
        props.setProperty(SslConfigs.SSL_TRUSTSTORE_PASSWORD_CONFIG, password);
        props.setProperty(SslConfigs.SSL_TRUSTSTORE_TYPE_CONFIG, type);
        props.setProperty(SslConfigs.SSL_ENDPOINT_IDENTIFICATION_ALGORITHM_CONFIG, "");

        kafka = new KafkaCluster()
                .withPorts(2183, 19093)
                .addBrokers(1)
                .usingDirectory(directory)
                .deleteDataUponShutdown(true)
                .withKafkaConfiguration(props)
                .deleteDataPriorToStartup(true)
                .startup();
    } catch (Exception e) {
        throw new RuntimeException(e);
    }
    return Collections.emptyMap();
}
 
Example #26
Source File: KafkaTestResource.java    From quarkus with Apache License 2.0
@Override
public Map<String, String> start() {
    try {
        File directory = Testing.Files.createTestingDirectory("kafka-data", true);
        File sslDir = sslDir(directory, true);

        Path ksPath = new File(sslDir, "ks-keystore.p12").toPath();
        try (InputStream ksStream = getClass().getResourceAsStream("/ks-keystore.p12")) {
            Files.copy(
                    ksStream,
                    ksPath,
                    StandardCopyOption.REPLACE_EXISTING);
        }

        Path tsPath = new File(sslDir, "ks-truststore.p12").toPath();
        try (InputStream tsStream = getClass().getResourceAsStream("/ks-truststore.p12")) {
            Files.copy(
                    tsStream,
                    tsPath,
                    StandardCopyOption.REPLACE_EXISTING);
        }
        String password = "Z_pkTh9xgZovK4t34cGB2o6afT4zZg0L";
        String type = "PKCS12";

        Properties props = new Properties();
        props.setProperty("zookeeper.connection.timeout.ms", "45000");

        // http://kafka.apache.org/documentation.html#security_ssl
        props.setProperty("listener.security.protocol.map", "CLIENT:SSL");
        props.setProperty("listeners", "CLIENT://:19092");
        props.setProperty("inter.broker.listener.name", "CLIENT");
        props.setProperty(SslConfigs.SSL_KEYSTORE_LOCATION_CONFIG, ksPath.toString());
        props.setProperty(SslConfigs.SSL_KEYSTORE_PASSWORD_CONFIG, password);
        props.setProperty(SslConfigs.SSL_KEYSTORE_TYPE_CONFIG, type);
        props.setProperty(SslConfigs.SSL_KEY_PASSWORD_CONFIG, password);
        props.setProperty(SslConfigs.SSL_TRUSTSTORE_LOCATION_CONFIG, tsPath.toString());
        props.setProperty(SslConfigs.SSL_TRUSTSTORE_PASSWORD_CONFIG, password);
        props.setProperty(SslConfigs.SSL_TRUSTSTORE_TYPE_CONFIG, type);
        props.setProperty(SslConfigs.SSL_ENDPOINT_IDENTIFICATION_ALGORITHM_CONFIG, "");

        kafka = new KafkaCluster()
                .withPorts(2182, 19092)
                .addBrokers(1)
                .usingDirectory(directory)
                .deleteDataUponShutdown(true)
                .withKafkaConfiguration(props)
                .deleteDataPriorToStartup(true)
                .startup();
    } catch (Exception e) {
        throw new RuntimeException(e);
    }
    return Collections.emptyMap();
}