private Map<String, List<SourceRecord>> recordsByTopic(final int expectedRecordsCount, TestConsumer consumer) {
    final Map<String, List<SourceRecord>> recordsByTopic = new HashMap<>();
    for (int i = 0; i < expectedRecordsCount; i++) {
        final SourceRecord record = consumer.remove();
        recordsByTopic.computeIfAbsent(record.topic(), topic -> new ArrayList<>()).add(record);
    }
    return recordsByTopic;
}
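If the consumer's records have already been drained into a plain list, the same grouping can be expressed as a one-liner with streams. A minimal sketch, assuming the records are available as a List (not part of the original TestConsumer API):

import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;

import org.apache.kafka.connect.source.SourceRecord;

public class RecordGrouping {
    // Groups a drained list of records by topic; equivalent to the loop above.
    static Map<String, List<SourceRecord>> byTopic(List<SourceRecord> records) {
        return records.stream().collect(Collectors.groupingBy(SourceRecord::topic));
    }
}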
public boolean add(SourceRecord record) {
    if (topicName.equals(record.topic())) {
        this.sourceRecords.add(record);
        String dbName = getAffectedDatabase(record);
        sourceRecordsByDbName.computeIfAbsent(dbName, db -> new ArrayList<>()).add(record);
        return true;
    }
    return false;
}
public void add(SourceRecord record) {
    records.add(record);
    recordsByTopic.computeIfAbsent(record.topic(), topicName -> new ArrayList<>()).add(record);
    String dbName = getAffectedDatabase(record);
    if (dbName != null) {
        ddlRecordsByDbName.computeIfAbsent(dbName, key -> new ArrayList<>()).add(record);
    }
}
public void add(SourceRecord record) {
    TableId tableId = tableIdFromTopic.apply(record.topic());
    if (tableId != null) {
        this.sourceRecords.add(record);
        getOrCreate(tableId).add(record);
    }
}
private void assertReadRecord(SourceRecord record, Map<String, List<SchemaAndValueField>> expectedValuesByTopicName) {
    VerifyRecord.isValidRead(record, PK_FIELD, 1);
    String topicName = record.topic().replace(TestHelper.TEST_SERVER + ".", "");
    List<SchemaAndValueField> expectedValuesAndSchemasForTopic = expectedValuesByTopicName.get(topicName);
    assertNotNull("No expected values for " + topicName + " found", expectedValuesAndSchemasForTopic);
    assertRecordSchemaAndValues(expectedValuesAndSchemasForTopic, record, Envelope.FieldName.AFTER);
}
@Test
public void shouldReceiveChangesForUpdatesWithPKChanges() throws Exception {
    consumer = testConsumer(3);
    recordsProducer.start(consumer, blackHole);
    executeAndWait("UPDATE test_table SET text = 'update', pk = 2");

    String topicName = topicName("public.test_table");

    // first should be a delete of the old pk
    SourceRecord deleteRecord = consumer.remove();
    assertEquals(topicName, deleteRecord.topic());
    VerifyRecord.isValidDelete(deleteRecord, PK_FIELD, 1);

    // followed by a tombstone of the old pk
    SourceRecord tombstoneRecord = consumer.remove();
    assertEquals(topicName, tombstoneRecord.topic());
    VerifyRecord.isValidTombstone(tombstoneRecord, PK_FIELD, 1);

    // and finally insert of the new value
    SourceRecord insertRecord = consumer.remove();
    assertEquals(topicName, insertRecord.topic());
    VerifyRecord.isValidInsert(insertRecord, PK_FIELD, 2);
}
@Override
public void accept(ChangeEvent event) {
    final SourceRecord record = event.getRecord();
    if (ignoreTopic(record.topic())) {
        return;
    }
    if (latch.getCount() == 0) {
        if (ignoreExtraRecords) {
            records.add(record);
        }
        else {
            fail("received more events than expected");
        }
    }
    else {
        records.add(record);
        latch.countDown();
    }
}
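The latch-gated pattern above is what lets a test block until an expected number of records has arrived. A minimal self-contained sketch of the same idea; the class and method names here are illustrative, not the original TestConsumer API:

import java.util.List;
import java.util.concurrent.CopyOnWriteArrayList;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import java.util.function.Consumer;

import org.apache.kafka.connect.source.SourceRecord;

public class LatchedCollector implements Consumer<SourceRecord> {
    private final List<SourceRecord> records = new CopyOnWriteArrayList<>();
    private final CountDownLatch latch;

    public LatchedCollector(int expectedRecords) {
        this.latch = new CountDownLatch(expectedRecords);
    }

    @Override
    public void accept(SourceRecord record) {
        records.add(record);
        latch.countDown(); // counting down past zero is a no-op
    }

    // Blocks the test thread until all expected records arrived or the timeout elapsed.
    public boolean await(long timeout, TimeUnit unit) throws InterruptedException {
        return latch.await(timeout, unit);
    }

    public List<SourceRecord> records() {
        return records;
    }
}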
@Test
public void shouldReceiveChangesForDeletes() throws Exception {
    // add a new entry and remove both
    String statements = "INSERT INTO test_table (text) VALUES ('insert2');" +
                        "DELETE FROM test_table WHERE pk > 0;";
    consumer = testConsumer(5);
    recordsProducer.start(consumer, blackHole);
    executeAndWait(statements);

    String topicPrefix = "public.test_table";
    String topicName = topicName(topicPrefix);
    assertRecordInserted(topicPrefix, PK_FIELD, 2);

    // first entry removed
    SourceRecord record = consumer.remove();
    assertEquals(topicName, record.topic());
    VerifyRecord.isValidDelete(record, PK_FIELD, 1);

    // followed by a tombstone
    record = consumer.remove();
    assertEquals(topicName, record.topic());
    VerifyRecord.isValidTombstone(record, PK_FIELD, 1);

    // second entry removed
    record = consumer.remove();
    assertEquals(topicName, record.topic());
    VerifyRecord.isValidDelete(record, PK_FIELD, 2);

    // followed by a tombstone
    record = consumer.remove();
    assertEquals(topicName, record.topic());
    VerifyRecord.isValidTombstone(record, PK_FIELD, 2);
}
private SourceRecord assertRecordInserted(String expectedTopicName, String pkColumn, Integer pk) throws InterruptedException {
    assertFalse("records not generated", consumer.isEmpty());
    SourceRecord insertedRecord = consumer.remove();
    assertEquals(topicName(expectedTopicName), insertedRecord.topic());
    if (pk != null) {
        VerifyRecord.isValidInsert(insertedRecord, pkColumn, pk);
    }
    else {
        VerifyRecord.isValidInsert(insertedRecord);
    }
    return insertedRecord;
}
@Test @FixFor("DBZ-1086") public void testKeyNullValue() { final ByLogicalTableRouter<SourceRecord> router = new ByLogicalTableRouter<>(); final Map<String, String> props = new HashMap<>(); props.put("topic.regex", "(.*)customers_shard(.*)"); props.put("topic.replacement", "$1customers_all_shards"); props.put("key.field.name", "shard_id"); props.put("key.field.regex", "(.*)customers_shard_(.*)"); props.put("key.field.replacement", "$2"); router.configure(props); SourceRecord record1 = new SourceRecord( new HashMap<>(), new HashMap<>(), "mysql-server-1.inventory.customers_shard_1", null, null, null, null ); SourceRecord transformed1 = router.apply(record1); assertThat(transformed1).isNotNull(); assertThat(transformed1.topic()).isEqualTo("mysql-server-1.inventory.customers_all_shards"); assertThat(transformed1.keySchema()).isNull(); assertThat(transformed1.key()).isNull(); }
/**
 * Stop buffering source records, and flush any buffered records by replacing their offset with the provided offset.
 * Note that this only replaces the record's {@link SourceRecord#sourceOffset() offset} and does not change the
 * value of the record, which may contain information about the snapshot.
 *
 * @param newOffset the offset that reflects that the snapshot has been completed; may not be null
 * @throws InterruptedException if the thread is interrupted while waiting for the new record to be flushed
 */
protected synchronized void stopBuffering(Map<String, ?> newOffset) throws InterruptedException {
    assert newOffset != null;
    this.buffered.close(record -> {
        if (record == null) {
            return null;
        }
        return new SourceRecord(record.sourcePartition(), newOffset, record.topic(), record.kafkaPartition(),
                                record.keySchema(), record.key(), record.valueSchema(), record.value());
    });
    this.current = this.actual;
}
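The buffer behind this method holds back the most recent record so that a final transformation (here, stamping the completed-snapshot offset onto it) can be applied before the record is handed downstream. A minimal sketch of that buffering idea, assuming a simplified single-element buffer rather than Debezium's actual BufferedBlockingConsumer:

import java.util.function.Consumer;
import java.util.function.Function;

// Holds back the last accepted record; close() lets the caller rewrite it
// (e.g. replace its offset) before flushing it to the delegate.
public class LastRecordBuffer<T> {
    private final Consumer<T> delegate;
    private T held;

    public LastRecordBuffer(Consumer<T> delegate) {
        this.delegate = delegate;
    }

    public synchronized void accept(T next) {
        if (held != null) {
            delegate.accept(held); // flush the previously held record unchanged
        }
        held = next; // hold the newest record back
    }

    public synchronized void close(Function<T, T> finalTransform) {
        if (held != null) {
            delegate.accept(finalTransform.apply(held));
            held = null;
        }
    }
}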
private void assertHeartBeatRecordInserted() {
    assertFalse("records not generated", consumer.isEmpty());

    SourceRecord heartbeat = consumer.remove();
    assertEquals("__debezium-heartbeat." + TestHelper.TEST_SERVER, heartbeat.topic());

    Struct key = (Struct) heartbeat.key();
    assertThat(key.get("serverName")).isEqualTo(TestHelper.TEST_SERVER);
}
/**
 * Utility method to replace the offset in the given record with the latest. This is used on the last record produced
 * during the snapshot.
 *
 * @param record the record
 * @return the updated record
 */
protected SourceRecord replaceOffset(SourceRecord record) {
    if (record == null) {
        return null;
    }
    Map<String, ?> newOffset = context.source().offset();
    return new SourceRecord(record.sourcePartition(), newOffset, record.topic(), record.kafkaPartition(),
                            record.keySchema(), record.key(), record.valueSchema(), record.value());
}
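SourceRecord is immutable, so "replacing" the offset means copying every other field into a new instance, exactly as above. A self-contained demonstration of that copy; the topic name and offset keys here are made up for illustration:

import java.util.Collections;
import java.util.Map;

import org.apache.kafka.connect.data.Schema;
import org.apache.kafka.connect.source.SourceRecord;

public class ReplaceOffsetDemo {
    public static void main(String[] args) {
        SourceRecord original = new SourceRecord(
                Collections.singletonMap("server", "db1"), // source partition
                Collections.singletonMap("lsn", 100L),     // original offset
                "db1.public.test_table", null,
                Schema.INT32_SCHEMA, 1,
                Schema.STRING_SCHEMA, "payload");

        Map<String, ?> newOffset = Collections.singletonMap("lsn", 200L);

        // Copy every field except the offset.
        SourceRecord updated = new SourceRecord(original.sourcePartition(), newOffset,
                original.topic(), original.kafkaPartition(),
                original.keySchema(), original.key(),
                original.valueSchema(), original.value());

        System.out.println(updated.sourceOffset()); // {lsn=200}
    }
}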
@Test @FixFor("DBZ-259") public void shouldProcessIntervalDelete() throws Exception { final String statements = "INSERT INTO table_with_interval VALUES (default, 'Foo', default);" + "INSERT INTO table_with_interval VALUES (default, 'Bar', default);" + "DELETE FROM table_with_interval WHERE id = 1;"; consumer = testConsumer(4); recordsProducer.start(consumer, blackHole); executeAndWait(statements); final String topicPrefix = "public.table_with_interval"; final String topicName = topicName(topicPrefix); final String pk = "id"; assertRecordInserted(topicPrefix, pk, 1); assertRecordInserted(topicPrefix, pk, 2); // first entry removed SourceRecord record = consumer.remove(); assertEquals(topicName, record.topic()); VerifyRecord.isValidDelete(record, pk, 1); // followed by a tombstone record = consumer.remove(); assertEquals(topicName, record.topic()); VerifyRecord.isValidTombstone(record, pk, 1); }
@Test
public void shouldReceiveChangesForDefaultValues() throws Exception {
    String statements = "ALTER TABLE test_table REPLICA IDENTITY FULL;" +
                        "ALTER TABLE test_table ADD COLUMN default_column TEXT DEFAULT 'default';" +
                        "INSERT INTO test_table (text) VALUES ('update');";
    consumer = testConsumer(1);
    recordsProducer.start(consumer, blackHole);
    executeAndWait(statements);

    SourceRecord insertRecord = consumer.remove();
    assertEquals(topicName("public.test_table"), insertRecord.topic());
    VerifyRecord.isValidInsert(insertRecord, PK_FIELD, 2);

    List<SchemaAndValueField> expectedSchemaAndValues = Arrays.asList(
            new SchemaAndValueField("text", SchemaBuilder.OPTIONAL_STRING_SCHEMA, "update"),
            new SchemaAndValueField("default_column", SchemaBuilder.OPTIONAL_STRING_SCHEMA, "default"));
    assertRecordSchemaAndValues(expectedSchemaAndValues, insertRecord, Envelope.FieldName.AFTER);
}
@Test @FixFor("DBZ-582") public void shouldReceiveChangesForUpdatesWithPKChangesWithoutTombstone() throws Exception { PostgresConnectorConfig config = new PostgresConnectorConfig(TestHelper.defaultConfig() .with(PostgresConnectorConfig.INCLUDE_UNKNOWN_DATATYPES, true) .with(CommonConnectorConfig.TOMBSTONES_ON_DELETE, false) .build() ); setupRecordsProducer(config); consumer = testConsumer(2); recordsProducer.start(consumer, blackHole); executeAndWait("UPDATE test_table SET text = 'update', pk = 2"); String topicName = topicName("public.test_table"); // first should be a delete of the old pk SourceRecord deleteRecord = consumer.remove(); assertEquals(topicName, deleteRecord.topic()); VerifyRecord.isValidDelete(deleteRecord, PK_FIELD, 1); // followed by insert of the new value SourceRecord insertRecord = consumer.remove(); assertEquals(topicName, insertRecord.topic()); VerifyRecord.isValidInsert(insertRecord, PK_FIELD, 2); }
@Test @FixFor("DBZ-582") public void shouldReceiveChangesForDeletesWithoutTombstone() throws Exception { PostgresConnectorConfig config = new PostgresConnectorConfig(TestHelper.defaultConfig() .with(PostgresConnectorConfig.INCLUDE_UNKNOWN_DATATYPES, true) .with(CommonConnectorConfig.TOMBSTONES_ON_DELETE, false) .build() ); setupRecordsProducer(config); // add a new entry and remove both String statements = "INSERT INTO test_table (text) VALUES ('insert2');" + "DELETE FROM test_table WHERE pk > 0;"; consumer = testConsumer(3); recordsProducer.start(consumer, blackHole); executeAndWait(statements); String topicPrefix = "public.test_table"; String topicName = topicName(topicPrefix); assertRecordInserted(topicPrefix, PK_FIELD, 2); // first entry removed SourceRecord record = consumer.remove(); assertEquals(topicName, record.topic()); VerifyRecord.isValidDelete(record, PK_FIELD, 1); // second entry removed record = consumer.remove(); assertEquals(topicName, record.topic()); VerifyRecord.isValidDelete(record, PK_FIELD, 2); }
@Test(timeout = 30000)
public void shouldReceiveChangesForInsertsWithPostgisTypes() throws Exception {
    TestHelper.executeDDL("postgis_create_tables.ddl");

    consumer = testConsumer(1, "public");
    // spatial_ref_sys produces a ton of records in the postgis schema
    consumer.setIgnoreExtraRecords(true);
    recordsProducer.start(consumer, blackHole);

    // need to wait for all the spatial_ref_sys records to flow through and be ignored.
    // this exceeds the normal 2s timeout.
    TestHelper.execute("INSERT INTO public.dummy_table DEFAULT VALUES;");
    consumer.await(TestHelper.waitTimeForRecords() * 10, TimeUnit.SECONDS);
    while (true) {
        if (!consumer.isEmpty()) {
            SourceRecord record = consumer.remove();
            if (record.topic().endsWith(".public.dummy_table")) {
                break;
            }
        }
    }

    // now do it for actual testing
    // postgis types
    consumer.expects(1);
    assertInsert(INSERT_POSTGIS_TYPES_STMT, 1, schemaAndValuesForPostgisTypes());
}
@Test(timeout = 30000)
public void shouldReceiveChangesForInsertsWithPostgisArrayTypes() throws Exception {
    TestHelper.executeDDL("postgis_create_tables.ddl");

    consumer = testConsumer(1, "public");
    // spatial_ref_sys produces a ton of records in the postgis schema
    consumer.setIgnoreExtraRecords(true);
    recordsProducer.start(consumer, blackHole);

    // need to wait for all the spatial_ref_sys records to flow through and be ignored.
    // this exceeds the normal 2s timeout.
    TestHelper.execute("INSERT INTO public.dummy_table DEFAULT VALUES;");
    consumer.await(TestHelper.waitTimeForRecords() * 10, TimeUnit.SECONDS);
    while (true) {
        if (!consumer.isEmpty()) {
            SourceRecord record = consumer.remove();
            if (record.topic().endsWith(".public.dummy_table")) {
                break;
            }
        }
    }

    // now do it for actual testing
    // postgis array types
    consumer.expects(1);
    assertInsert(INSERT_POSTGIS_ARRAY_TYPES_STMT, 1, schemaAndValuesForPostgisArrayTypes());
}
/**
 * Serialize the source record to document form.
 *
 * @param record the record; may not be null
 * @param keyConverter the converter for the record key's schema and payload
 * @param valueConverter the converter for the record value's schema and payload
 * @return the document form of the source record; never null
 * @throws IOException if there is an error converting the key or value
 */
private Document serializeSourceRecord(SourceRecord record, SchemaAndValueConverter keyConverter,
                                       SchemaAndValueConverter valueConverter) throws IOException {
    Document keyAndSchema = keyConverter.serialize(record.topic(), record.keySchema(), record.key());
    Document valueAndSchema = valueConverter.serialize(record.topic(), record.valueSchema(), record.value());
    Document sourcePartition = Document.create().putAll(record.sourcePartition());
    Document sourceOffset = Document.create().putAll(record.sourceOffset());

    Document parent = Document.create();
    parent.set("sourcePartition", sourcePartition);
    parent.set("sourceOffset", sourceOffset);
    parent.set("topic", record.topic());
    parent.set("kafkaPartition", record.kafkaPartition());
    parent.set("keySchema", keyAndSchema.getDocument("schema"));
    parent.set("key", keyAndSchema.getDocument("payload"));
    parent.set("valueSchema", valueAndSchema.getDocument("schema"));
    parent.set("value", valueAndSchema.getDocument("payload"));
    return parent;
}
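The envelope built above keeps the schema and payload side by side for both key and value. A rough standalone sketch of the same schema/payload split using Kafka's built-in JsonConverter in place of Debezium's SchemaAndValueConverter (an assumption made purely so the example is self-contained):

import java.nio.charset.StandardCharsets;
import java.util.Collections;

import org.apache.kafka.connect.data.Schema;
import org.apache.kafka.connect.json.JsonConverter;

public class EnvelopeSketch {
    public static void main(String[] args) {
        JsonConverter converter = new JsonConverter();
        // schemas.enable=true wraps the output in a {"schema": ..., "payload": ...} envelope
        converter.configure(Collections.singletonMap("schemas.enable", "true"), false);

        byte[] serialized = converter.fromConnectData("db1.public.test_table",
                Schema.STRING_SCHEMA, "some value");

        // {"schema":{"type":"string","optional":false},"payload":"some value"}
        System.out.println(new String(serialized, StandardCharsets.UTF_8));
    }
}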