/**
 * Set the database identifier. This is typically called once upon initialization.
 *
 * @param logicalId the logical identifier for the database; may not be null
 */
public void setServerName(String logicalId) {
    this.serverName = logicalId;
    sourcePartition = Collect.hashMapOf(SERVER_PARTITION_KEY, serverName);
}
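All of these snippets lean on the same Collect.hashMapOf utility. As a minimal sketch of what such a helper does (an illustration, not Debezium's actual io.debezium.util.Collect source), the one- and two-pair overloads might look like:

    import java.util.HashMap;
    import java.util.Map;

    // Minimal sketch (an assumption, not Debezium's actual source) of the
    // hashMapOf-style helper used throughout these snippets: it builds a
    // mutable HashMap from explicit key/value pairs.
    final class CollectSketch {
        static <K, V> Map<K, V> hashMapOf(K k1, V v1) {
            Map<K, V> map = new HashMap<>();
            map.put(k1, v1);
            return map;
        }

        static <K, V> Map<K, V> hashMapOf(K k1, V v1, K k2, V v2) {
            Map<K, V> map = hashMapOf(k1, v1);
            map.put(k2, v2);
            return map;
        }
    }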
/**
 * Get the Kafka Connect detail about the source "partition" for the given replica set. If the replica set is
 * not known, this method records the new partition.
 *
 * @param replicaSetName the name of the replica set for which the partition is to be obtained; may not be null
 * @return the source partition information; never null
 */
public Map<String, String> partition(String replicaSetName) {
    if (replicaSetName == null) {
        throw new IllegalArgumentException("Replica set name may not be null");
    }
    return sourcePartitionsByReplicaSetName.computeIfAbsent(replicaSetName,
            rsName -> Collect.hashMapOf(SERVER_ID_KEY, serverName, REPLICA_SET_NAME, rsName));
}
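The returned map is the key under which Kafka Connect stores and looks up offsets for that replica set. A minimal usage sketch, where source and context are illustrative names for the source-info holder and the task context:

    // Hypothetical usage: the partition map keys this replica set's
    // stored offsets in Kafka Connect's offset storage.
    Map<String, String> partition = source.partition("rs0");
    Map<String, Object> lastRecorded = context.offsetStorageReader().offset(partition);
    if (lastRecorded == null) {
        // no stored offset for this replica set; an initial sync is required
    }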
protected Map<String, String> vars(String var1, String val1) {
    return Collect.hashMapOf(var1, val1);
}

protected void setLogPosition(int index) {
    this.position = Collect.hashMapOf("filename", "my-txn-file.log", "position", index);
}

protected Map<String, String> vars(String var1, String val1, String var2, String val2) {
    return Collect.hashMapOf(var1, val1, var2, val2);
}
/**
 * Get the Kafka Connect detail about the source "offset" for the named replica set, which describes the position
 * where we most recently read. If the replica set has not yet been seen, the initial position is used;
 * otherwise, the offset representation of the recorded position is returned.
 *
 * @param replicaSetName the name of the replica set for which the offset is to be obtained; may not be null
 * @return a copy of the current offset for the replica set; never null
 */
public Map<String, ?> lastOffset(String replicaSetName) {
    Position existing = positionsByReplicaSetName.get(replicaSetName);
    if (existing == null) {
        existing = INITIAL_POSITION;
    }
    if (isInitialSyncOngoing(replicaSetName)) {
        return Collect.hashMapOf(TIMESTAMP, Integer.valueOf(existing.getTime()),
                                 ORDER, Integer.valueOf(existing.getInc()),
                                 OPERATION_ID, existing.getOperationId(),
                                 INITIAL_SYNC, true);
    }
    return Collect.hashMapOf(TIMESTAMP, Integer.valueOf(existing.getTime()),
                             ORDER, Integer.valueOf(existing.getInc()),
                             OPERATION_ID, existing.getOperationId());
}
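On restart, a consumer of this map would reverse the encoding to find where to resume. A hedged sketch, assuming the TIMESTAMP and ORDER constants resolve to the literal keys shown in the comments:

    import org.bson.BsonTimestamp;
    import java.util.Map;

    // Hypothetical resume helper: rebuild the MongoDB position from a stored
    // offset map. The literal keys ("sec", "ord") are assumptions standing in
    // for the TIMESTAMP and ORDER constants used above.
    static BsonTimestamp resumePosition(Map<String, ?> lastOffset) {
        int time = (Integer) lastOffset.get("sec"); // seconds component of the timestamp
        int inc = (Integer) lastOffset.get("ord");  // ordinal within that second
        return new BsonTimestamp(time, inc);
    }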
private AvailableVariables builtInVariables() {
    Map<String, String> builtIns = Collect.hashMapOf("dbz.test.name", name());
    System.getProperties().forEach((key, value) -> builtIns.put(key.toString(), value.toString()));
    return builtIns::get;
}
@Override
public void initializeStorage() {
    super.initializeStorage();
    try (AdminClient admin = AdminClient.create(this.producerConfig.asProperties())) {
        // Find default replication factor
        Config brokerConfig = getKafkaBrokerConfig(admin);
        final short replicationFactor = Short.parseShort(
                brokerConfig.get(DEFAULT_TOPIC_REPLICATION_FACTOR_PROP_NAME).value());

        // Create topic
        final NewTopic topic = new NewTopic(topicName, (short) 1, replicationFactor);
        topic.configs(Collect.hashMapOf("cleanup.policy", "delete",
                                        "retention.ms", Long.toString(Long.MAX_VALUE),
                                        "retention.bytes", "-1"));
        admin.createTopics(Collections.singleton(topic));

        logger.info("Database history topic '{}' created", topic);
    }
    catch (Exception e) {
        throw new ConnectException("Creation of database history topic failed, please create the topic manually", e);
    }
}
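The topic configuration here is deliberate: a single partition keeps the history records totally ordered, and setting retention.ms to Long.MAX_VALUE with retention.bytes of -1 effectively disables retention, since the database history must never be discarded by the broker.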
@Override
public Map<String, ?> getOffset() {
    if (sourceInfo.isSnapshot()) {
        return Collect.hashMapOf(SourceInfo.SNAPSHOT_KEY, true,
                                 SNAPSHOT_COMPLETED_KEY, snapshotCompleted,
                                 SourceInfo.COMMIT_LSN_KEY, sourceInfo.getCommitLsn().toString());
    }
    else {
        return Collect.hashMapOf(SourceInfo.COMMIT_LSN_KEY, sourceInfo.getCommitLsn().toString(),
                                 SourceInfo.CHANGE_LSN_KEY, sourceInfo.getChangeLsn() == null ? null : sourceInfo.getChangeLsn().toString());
    }
}
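Maps produced by getOffset() travel with every record the task emits, and Kafka Connect persists them so the task can resume after a restart. A sketch of that hand-off, with placeholder topic, schema, and payload:

    // Hypothetical emission point (topic name, schema, and payload are
    // placeholders): the offset map rides along with every SourceRecord,
    // and Kafka Connect persists it for crash recovery.
    SourceRecord record = new SourceRecord(
            partition,                  // source partition, e.g. {server=my-server}
            offsetContext.getOffset(),  // the offset map built above
            "my-server.dbo.orders",     // placeholder topic name
            org.apache.kafka.connect.data.Schema.STRING_SCHEMA,
            "payload");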
@Before
public void beforeEach() throws Exception {
    source = Collect.hashMapOf("server", "my-server");
    setLogPosition(0);
    topicName = "schema-changes-topic";

    File dataDir = Testing.Files.createTestingDirectory("history_cluster");
    Testing.Files.delete(dataDir);

    // Start a single-broker Kafka cluster, with automatic topic creation disabled
    // so that the history topic must be created explicitly
    kafka = new KafkaCluster().usingDirectory(dataDir)
                              .deleteDataPriorToStartup(true)
                              .deleteDataUponShutdown(true)
                              .addBrokers(1)
                              .withKafkaConfiguration(Collect.propertiesOf("auto.create.topics.enable", "false"))
                              .startup();

    history = new KafkaDatabaseHistory();
}
Map<String, String> partition = Collect.hashMapOf(SourceInfo.SERVER_PARTITION_KEY, serverName);
Map<String, ?> offsets = getRestartOffset(context.offsetStorageReader().offset(partition));
final SourceInfo source;
BsonTimestamp ts = new BsonTimestamp(1000, 1);
ObjectId objId = new ObjectId();
final Document obj = new Document().append("$set",
        new Document(Collect.hashMapOf("address.city", "Canberra",
                                       "address.name", "James",
                                       "address.city2.part", 3)));
@Test
public void shouldGenerateSnapshotsForCustomDatatypes() throws Exception {
    final PostgresConnectorConfig config = new PostgresConnectorConfig(
            TestHelper.defaultConfig()
                    .with(PostgresConnectorConfig.SNAPSHOT_MODE, PostgresConnectorConfig.SnapshotMode.INITIAL)
                    .with(PostgresConnectorConfig.INCLUDE_UNKNOWN_DATATYPES, true)
                    .build()
    );
    context = new PostgresTaskContext(
            config,
            TestHelper.getSchema(config),
            PostgresTopicSelector.create(config)
    );
    snapshotProducer = new RecordsSnapshotProducer(context, new SourceInfo(TestHelper.TEST_SERVER, TestHelper.TEST_DATABASE), false);
    final TestConsumer consumer = testConsumer(1, "public");

    TestHelper.execute(INSERT_CUSTOM_TYPES_STMT);

    // then start the producer and validate all records are there
    snapshotProducer.start(consumer, e -> {});
    consumer.await(TestHelper.waitTimeForRecords() * 30, TimeUnit.SECONDS);

    final Map<String, List<SchemaAndValueField>> expectedValuesByTopicName =
            Collect.hashMapOf("public.custom_table", schemasAndValuesForCustomTypes());
    consumer.process(record -> assertReadRecord(record, expectedValuesByTopicName));
}
@Override
public Map<String, String> getEventSourcePosition(DataCollectionId source, OffsetContext offset, Object key, Struct value) {
    if (value == null) {
        return null;
    }
    // Guard against a missing source block before dereferencing it below
    final Struct sourceInfo = value.getStruct(Envelope.FieldName.SOURCE);
    if (sourceInfo == null) {
        return null;
    }
    final Long scn = sourceInfo.getInt64(SourceInfo.SCN_KEY);
    return Collect.hashMapOf(SourceInfo.SCN_KEY, scn == null ? "null" : Long.toString(scn));
}
Map<String, Integer> topicCounts = Collect.hashMapOf(
        "test_server.public.first_table", 0,
        "test_server.public.partitioned", 0,