/**
 * Stop buffering source records, and flush any buffered records by replacing their offset with the
 * provided offset. Note that this only replaces the record's {@link SourceRecord#sourceOffset() offset}
 * and does not change the value of the record, which may contain information about the snapshot.
 *
 * @param newOffset the offset that reflects that the snapshot has been completed; may not be null
 * @throws InterruptedException if the thread is interrupted while waiting for the new record to be flushed
 */
protected synchronized void stopBuffering(Map<String, ?> newOffset) throws InterruptedException {
    assert newOffset != null;
    // Flush the buffer, rewriting only each record's offset to the snapshot-completed offset;
    // a null record passes through unchanged.
    this.buffered.close(record -> record == null
            ? null
            : new SourceRecord(record.sourcePartition(), newOffset, record.topic(), record.kafkaPartition(),
                    record.keySchema(), record.key(), record.valueSchema(), record.value()));
    this.current = this.actual;
}
/**
 * Asserts that two {@link SourceRecord}s are equal, comparing source partition, source offset,
 * topic, Kafka partition, key/value schemas, and key/value contents.
 *
 * @param actual the record produced by the connector; may not be null
 * @param expected the record that was expected; may not be null
 * @param ignoreFields predicate selecting fully-qualified field names to skip during comparison
 * @param comparatorsByName custom comparators keyed by fully-qualified field name
 * @param comparatorsBySchemaName custom comparators keyed by schema name
 */
public static void assertEquals(SourceRecord actual, SourceRecord expected, Predicate<String> ignoreFields,
                                Map<String, RecordValueComparator> comparatorsByName,
                                Map<String, RecordValueComparator> comparatorsBySchemaName) {
    assertThat(actual).isNotNull();
    assertThat(expected).isNotNull();
    assertEquals(null, actual.sourcePartition(), expected.sourcePartition(), "sourcePartition", "", ignoreFields,
            comparatorsByName, comparatorsBySchemaName);
    assertEquals(null, actual.sourceOffset(), expected.sourceOffset(), "sourceOffset", "", ignoreFields,
            comparatorsByName, comparatorsBySchemaName);
    assertThat(actual.topic()).isEqualTo(expected.topic());
    assertThat(actual.kafkaPartition()).isEqualTo(expected.kafkaPartition());
    Schema actualKeySchema = actual.keySchema();
    Schema actualValueSchema = actual.valueSchema();
    Schema expectedKeySchema = expected.keySchema();
    Schema expectedValueSchema = expected.valueSchema();
    if (!Objects.equals(actualKeySchema, expectedKeySchema)) {
        // Compare the string renderings so a mismatch yields a readable diff.
        String actualStr = SchemaUtil.asString(actualKeySchema);
        String expectedStr = SchemaUtil.asString(expectedKeySchema);
        assertThat(actualStr).as("The key schema for record with key " + SchemaUtil.asString(actual.key())
                + " did not match expected schema").isEqualTo(expectedStr);
    }
    if (!Objects.equals(actualValueSchema, expectedValueSchema)) {
        String actualStr = SchemaUtil.asString(actualValueSchema);
        String expectedStr = SchemaUtil.asString(expectedValueSchema);
        // Consistency fix: give the value-schema comparison the same descriptive context
        // as the key-schema comparison above, instead of a bare string mismatch.
        assertThat(actualStr).as("The value schema for record with key " + SchemaUtil.asString(actual.key())
                + " did not match expected schema").isEqualTo(expectedStr);
    }
    assertEquals(actualKeySchema, actual.key(), expected.key(), "key", "", ignoreFields, comparatorsByName,
            comparatorsBySchemaName);
    assertEquals(actualValueSchema, actual.value(), expected.value(), "value", "", ignoreFields, comparatorsByName,
            comparatorsBySchemaName);
}
/**
 * Marks the given record as fully processed: commits it to the task, counts it toward the
 * records processed since the last commit, and stages its source position with the offset writer.
 *
 * @param record the record that was processed; may not be null
 * @throws InterruptedException if the thread is interrupted while committing the record
 */
@Override
public synchronized void markProcessed(SourceRecord record) throws InterruptedException {
    task.commitRecord(record);
    recordsSinceLastCommit++;
    offsetWriter.offset(record.sourcePartition(), record.sourceOffset());
}
/**
 * Verify that the given {@link SourceRecord} is a valid tombstone, meaning it has a non-null key
 * and key schema but null value and value schema.
 *
 * @param record the source record; may not be null
 */
public static void isValidTombstone(SourceRecord record) {
    // A tombstone still identifies the row it refers to ...
    final Object key = record.key();
    final Schema keySchema = record.keySchema();
    assertThat(key).isNotNull();
    assertThat(keySchema).isNotNull();
    // ... but carries no payload whatsoever.
    assertThat(record.value()).isNull();
    assertThat(record.valueSchema()).isNull();
}
/**
 * Produce an empty record to the heartbeat topic.
 *
 * @param sourcePartition the source partition to attach to the record
 * @param sourceOffset the source offset to attach to the record
 * @return a keyed heartbeat record with a null value and null value schema
 */
private SourceRecord heartbeatRecord(Map<String, ?> sourcePartition, Map<String, ?> sourceOffset) {
    // Heartbeats always target partition 0; the payload is intentionally empty.
    return new SourceRecord(sourcePartition, sourceOffset, topicName, 0, KEY_SCHEMA, serverNameKey(key), null, null);
}
@Test
@FixFor("DBZ-1086")
public void testKeyNullValue() {
    final ByLogicalTableRouter<SourceRecord> router = new ByLogicalTableRouter<>();

    final Map<String, String> config = new HashMap<>();
    config.put("topic.regex", "(.*)customers_shard(.*)");
    config.put("topic.replacement", "$1customers_all_shards");
    config.put("key.field.name", "shard_id");
    config.put("key.field.regex", "(.*)customers_shard_(.*)");
    config.put("key.field.replacement", "$2");
    router.configure(config);

    // A record with no key, no value, and no schemas at all.
    final SourceRecord record = new SourceRecord(
            new HashMap<>(), new HashMap<>(), "mysql-server-1.inventory.customers_shard_1",
            null, null, null, null);

    final SourceRecord transformed = router.apply(record);
    assertThat(transformed).isNotNull();
    // The topic is rewritten, while the null key must remain null rather than gaining a shard_id field.
    assertThat(transformed.topic()).isEqualTo("mysql-server-1.inventory.customers_all_shards");
    assertThat(transformed.keySchema()).isNull();
    assertThat(transformed.key()).isNull();
}
/**
 * @see <a href="https://docs.mongodb.com/v3.6/reference/operator/update/pop/#up._S_pop">MongoDB operator array update $pop</a>
 */
@Test
public void shouldTransformOperationPop() throws InterruptedException {
    // $pop with -1 removes the first element of the array.
    final SourceRecord updateRecord = executeSimpleUpdateOperation("{'$pop': {dataArrayOfStr: -1}}");

    final SourceRecord transformed = transformation.apply(updateRecord);
    final Struct value = (Struct) transformed.value();
    final Schema schema = transformed.valueSchema();

    VerifyRecord.assertConnectSchemasAreEqual("id", schema.field("id").schema(), Schema.OPTIONAL_INT32_SCHEMA);
    VerifyRecord.assertConnectSchemasAreEqual("dataArrayOfStr", schema.field("dataArrayOfStr").schema(),
            SchemaBuilder.array(Schema.OPTIONAL_STRING_SCHEMA).optional().build());
    assertThat(value.get("id")).isEqualTo(1);
    assertThat(value.get("dataArrayOfStr")).isEqualTo(Arrays.asList("c", "e"));
}
); final Struct key1 = (Struct)record1.key(); final Struct value1 = (Struct)record1.value(); assertRecord(key1, expectedKey1); assertRecord((Struct)value1.get("after"), expectedRow1); assertThat(record1.sourceOffset()).includes( MapAssert.entry("snapshot", true), MapAssert.entry("snapshot_completed", i == INITIAL_RECORDS_PER_TABLE - 1));
protected void verifyFromInitialSync(SourceRecord record, AtomicBoolean foundLast) { if (record.sourceOffset().containsKey(SourceInfo.INITIAL_SYNC)) { assertThat(record.sourceOffset().containsKey(SourceInfo.INITIAL_SYNC)).isTrue(); Struct value = (Struct) record.value(); assertThat(value.getStruct(Envelope.FieldName.SOURCE).getBoolean(SourceInfo.INITIAL_SYNC)).isTrue(); } else { // Only the last record in the initial sync should be marked as not being part of the initial sync ... assertThat(foundLast.getAndSet(true)).isFalse(); } }
/**
 * Mutates the {@code source} struct of the given record's value, setting its last-snapshot-record
 * flag to {@code true} when the flag currently holds a non-null value.
 *
 * @param currentRecord the record whose source struct is updated in place
 */
private void changeSourceToLastSnapshotRecord(SourceRecord currentRecord) {
    final Struct envelope = (Struct) currentRecord.value();
    final Struct source = (Struct) envelope.get("source");
    // NOTE(review): getBoolean(...) != null tests whether the flag's current VALUE is non-null,
    // not whether the field exists in the schema; a present-but-null flag is left untouched,
    // and this call may fail if the schema has no such field at all — confirm this is intended.
    if (source.getBoolean(SourceInfo.LAST_SNAPSHOT_RECORD_KEY) != null) {
        source.put(SourceInfo.LAST_SNAPSHOT_RECORD_KEY, true);
    }
}
/**
 * Counts the given record and remembers its source offset as the most recent one seen.
 *
 * @param record the record to account for; may not be null
 */
public void add(SourceRecord record) {
    lastOffset = record.sourceOffset();
    numRecords++;
}
/**
 * Drains the given number of records from the consumer and groups them by topic name.
 *
 * @param expectedRecordsCount the number of records to consume
 * @param consumer the consumer to drain; may not be null
 * @return a map from topic name to the records received on that topic, in consumption order
 */
private Map<String, List<SourceRecord>> recordsByTopic(final int expectedRecordsCount, TestConsumer consumer) {
    final Map<String, List<SourceRecord>> recordsByTopic = new HashMap<>();
    for (int i = 0; i < expectedRecordsCount; i++) {
        final SourceRecord record = consumer.remove();
        // Idiom fix: computeIfAbsent replaces the original putIfAbsent+compute pair — a single
        // lookup, and no ArrayList is allocated when the topic has already been seen.
        recordsByTopic.computeIfAbsent(record.topic(), topic -> new ArrayList<>()).add(record);
    }
    return recordsByTopic;
}
/**
 * Asserts that the next record available on the consumer is a heartbeat record for the test server.
 */
private void assertHeartBeatRecordInserted() {
    assertFalse("records not generated", consumer.isEmpty());
    final SourceRecord heartbeat = consumer.remove();
    assertEquals("__debezium-heartbeat." + TestHelper.TEST_SERVER, heartbeat.topic());
    // The heartbeat key must identify the logical server that emitted it.
    final Struct heartbeatKey = (Struct) heartbeat.key();
    assertThat(heartbeatKey.get("serverName")).isEqualTo(TestHelper.TEST_SERVER);
}
@Test
public void testPollRecordReturnedNoIncludeHeaders() throws Exception {
    mockConsumerInitialization();
    EasyMock.expect(consumer.poll(Duration.ofMillis(POLL_LOOP_TIMEOUT_MS_VALUE))).andReturn(createTestRecords());
    replayAll();

    objectUnderTest.start(opts);
    final List<SourceRecord> polled = objectUnderTest.poll();
    final SourceRecord firstRecord = polled.get(0);

    // The source partition/offset must identify the upstream topic-partition and its position.
    final String expectedPartition = String.format("%s:%d", FIRST_TOPIC, FIRST_PARTITION);
    assertEquals(expectedPartition, firstRecord.sourcePartition().get(TOPIC_PARTITION_KEY));
    assertEquals(FIRST_OFFSET, firstRecord.sourceOffset().get(OFFSET_KEY));
    // Headers were not requested, so none may be carried over.
    assertEquals(0, firstRecord.headers().size());

    verifyAll();
}
/**
 * Extracts the key of the given record as a Connect {@link Struct}.
 *
 * @param record the source record; may not be null and must carry a {@code Struct} key
 * @return the record's key, cast to {@code Struct}
 */
protected static Struct keyFor(SourceRecord record) {
    final Object key = record.key();
    return (Struct) key;
}
/**
 * Asserts that the schema of the {@code after} field of this record's value schema equals
 * the expected schema.
 *
 * @param expectedSchema the schema the {@code after} field is expected to have
 * @return this assert object, for chaining
 */
public SourceRecordAssert valueAfterFieldSchemaIsEqualTo(Schema expectedSchema) {
    final Schema afterFieldSchema = record.valueSchema().field("after").schema();
    VerifyRecord.assertConnectSchemasAreEqual(null, afterFieldSchema, expectedSchema);
    return this;
}
}
/**
 * Aggregates the given batch of records into per-replica-set summaries and logs one INFO line
 * per replica set, using the task's logging context while writing.
 *
 * @param records the batch of records just sent; may be empty
 */
@Override
public void accept(List<SourceRecord> records) {
    // Skip all work when there is nothing to report or INFO logging is disabled.
    if (records.isEmpty() || !logger.isInfoEnabled()) {
        return;
    }
    summaryByReplicaSet.clear();
    for (SourceRecord record : records) {
        final String rsName = SourceInfo.replicaSetNameForPartition(record.sourcePartition());
        if (rsName != null) {
            summaryByReplicaSet.computeIfAbsent(rsName, name -> new ReplicaSetSummary()).add(record);
        }
    }
    if (summaryByReplicaSet.isEmpty()) {
        return;
    }
    // Log under the task's context and restore the previous context afterwards.
    final PreviousContext prevContext = taskContext.configureLoggingContext("task");
    try {
        summaryByReplicaSet.forEach((rsName, summary) -> logger.info(
                "{} records sent for replica set '{}', last offset: {}",
                summary.recordCount(), rsName, summary.lastOffset()));
    }
    finally {
        prevContext.restore();
    }
}
}
try { byte[] keyBytes = keyJsonConverter.fromConnectData(record.topic(), record.keySchema(), record.key()); keyJson = keyJsonDeserializer.deserialize(record.topic(), keyBytes); byte[] valueBytes = valueJsonConverter.fromConnectData(record.topic(), record.valueSchema(), record.value()); valueJson = valueJsonDeserializer.deserialize(record.topic(), valueBytes); message.set("key", keyJson); message.set("value", valueJson); Testing.print("Message on topic '" + record.topic() + "':"); Testing.print(prettyJson(message)); Testing.print("Problem with message on topic '" + record.topic() + "':"); if (keyJson != null) { Testing.print("valid key = " + prettyJson(keyJson));
/** {@inheritDoc} */ @Override public List<SourceRecord> poll() throws InterruptedException { ArrayList<SourceRecord> records = new ArrayList<>(evtBatchSize); ArrayList<CacheEvent> evts = new ArrayList<>(evtBatchSize); if (stopped) return records; try { if (evtBuf.drainTo(evts, evtBatchSize) > 0) { for (CacheEvent evt : evts) { // schema and keys are ignored. for (String topic : topics) records.add(new SourceRecord(srcPartition, offset, topic, null, evt)); } return records; } } catch (IgniteException e) { log.error("Error when polling event queue!", e); } // for shutdown. return null; }