public void add(SourceRecord record) {
    ++numRecords;
    lastOffset = record.sourceOffset();
}
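// A minimal, self-contained sketch (an assumption, not part of the original code)
// of how the add(...) accumulator above might be driven. Only the SourceRecord
// API from Kafka Connect is real; the class name, partition/offset keys, and
// topic are illustrative.
import java.util.Collections;
import java.util.Map;
import org.apache.kafka.connect.data.Schema;
import org.apache.kafka.connect.source.SourceRecord;

public class RecordStatsExample {
    private long numRecords;
    private Map<String, ?> lastOffset;

    public void add(SourceRecord record) {
        ++numRecords;
        lastOffset = record.sourceOffset();
    }

    public static void main(String[] args) {
        RecordStatsExample stats = new RecordStatsExample();
        stats.add(new SourceRecord(
                Collections.singletonMap("server", "my-server"),  // source partition
                Collections.singletonMap("pos", 1234L),           // source offset
                "example-topic", Schema.STRING_SCHEMA, "example-value"));
        // numRecords is now 1 and lastOffset holds {pos=1234}.
    }
}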
@Override
public List<SourceRecord> poll() throws InterruptedException {
    final List<DataChangeEvent> records = queue.poll();
    final List<SourceRecord> sourceRecords = records.stream()
            .map(DataChangeEvent::getRecord)
            .collect(Collectors.toList());
    if (!sourceRecords.isEmpty()) {
        this.lastOffset = sourceRecords.get(sourceRecords.size() - 1).sourceOffset();
    }
    return sourceRecords;
}
@Override
public boolean accepts(SourceRecord ourSourceRecord) {
    // We assume that once this reader gets near the end of the binlog, it remains there.
    if (!thisReaderNearEnd.get()) {
        Long sourceRecordTimestamp = (Long) ourSourceRecord.sourceOffset().get(SourceInfo.TIMESTAMP_KEY);
        Instant recordTimestamp = Instant.ofEpochSecond(sourceRecordTimestamp);
        Instant now = Instant.now();
        Duration durationToEnd = Duration.between(recordTimestamp, now);
        if (durationToEnd.compareTo(minHaltingDuration) <= 0) {
            // We are within minHaltingDuration of the end.
            LOGGER.debug("Parallel halting predicate: this reader near end");
            thisReaderNearEnd.set(true);
        }
    }
    // Return false only when both readers are near the end; true otherwise.
    return !(thisReaderNearEnd.get() && otherReaderNearEnd.get());
}
}
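// A self-contained sketch (an assumption, not from the original code) of the
// "near the end" test used above: a record counts as near the binlog end when
// its timestamp is within minHaltingDuration of the current time. The threshold
// and timestamp values are illustrative.
import java.time.Duration;
import java.time.Instant;

public class NearEndCheckExample {
    public static void main(String[] args) {
        Duration minHaltingDuration = Duration.ofSeconds(30);  // illustrative threshold
        long sourceRecordTimestamp = Instant.now().minusSeconds(10).getEpochSecond();

        Instant recordTimestamp = Instant.ofEpochSecond(sourceRecordTimestamp);
        Duration durationToEnd = Duration.between(recordTimestamp, Instant.now());
        boolean nearEnd = durationToEnd.compareTo(minHaltingDuration) <= 0;
        System.out.println("near end of binlog: " + nearEnd);  // true, since 10s <= 30s
    }
}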
@Override
public boolean accepts(SourceRecord sourceRecord) {
    Document offsetDocument = SourceInfo.createDocumentFromOffset(sourceRecord.sourceOffset());
    // isPositionAtOrBefore is true iff leadingReaderFinalOffsetDocument <= offsetDocument,
    // and that is exactly the case in which we should stop (return false).
    return !SourceInfo.isPositionAtOrBefore(leadingReaderFinalOffsetDocument, offsetDocument, gtidFilter);
}
}
protected void assertOffset(SourceRecord record, String offsetField, Object expectedValue) {
    Map<String, ?> offset = record.sourceOffset();
    Object value = offset.get(offsetField);
    assertSameValue(value, expectedValue);
}
@Override
public synchronized void markProcessed(SourceRecord record) throws InterruptedException {
    task.commitRecord(record);
    recordsSinceLastCommit += 1;
    offsetWriter.offset(record.sourcePartition(), record.sourceOffset());
}
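// A hedged sketch (an assumption, not shown in the original code) of how the
// offsets staged by markProcessed(...) above might later be persisted. It uses
// Kafka Connect's internal OffsetStorageWriter API: offset(...) only stages a
// value; a beginFlush()/doFlush(...) cycle is what actually writes it out.
// The commit threshold and the log field are illustrative.
protected void maybeCommitOffsets() throws Exception {
    if (recordsSinceLastCommit >= 100 && offsetWriter.beginFlush()) {
        offsetWriter.doFlush((error, result) -> {
            if (error != null) {
                // A failed flush leaves the staged offsets in place for retry.
                log.error("Failed to flush offsets", error);
            }
        }).get();  // block until the flush completes
        recordsSinceLastCommit = 0;
    }
}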
@Override
protected void pollComplete(List<SourceRecord> batch) {
    // Record a bit about this batch ...
    int batchSize = batch.size();
    recordCounter += batchSize;
    totalRecordCounter.addAndGet(batchSize);
    if (batchSize > 0) {
        SourceRecord lastRecord = batch.get(batchSize - 1);
        lastOffset = lastRecord.sourceOffset();
        if (pollOutputDelay.hasElapsed()) {
            // We want to record the status ...
            long millisSinceLastOutput = clock.currentTimeInMillis() - previousOutputMillis;
            try {
                context.temporaryLoggingContext("binlog", () -> {
                    logger.info("{} records sent during previous {}, last recorded offset: {}",
                            recordCounter, Strings.duration(millisSinceLastOutput), lastOffset);
                });
            }
            finally {
                recordCounter = 0;
                previousOutputMillis += millisSinceLastOutput;
            }
        }
    }
}
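// A self-contained sketch (an assumption, not the original pollOutputDelay/clock
// utilities) of the rate-limited progress logging idea in pollComplete(...)
// above: only emit a status line once a minimum interval has elapsed since the
// previous one, then reset the counter.
import java.time.Duration;

public class RateLimitedLogExample {
    private final long intervalMillis = Duration.ofSeconds(10).toMillis();  // illustrative interval
    private long previousOutputMillis = System.currentTimeMillis();
    private long recordCounter;

    public void onBatch(int batchSize) {
        recordCounter += batchSize;
        long now = System.currentTimeMillis();
        if (now - previousOutputMillis >= intervalMillis) {
            System.out.printf("%d records sent during previous %d ms%n",
                    recordCounter, now - previousOutputMillis);
            recordCounter = 0;
            previousOutputMillis = now;
        }
    }
}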
protected void assertOffset(SourceRecord record, Map<String, ?> expectedOffset) {
    Map<String, ?> offset = record.sourceOffset();
    assertThat(offset).isEqualTo(expectedOffset);
}
protected void verifyFromInitialSync(SourceRecord record, AtomicBoolean foundLast) {
    if (record.sourceOffset().containsKey(SourceInfo.INITIAL_SYNC)) {
        assertThat(record.sourceOffset().containsKey(SourceInfo.INITIAL_SYNC)).isTrue();
        Struct value = (Struct) record.value();
        assertThat(value.getStruct(Envelope.FieldName.SOURCE).getBoolean(SourceInfo.INITIAL_SYNC)).isTrue();
    }
    else {
        // Only the last record in the initial sync should be marked as not being part of the initial sync ...
        assertThat(foundLast.getAndSet(true)).isFalse();
    }
}
sb.append('{');
appendFirst("sourcePartition", record.sourcePartition());
appendAdditional("sourceOffset", record.sourceOffset());
appendAdditional("topic", record.topic());
appendAdditional("kafkaPartition", record.kafkaPartition());
protected void verifyNotFromInitialSync(SourceRecord record) {
    assertThat(record.sourceOffset().containsKey(SourceInfo.INITIAL_SYNC)).isFalse();
    Struct value = (Struct) record.value();
    assertThat(value.getStruct(Envelope.FieldName.SOURCE).getBoolean(SourceInfo.INITIAL_SYNC)).isNull();
}
protected void assertRecordOffsetAndSnapshotSource(SourceRecord record, boolean shouldBeSnapshot, boolean shouldBeLastSnapshotRecord) {
    Map<String, ?> offset = record.sourceOffset();
    assertNotNull(offset.get(SourceInfo.TXID_KEY));
    assertNotNull(offset.get(SourceInfo.TIMESTAMP_KEY));
    assertNotNull(offset.get(SourceInfo.LSN_KEY));
    Object snapshot = offset.get(SourceInfo.SNAPSHOT_KEY);
    Object lastSnapshotRecord = offset.get(SourceInfo.LAST_SNAPSHOT_RECORD_KEY);
    if (shouldBeSnapshot) {
        assertTrue("Snapshot marker expected but not found", (Boolean) snapshot);
        assertEquals("Last snapshot record marker mismatch", shouldBeLastSnapshotRecord, lastSnapshotRecord);
    }
    else {
        assertNull("Snapshot marker not expected, but found", snapshot);
        assertNull("Last snapshot marker not expected, but found", lastSnapshotRecord);
    }
    final Struct envelope = (Struct) record.value();
    if (envelope != null) {
        final Struct source = (Struct) envelope.get("source");
        final Boolean sourceSnapshot = source.getBoolean(SourceInfo.SNAPSHOT_KEY);
        final Boolean sourceLastSnapshotRecord = source.getBoolean(SourceInfo.LAST_SNAPSHOT_RECORD_KEY);
        if (shouldBeSnapshot) {
            assertTrue("Snapshot marker expected in source but not found", sourceSnapshot);
            assertEquals("Last snapshot record marker in source mismatch", shouldBeLastSnapshotRecord, sourceLastSnapshotRecord);
        }
        else {
            assertNull("Source snapshot marker not expected, but found", sourceSnapshot);
            assertNull("Source last snapshot marker not expected, but found", sourceLastSnapshotRecord);
        }
    }
}
for (Iterator<SourceRecord> it = records.iterator(); it.hasNext();) {
    SourceRecord record = it.next();
    assertThat(record.sourceOffset().get("snapshot")).as("Snapshot phase").isEqualTo(true);
    if (it.hasNext()) {
        assertThat(record.sourceOffset().get("snapshot_completed")).as("Snapshot in progress").isEqualTo(false);
    }
    else {
        assertThat(record.sourceOffset().get("snapshot_completed")).as("Snapshot completed").isEqualTo(true);
    }
}

// Streaming-phase records carry an LSN instead of snapshot markers ...
assertNull(valueB.get("before"));
assertThat(recordA.sourceOffset().get("snapshot")).as("Streaming phase").isNull();
assertThat(recordA.sourceOffset().get("snapshot_completed")).as("Streaming phase").isNull();
assertThat(recordA.sourceOffset().get("change_lsn")).as("LSN present").isNotNull();
assertThat(recordB.sourceOffset().get("snapshot")).as("Streaming phase").isNull();
assertThat(recordB.sourceOffset().get("snapshot_completed")).as("Streaming phase").isNull();
assertThat(recordB.sourceOffset().get("change_lsn")).as("LSN present").isNotNull();
assertRecord(key1, expectedKey1);
assertRecord((Struct) value1.get("after"), expectedRow1);
assertThat(record1.sourceOffset()).includes(
        MapAssert.entry("snapshot", true),
        MapAssert.entry("snapshot_completed", i == INITIAL_RECORDS_PER_TABLE - 1));
public static void assertEquals(SourceRecord actual, SourceRecord expected, Predicate<String> ignoreFields,
                                Map<String, RecordValueComparator> comparatorsByName,
                                Map<String, RecordValueComparator> comparatorsBySchemaName) {
    assertThat(actual).isNotNull();
    assertThat(expected).isNotNull();
    assertEquals(null, actual.sourcePartition(), expected.sourcePartition(), "sourcePartition", "", ignoreFields, comparatorsByName, comparatorsBySchemaName);
    assertEquals(null, actual.sourceOffset(), expected.sourceOffset(), "sourceOffset", "", ignoreFields, comparatorsByName, comparatorsBySchemaName);
    assertThat(actual.topic()).isEqualTo(expected.topic());
    assertThat(actual.kafkaPartition()).isEqualTo(expected.kafkaPartition());
    Schema actualKeySchema = actual.keySchema();
    Schema actualValueSchema = actual.valueSchema();
    Schema expectedKeySchema = expected.keySchema();
    Schema expectedValueSchema = expected.valueSchema();
    if (!Objects.equals(actualKeySchema, expectedKeySchema)) {
        String actualStr = SchemaUtil.asString(actualKeySchema);
        String expectedStr = SchemaUtil.asString(expectedKeySchema);
        assertThat(actualStr).as("The key schema for record with key " + SchemaUtil.asString(actual.key())
                + " did not match expected schema").isEqualTo(expectedStr);
    }
    if (!Objects.equals(actualValueSchema, expectedValueSchema)) {
        String actualStr = SchemaUtil.asString(actualValueSchema);
        String expectedStr = SchemaUtil.asString(expectedValueSchema);
        assertThat(actualStr).isEqualTo(expectedStr);
    }
    assertEquals(actualKeySchema, actual.key(), expected.key(), "key", "", ignoreFields, comparatorsByName, comparatorsBySchemaName);
    assertEquals(actualValueSchema, actual.value(), expected.value(), "value", "", ignoreFields, comparatorsByName, comparatorsBySchemaName);
}
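// A hypothetical invocation (not in the original code) of the comparator above:
// ignore fields whose names end in "ts_ms", which legitimately differ between
// runs, and use default comparison everywhere else. actualRecord/expectedRecord
// are assumed to come from the surrounding test; Collections is java.util.
assertEquals(actualRecord, expectedRecord,
        fieldName -> fieldName.endsWith("ts_ms"),  // fields to ignore
        Collections.emptyMap(),                    // no comparators by field name
        Collections.emptyMap());                   // no comparators by schema name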
SourceRecord last = allRecords.get(allRecords.size() - 1);
SourceRecord secondToLast = allRecords.get(allRecords.size() - 2);
assertThat(secondToLast.sourceOffset().containsKey(SourceInfo.SNAPSHOT_KEY)).isTrue();
assertThat(last.sourceOffset().containsKey(SourceInfo.SNAPSHOT_KEY)).isFalse(); // the offset no longer marks the snapshot, so a restart won't redo it
// The source struct of both records still marks the snapshot, since the last record was produced during it.
assertThat(((Struct) secondToLast.value()).getStruct(Envelope.FieldName.SOURCE).getBoolean(SourceInfo.SNAPSHOT_KEY)).isTrue();
assertThat(((Struct) last.value()).getStruct(Envelope.FieldName.SOURCE).getBoolean(SourceInfo.SNAPSHOT_KEY)).isTrue();
/**
 * Serialize the source record to document form.
 *
 * @param record the record; may not be null
 * @param keyConverter the converter for the record key's schema and payload
 * @param valueConverter the converter for the record value's schema and payload
 * @return the document form of the source record; never null
 * @throws IOException if there is an error converting the key or value
 */
private Document serializeSourceRecord(SourceRecord record, SchemaAndValueConverter keyConverter,
                                       SchemaAndValueConverter valueConverter) throws IOException {
    Document keyAndSchema = keyConverter.serialize(record.topic(), record.keySchema(), record.key());
    Document valueAndSchema = valueConverter.serialize(record.topic(), record.valueSchema(), record.value());
    Document sourcePartition = Document.create().putAll(record.sourcePartition());
    Document sourceOffset = Document.create().putAll(record.sourceOffset());
    Document parent = Document.create();
    parent.set("sourcePartition", sourcePartition);
    parent.set("sourceOffset", sourceOffset);
    parent.set("topic", record.topic());
    parent.set("kafkaPartition", record.kafkaPartition());
    parent.set("keySchema", keyAndSchema.getDocument("schema"));
    parent.set("key", keyAndSchema.getDocument("payload"));
    parent.set("valueSchema", valueAndSchema.getDocument("schema"));
    parent.set("value", valueAndSchema.getDocument("payload"));
    return parent;
}
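// A hedged read-back sketch (an assumption, not from the original code): the
// Document produced by serializeSourceRecord(...) can be picked apart again
// with the io.debezium.document.Document getters; record, keyConverter, and
// valueConverter are assumed to come from the surrounding context.
Document doc = serializeSourceRecord(record, keyConverter, valueConverter);
Document offset = doc.getDocument("sourceOffset");   // the flattened source offset map
String topic = doc.getString("topic");
Integer kafkaPartition = doc.getInteger("kafkaPartition");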
@Override
public void commitRecord(SourceRecord record) {
    String ackId = record.sourceOffset().get(cpsSubscription).toString();
    deliveredAckIds.add(ackId);
    ackIds.remove(ackId);
    log.trace("Committed {}", ackId);
}
}
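// A hedged, self-contained sketch (an assumption, not from the original
// connector) of the producing side of commitRecord(...) above: the ack id
// travels in the source offset under the subscription name, so the commit
// callback can recover and acknowledge it. All names and values are illustrative.
import java.util.Collections;
import java.util.Map;
import org.apache.kafka.connect.data.Schema;
import org.apache.kafka.connect.source.SourceRecord;

public class AckOffsetExample {
    public static void main(String[] args) {
        String cpsSubscription = "my-subscription";  // illustrative subscription name
        String ackId = "ack-123";                    // illustrative ack id
        Map<String, String> sourceOffset = Collections.singletonMap(cpsSubscription, ackId);
        SourceRecord record = new SourceRecord(null, sourceOffset, "example-topic",
                Schema.BYTES_SCHEMA, new byte[0]);
        // commitRecord(record) above would read back "ack-123" and acknowledge it.
        System.out.println(record.sourceOffset());
    }
}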