/**
 * Produce an empty record to the heartbeat topic.
 */
private SourceRecord heartbeatRecord(Map<String, ?> sourcePartition, Map<String, ?> sourceOffset) {
    final Integer partition = 0;
    return new SourceRecord(sourcePartition, sourceOffset, topicName, partition,
            KEY_SCHEMA, serverNameKey(key), null, null);
}
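/*
 * Standalone sketch (not the connector's actual Heartbeat class) of the pattern above: a record that
 * carries only the source partition/offset maps with a null value, so offsets keep advancing even when
 * no data records are produced. The topic name, map keys, and server name are illustrative assumptions.
 */
import java.util.Collections;
import java.util.Map;

import org.apache.kafka.connect.source.SourceRecord;

public class HeartbeatRecordSketch {
    public static void main(String[] args) {
        Map<String, ?> sourcePartition = Collections.singletonMap("server", "dbserver1"); // hypothetical key
        Map<String, ?> sourceOffset = Collections.singletonMap("pos", 42L);               // hypothetical key
        // An "empty" heartbeat: key and value schemas/payloads are null, only the offset matters here.
        SourceRecord heartbeat = new SourceRecord(sourcePartition, sourceOffset,
                "heartbeat.dbserver1", 0, null, null, null, null);
        System.out.println(heartbeat.sourceOffset()); // {pos=42}
    }
}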
/** {@inheritDoc} */
@Override
public List<SourceRecord> poll() throws InterruptedException {
    ArrayList<SourceRecord> records = new ArrayList<>(evtBatchSize);
    ArrayList<CacheEvent> evts = new ArrayList<>(evtBatchSize);

    if (stopped)
        return records;

    try {
        if (evtBuf.drainTo(evts, evtBatchSize) > 0) {
            for (CacheEvent evt : evts) {
                // schema and keys are ignored.
                for (String topic : topics)
                    records.add(new SourceRecord(srcPartition, offset, topic, null, evt));
            }

            return records;
        }
    }
    catch (IgniteException e) {
        log.error("Error when polling event queue!", e);
    }

    // for shutdown.
    return null;
}
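/*
 * Minimal sketch of the batching idiom used in poll() above: drainTo moves whatever is queued
 * (up to the batch size) into a local list without blocking, and an empty result simply falls
 * through. The queue contents and batch size below are made up for illustration.
 */
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;

public class DrainToBatchSketch {
    public static void main(String[] args) {
        BlockingQueue<String> evtBuf = new LinkedBlockingQueue<>();
        evtBuf.add("evt-1");
        evtBuf.add("evt-2");

        int evtBatchSize = 100;
        List<String> evts = new ArrayList<>(evtBatchSize);
        // Non-blocking bulk transfer of at most evtBatchSize queued events.
        if (evtBuf.drainTo(evts, evtBatchSize) > 0) {
            System.out.println("drained " + evts.size() + " events: " + evts);
        }
    }
}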
private SourceRecord createSourceRecordWithOffset(Map<String, ?> offset) {
    return new SourceRecord(null, offset, null, null, null);
}
private static SourceRecord record() {
    return new SourceRecord(null, null, null, null, null, null);
}
/** * Create an "offset" containing a single timestamp element with the given value. * Needed because {@link ParallelSnapshotReader.ParallelHaltingPredicate} halts based on how * close the record's timestamp is to the present time. * @param tsSec the timestamp in the resulting offset. * @return an "offset" containing the given timestamp. */ private SourceRecord createSourceRecordWithTimestamp(Instant ts) { Map<String, ?> offset = Collections.singletonMap(SourceInfo.TIMESTAMP_KEY, ts.getEpochSecond()); return new SourceRecord(null, offset, null, null, null); } }
@Test
public void testTombstoneForwardConfigured() {
    try (final UnwrapFromEnvelope<SourceRecord> transform = new UnwrapFromEnvelope<>()) {
        final Map<String, String> props = new HashMap<>();
        props.put(DROP_TOMBSTONES, "false");
        transform.configure(props);

        final SourceRecord tombstone = new SourceRecord(new HashMap<>(), new HashMap<>(), "dummy", null, null);
        assertThat(transform.apply(tombstone)).isEqualTo(tombstone);
    }
}
@Test
public void testTombstoneDroppedConfigured() {
    try (final UnwrapFromEnvelope<SourceRecord> transform = new UnwrapFromEnvelope<>()) {
        final Map<String, String> props = new HashMap<>();
        props.put(DROP_TOMBSTONES, "true");
        transform.configure(props);

        final SourceRecord tombstone = new SourceRecord(new HashMap<>(), new HashMap<>(), "dummy", null, null);
        assertThat(transform.apply(tombstone)).isNull();
    }
}
@Test
public void testTombstoneDroppedByDefault() {
    try (final UnwrapFromEnvelope<SourceRecord> transform = new UnwrapFromEnvelope<>()) {
        final Map<String, String> props = new HashMap<>();
        transform.configure(props);

        final SourceRecord tombstone = new SourceRecord(new HashMap<>(), new HashMap<>(), "dummy", null, null);
        assertThat(transform.apply(tombstone)).isNull();
    }
}
private SourceRecord createUnknownUnnamedSchemaRecord() {
    final Schema recordSchema = SchemaBuilder.struct()
            .field("id", SchemaBuilder.int8())
            .build();
    final Struct before = new Struct(recordSchema);
    before.put("id", (byte) 1);
    return new SourceRecord(new HashMap<>(), new HashMap<>(), "dummy", recordSchema, before);
}
private SourceRecord createUnknownRecord() {
    final Schema recordSchema = SchemaBuilder.struct().name("unknown")
            .field("id", SchemaBuilder.int8())
            .build();
    final Struct before = new Struct(recordSchema);
    before.put("id", (byte) 1);
    return new SourceRecord(new HashMap<>(), new HashMap<>(), "dummy", recordSchema, before);
}
/**
 * Produce a schema change record for the given DDL statements.
 *
 * @param databaseName the name of the database that is affected by the DDL statements; may not be null
 * @param ddlStatements the DDL statements; may not be null
 * @param consumer the consumer for all produced records; may not be null
 * @return the number of records produced; will be 0 or more
 */
public int schemaChanges(String databaseName, String ddlStatements, BlockingConsumer<SourceRecord> consumer) {
    String topicName = topicSelector.getPrimaryTopic();
    Integer partition = 0;
    Struct key = schemaChangeRecordKey(databaseName);
    Struct value = schemaChangeRecordValue(databaseName, ddlStatements);
    SourceRecord record = new SourceRecord(source.partition(), source.offset(), topicName, partition,
            schemaChangeKeySchema, key, schemaChangeValueSchema, value);
    try {
        consumer.accept(record);
        return 1;
    }
    catch (InterruptedException e) {
        return 0;
    }
}
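/*
 * Self-contained sketch of the shape of record built above: a keyed record on the connector's primary
 * topic whose key identifies the database and whose value carries the DDL. The schema names and fields,
 * topic, and partition/offset keys below are assumptions for illustration, not the connector's actual
 * schema-change schemas.
 */
import java.util.Collections;

import org.apache.kafka.connect.data.Schema;
import org.apache.kafka.connect.data.SchemaBuilder;
import org.apache.kafka.connect.data.Struct;
import org.apache.kafka.connect.source.SourceRecord;

public class SchemaChangeRecordSketch {
    public static void main(String[] args) {
        Schema keySchema = SchemaBuilder.struct().name("SchemaChangeKey")       // hypothetical schema
                .field("databaseName", Schema.STRING_SCHEMA)
                .build();
        Schema valueSchema = SchemaBuilder.struct().name("SchemaChangeValue")   // hypothetical schema
                .field("databaseName", Schema.STRING_SCHEMA)
                .field("ddl", Schema.STRING_SCHEMA)
                .build();

        Struct key = new Struct(keySchema).put("databaseName", "inventory");
        Struct value = new Struct(valueSchema)
                .put("databaseName", "inventory")
                .put("ddl", "CREATE TABLE products (id INT PRIMARY KEY)");

        SourceRecord record = new SourceRecord(
                Collections.singletonMap("server", "dbserver1"), // hypothetical source partition
                Collections.singletonMap("pos", 100L),           // hypothetical source offset
                "dbserver1", 0, keySchema, key, valueSchema, value);
        System.out.println(record);
    }
}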
@Test @FixFor("DBZ-1086") public void testKeyNullValue() { final ByLogicalTableRouter<SourceRecord> router = new ByLogicalTableRouter<>(); final Map<String, String> props = new HashMap<>(); props.put("topic.regex", "(.*)customers_shard(.*)"); props.put("topic.replacement", "$1customers_all_shards"); props.put("key.field.name", "shard_id"); props.put("key.field.regex", "(.*)customers_shard_(.*)"); props.put("key.field.replacement", "$2"); router.configure(props); SourceRecord record1 = new SourceRecord( new HashMap<>(), new HashMap<>(), "mysql-server-1.inventory.customers_shard_1", null, null, null, null ); SourceRecord transformed1 = router.apply(record1); assertThat(transformed1).isNotNull(); assertThat(transformed1.topic()).isEqualTo("mysql-server-1.inventory.customers_all_shards"); assertThat(transformed1.keySchema()).isNull(); assertThat(transformed1.key()).isNull(); }
/**
 * Stop buffering source records, and flush any buffered records by replacing their offset with the provided offset.
 * Note that this only replaces the record's {@link SourceRecord#sourceOffset() offset} and does not change the
 * value of the record, which may contain information about the snapshot.
 *
 * @param newOffset the offset that reflects that the snapshot has been completed; may not be null
 * @throws InterruptedException if the thread is interrupted while waiting for the new record to be flushed
 */
protected synchronized void stopBuffering(Map<String, ?> newOffset) throws InterruptedException {
    assert newOffset != null;
    this.buffered.close(record -> {
        if (record == null) {
            return null;
        }
        return new SourceRecord(record.sourcePartition(), newOffset, record.topic(), record.kafkaPartition(),
                record.keySchema(), record.key(), record.valueSchema(), record.value());
    });
    this.current = this.actual;
}
private SourceRecord createCreateRecord() {
    final Schema recordSchema = SchemaBuilder.struct().field("id", SchemaBuilder.int8()).build();
    Envelope envelope = Envelope.defineSchema()
            .withName("dummy.Envelope")
            .withRecord(recordSchema)
            .withSource(SchemaBuilder.struct().build())
            .build();
    final Struct before = new Struct(recordSchema);
    before.put("id", (byte) 1);
    final Struct payload = envelope.create(before, null, System.nanoTime());
    return new SourceRecord(new HashMap<>(), new HashMap<>(), "dummy", envelope.schema(), payload);
}
@Override
public int read(SourceInfo source, Object[] row, int rowNumber, int numberOfRows, BitSet includedColumns,
                long ts, BlockingConsumer<SourceRecord> consumer) throws InterruptedException {
    Object key = tableSchema.keyFromColumnData(row);
    Struct value = tableSchema.valueFromColumnData(row);
    if (value != null || key != null) {
        Schema keySchema = tableSchema.keySchema();
        Map<String, ?> partition = source.partition();
        Map<String, Object> offset = source.offsetForRow(rowNumber, numberOfRows);
        Struct origin = source.struct(id);
        SourceRecord record = new SourceRecord(partition, getSourceRecordOffset(offset), topicName, partitionNum,
                keySchema, key, envelope.schema(), envelope.read(value, origin, ts));
        consumer.accept(record);
        return 1;
    }
    return 0;
}
private SourceRecord createDeleteRecord() {
    final Schema recordSchema = SchemaBuilder.struct().field("id", SchemaBuilder.int8()).build();
    Envelope envelope = Envelope.defineSchema()
            .withName("dummy.Envelope")
            .withRecord(recordSchema)
            .withSource(SchemaBuilder.struct().build())
            .build();
    final Struct before = new Struct(recordSchema);
    before.put("id", (byte) 1);
    final Struct payload = envelope.delete(before, null, System.nanoTime());
    return new SourceRecord(new HashMap<>(), new HashMap<>(), "dummy", envelope.schema(), payload);
}
@Override
public int insert(SourceInfo source, Object[] row, int rowNumber, int numberOfRows, BitSet includedColumns,
                  long ts, BlockingConsumer<SourceRecord> consumer) throws InterruptedException {
    Object key = tableSchema.keyFromColumnData(row);
    Struct value = tableSchema.valueFromColumnData(row);
    if (value != null || key != null) {
        Schema keySchema = tableSchema.keySchema();
        Map<String, ?> partition = source.partition();
        Map<String, Object> offset = source.offsetForRow(rowNumber, numberOfRows);
        Struct origin = source.struct(id);
        SourceRecord record = new SourceRecord(partition, getSourceRecordOffset(offset), topicName, partitionNum,
                keySchema, key, envelope.schema(), envelope.create(value, origin, ts));
        consumer.accept(record);
        return 1;
    }
    return 0;
}
/**
 * Utility method to replace the offset in the given record with the latest. This is used on the last record produced
 * during the snapshot.
 *
 * @param record the record
 * @return the updated record
 */
protected SourceRecord replaceOffset(SourceRecord record) {
    if (record == null) {
        return null;
    }
    Map<String, ?> newOffset = context.source().offset();
    return new SourceRecord(record.sourcePartition(), newOffset, record.topic(), record.kafkaPartition(),
            record.keySchema(), record.key(), record.valueSchema(), record.value());
}
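/*
 * Hedged usage sketch of the offset-replacement pattern above with a concrete record, so the effect is
 * visible: every field is copied verbatim except the source offset. The topic name, offset keys, and
 * payload here are illustrative only.
 */
import java.util.Collections;
import java.util.Map;

import org.apache.kafka.connect.data.Schema;
import org.apache.kafka.connect.source.SourceRecord;

public class ReplaceOffsetSketch {
    static SourceRecord replaceOffset(SourceRecord record, Map<String, ?> newOffset) {
        if (record == null) {
            return null;
        }
        return new SourceRecord(record.sourcePartition(), newOffset, record.topic(), record.kafkaPartition(),
                record.keySchema(), record.key(), record.valueSchema(), record.value());
    }

    public static void main(String[] args) {
        SourceRecord lastSnapshotRecord = new SourceRecord(
                Collections.singletonMap("server", "dbserver1"),
                Collections.singletonMap("snapshot", true),
                "dbserver1.inventory.customers", null,
                Schema.STRING_SCHEMA, "last row read during the snapshot");
        SourceRecord completed = replaceOffset(lastSnapshotRecord,
                Collections.singletonMap("snapshot", false));
        System.out.println(completed.sourceOffset()); // {snapshot=false}
    }
}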
protected void generateReadRecord(TableId tableId, Object[] rowData) {
    if (rowData.length == 0) {
        return;
    }
    TableSchema tableSchema = schema().schemaFor(tableId);
    assert tableSchema != null;
    Object key = tableSchema.keyFromColumnData(rowData);
    Struct value = tableSchema.valueFromColumnData(rowData);
    if (key == null || value == null) {
        return;
    }
    Schema keySchema = tableSchema.keySchema();
    sourceInfo.update(clock().currentTimeInMicros(), tableId);
    Map<String, ?> partition = sourceInfo.partition();
    Map<String, ?> offset = sourceInfo.offset();
    String topicName = topicSelector().topicNameFor(tableId);
    Envelope envelope = tableSchema.getEnvelopeSchema();
    currentRecord.set(new SourceRecord(partition, offset, topicName, null, keySchema, key, envelope.schema(),
            envelope.read(value, sourceInfo.source(), clock().currentTimeInMillis())));
}
private SourceRecord rehydrateSourceRecord(Document record, SchemaAndValueConverter keyConverter,
                                           SchemaAndValueConverter valueConverter) throws IOException {
    Document sourcePartitionDoc = record.getDocument("sourcePartition");
    Document sourceOffsetDoc = record.getDocument("sourceOffset");
    String topic = record.getString("topic");
    Integer kafkaPartition = record.getInteger("kafkaPartition");
    Document keySchema = record.getDocument("keySchema");
    Document valueSchema = record.getDocument("valueSchema");
    Document key = record.getDocument("key");
    Document value = record.getDocument("value");

    Document keyAndSchemaDoc = Document.create("schema", keySchema, "payload", key);
    Document valueAndSchemaDoc = Document.create("schema", valueSchema, "payload", value);
    SchemaAndValue keyWithSchema = keyConverter.deserialize(topic, keyAndSchemaDoc);
    SchemaAndValue valueWithSchema = valueConverter.deserialize(topic, valueAndSchemaDoc);
    Map<String, ?> sourcePartition = toMap(sourcePartitionDoc);
    Map<String, ?> sourceOffset = toMap(sourceOffsetDoc);

    return new SourceRecord(sourcePartition, sourceOffset, topic, kafkaPartition,
            keyWithSchema.schema(), keyWithSchema.value(), valueWithSchema.schema(), valueWithSchema.value());
}