@Override
public synchronized void markProcessed(SourceRecord record) throws InterruptedException {
    task.commitRecord(record);
    recordsSinceLastCommit += 1;
    offsetWriter.offset(record.sourcePartition(), record.sourceOffset());
}
@Override
public void accept(List<SourceRecord> records) {
    if (records.isEmpty()) return;
    if (!logger.isInfoEnabled()) return;
    summaryByReplicaSet.clear();
    records.forEach(record -> {
        String replicaSetName = SourceInfo.replicaSetNameForPartition(record.sourcePartition());
        if (replicaSetName != null) {
            summaryByReplicaSet.computeIfAbsent(replicaSetName, rsName -> new ReplicaSetSummary()).add(record);
        }
    });
    if (!summaryByReplicaSet.isEmpty()) {
        PreviousContext prevContext = taskContext.configureLoggingContext("task");
        try {
            summaryByReplicaSet.forEach((rsName, summary) -> {
                logger.info("{} records sent for replica set '{}', last offset: {}",
                            summary.recordCount(), rsName, summary.lastOffset());
            });
        }
        finally {
            prevContext.restore();
        }
    }
}
}
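The ReplicaSetSummary accumulator used above is not shown here. A minimal sketch, assuming it only needs to track a record count and the last offset seen for a replica set (the field layout is an assumption, not the actual class), could look like this:

import java.util.Map;
import org.apache.kafka.connect.source.SourceRecord;

// Hypothetical accumulator for the per-replica-set logging above; the real class may carry more state.
class ReplicaSetSummary {
    private int recordCount;
    private Map<String, ?> lastOffset;

    void add(SourceRecord record) {
        recordCount += 1;
        lastOffset = record.sourceOffset();
    }

    int recordCount() {
        return recordCount;
    }

    Map<String, ?> lastOffset() {
        return lastOffset;
    }
}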
// Completed from the surrounding pattern: the remaining constructor arguments copy the value schema and value.
this.currentRecord.set(new SourceRecord(currentRecord.sourcePartition(), sourceInfo.offset(),
        currentRecord.topic(), currentRecord.kafkaPartition(), currentRecord.keySchema(), currentRecord.key(),
        currentRecord.valueSchema(), currentRecord.value()));
SourceRecord record = (SourceRecord) obj;
sb.append('{');
appendFirst("sourcePartition", record.sourcePartition());
appendAdditional("sourceOffset", record.sourceOffset());
appendAdditional("topic", record.topic());
/**
 * Stop buffering source records, and flush any buffered records by replacing their offset with the provided offset.
 * Note that this only replaces the record's {@link SourceRecord#sourceOffset() offset} and does not change the
 * value of the record, which may contain information about the snapshot.
 *
 * @param newOffset the offset that reflects that the snapshot has been completed; may not be null
 * @throws InterruptedException if the thread is interrupted while waiting for the new record to be flushed
 */
protected synchronized void stopBuffering(Map<String, ?> newOffset) throws InterruptedException {
    assert newOffset != null;
    this.buffered.close(record -> {
        if (record == null) return null;
        return new SourceRecord(record.sourcePartition(), newOffset, record.topic(), record.kafkaPartition(),
                                record.keySchema(), record.key(), record.valueSchema(), record.value());
    });
    this.current = this.actual;
}
/**
 * Utility method to replace the offset in the given record with the latest. This is used on the last record produced
 * during the snapshot.
 *
 * @param record the record
 * @return the updated record
 */
protected SourceRecord replaceOffset(SourceRecord record) {
    if (record == null) return null;
    Map<String, ?> newOffset = context.source().offset();
    return new SourceRecord(record.sourcePartition(), newOffset, record.topic(), record.kafkaPartition(),
                            record.keySchema(), record.key(), record.valueSchema(), record.value());
}
public static void assertEquals(SourceRecord actual, SourceRecord expected, Predicate<String> ignoreFields,
                                Map<String, RecordValueComparator> comparatorsByName,
                                Map<String, RecordValueComparator> comparatorsBySchemaName) {
    assertThat(actual).isNotNull();
    assertThat(expected).isNotNull();
    assertEquals(null, actual.sourcePartition(), expected.sourcePartition(), "sourcePartition", "",
                 ignoreFields, comparatorsByName, comparatorsBySchemaName);
    assertEquals(null, actual.sourceOffset(), expected.sourceOffset(), "sourceOffset", "",
                 ignoreFields, comparatorsByName, comparatorsBySchemaName);
    assertThat(actual.topic()).isEqualTo(expected.topic());
    assertThat(actual.kafkaPartition()).isEqualTo(expected.kafkaPartition());
    Schema actualKeySchema = actual.keySchema();
    Schema actualValueSchema = actual.valueSchema();
    Schema expectedKeySchema = expected.keySchema();
    Schema expectedValueSchema = expected.valueSchema();
    if (!Objects.equals(actualKeySchema, expectedKeySchema)) {
        String actualStr = SchemaUtil.asString(actualKeySchema);
        String expectedStr = SchemaUtil.asString(expectedKeySchema);
        assertThat(actualStr).as("The key schema for record with key " + SchemaUtil.asString(actual.key())
                + " did not match expected schema").isEqualTo(expectedStr);
    }
    if (!Objects.equals(actualValueSchema, expectedValueSchema)) {
        String actualStr = SchemaUtil.asString(actualValueSchema);
        String expectedStr = SchemaUtil.asString(expectedValueSchema);
        assertThat(actualStr).isEqualTo(expectedStr);
    }
    assertEquals(actualKeySchema, actual.key(), expected.key(), "key", "",
                 ignoreFields, comparatorsByName, comparatorsBySchemaName);
    assertEquals(actualValueSchema, actual.value(), expected.value(), "value", "",
                 ignoreFields, comparatorsByName, comparatorsBySchemaName);
}
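A hypothetical call site for the helper above, assuming it is exposed as a static method on a test utility class named VerifyRecord (the class name is an assumption); here no fields are ignored and no custom comparators are supplied:

import java.util.Collections;
import java.util.Map;
import org.apache.kafka.connect.data.Schema;
import org.apache.kafka.connect.source.SourceRecord;

// Illustrative test usage; "VerifyRecord" is the assumed owner of the assertEquals helper shown above.
public class CompareRecordsExample {
    public static void main(String[] args) {
        Map<String, ?> partition = Collections.singletonMap("server", "db1");
        Map<String, ?> offset = Collections.singletonMap("lsn", 42L);
        SourceRecord actual = new SourceRecord(partition, offset, "customers", 0,
                Schema.STRING_SCHEMA, "key-1", Schema.STRING_SCHEMA, "value-1");
        SourceRecord expected = new SourceRecord(partition, offset, "customers", 0,
                Schema.STRING_SCHEMA, "key-1", Schema.STRING_SCHEMA, "value-1");
        // Ignore no fields, supply no comparators: records must match field-for-field.
        VerifyRecord.assertEquals(actual, expected, name -> false, Collections.emptyMap(), Collections.emptyMap());
    }
}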
/**
 * Serialize the source record to document form.
 *
 * @param record the record; may not be null
 * @param keyConverter the converter for the record key's schema and payload
 * @param valueConverter the converter for the record value's schema and payload
 * @return the document form of the source record; never null
 * @throws IOException if there is an error converting the key or value
 */
private Document serializeSourceRecord(SourceRecord record, SchemaAndValueConverter keyConverter,
                                       SchemaAndValueConverter valueConverter) throws IOException {
    Document keyAndSchema = keyConverter.serialize(record.topic(), record.keySchema(), record.key());
    Document valueAndSchema = valueConverter.serialize(record.topic(), record.valueSchema(), record.value());
    Document sourcePartition = Document.create().putAll(record.sourcePartition());
    Document sourceOffset = Document.create().putAll(record.sourceOffset());
    Document parent = Document.create();
    parent.set("sourcePartition", sourcePartition);
    parent.set("sourceOffset", sourceOffset);
    parent.set("topic", record.topic());
    parent.set("kafkaPartition", record.kafkaPartition());
    parent.set("keySchema", keyAndSchema.getDocument("schema"));
    parent.set("key", keyAndSchema.getDocument("payload"));
    parent.set("valueSchema", valueAndSchema.getDocument("schema"));
    parent.set("value", valueAndSchema.getDocument("payload"));
    return parent;
}
@Override
public Optional<String> getPartitionId() {
    String partitionId = srcRecord.sourcePartition()
            .entrySet()
            .stream()
            .map(e -> e.getKey() + "=" + e.getValue())
            .collect(Collectors.joining(","));
    return Optional.of(partitionId);
}
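For a source partition map such as {serverId=mysql-1, db=inventory}, the stream above joins the entries into "serverId=mysql-1,db=inventory". A standalone sketch of that formatting (class name and map contents are illustrative only):

import java.util.LinkedHashMap;
import java.util.Map;
import java.util.stream.Collectors;

// Illustrative only: reproduces the key=value joining used to build the partition id.
public class PartitionIdExample {
    public static void main(String[] args) {
        Map<String, Object> sourcePartition = new LinkedHashMap<>();
        sourcePartition.put("serverId", "mysql-1");
        sourcePartition.put("db", "inventory");
        String partitionId = sourcePartition.entrySet().stream()
                .map(e -> e.getKey() + "=" + e.getValue())
                .collect(Collectors.joining(","));
        System.out.println(partitionId); // prints: serverId=mysql-1,db=inventory
    }
}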
private synchronized Record<byte[]> processSourceRecord(final SourceRecord srcRecord) {
    outstandingRecords.put(srcRecord, srcRecord);
    offsetWriter.offset(srcRecord.sourcePartition(), srcRecord.sourceOffset());
    return new Record<byte[]>() {
        @Override
    current._3().commitRecord(current._1());
    offsetStorageWriter.offset(current._1().sourcePartition(), current._1().sourceOffset());
}
catch (Throwable t) {
    LOGGER.warn("Unable to properly commit offset " + current._2(), t);
@Override
public void serialize(SourceRecord record, JsonGenerator jsonGenerator, SerializerProvider serializerProvider)
        throws IOException, JsonProcessingException {
    Storage storage = new Storage();
    storage.sourcePartition = record.sourcePartition();
    storage.sourceOffset = record.sourceOffset();
    storage.topic = record.topic();
    storage.kafkaPartition = record.kafkaPartition();
    storage.keySchema = record.keySchema();
    storage.key = record.key();
    storage.valueSchema = record.valueSchema();
    storage.value = record.value();
    storage.timestamp = record.timestamp();
    if (null != record.headers()) {
        List<Header> headers = new ArrayList<>();
        for (Header header : record.headers()) {
            headers.add(header);
        }
        storage.headers = headers;
    }
    jsonGenerator.writeObject(storage);
}
}
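A serializer like this only takes effect once it is registered with an ObjectMapper. A minimal wiring sketch, assuming the enclosing serializer class above is named SourceRecordSerializer (the name is an assumption):

import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.module.SimpleModule;
import org.apache.kafka.connect.source.SourceRecord;

// Hypothetical wiring: "SourceRecordSerializer" stands in for the class containing the serialize() method above.
public class SourceRecordMapperFactory {
    public static ObjectMapper create() {
        SimpleModule module = new SimpleModule();
        module.addSerializer(SourceRecord.class, new SourceRecordSerializer());
        ObjectMapper mapper = new ObjectMapper();
        mapper.registerModule(module);
        return mapper;
    }
}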
offsetWriter.offset(record.sourcePartition(), record.sourceOffset());
@Test
public void testPollRecordReturnedNoIncludeHeaders() throws Exception {
    mockConsumerInitialization();
    EasyMock.expect(consumer.poll(Duration.ofMillis(POLL_LOOP_TIMEOUT_MS_VALUE))).andReturn(createTestRecords());
    replayAll();
    objectUnderTest.start(opts);
    List<SourceRecord> records = objectUnderTest.poll();
    SourceRecord testRecord = records.get(0);
    assertEquals(String.format("%s:%d", FIRST_TOPIC, FIRST_PARTITION),
                 testRecord.sourcePartition().get(TOPIC_PARTITION_KEY));
    assertEquals(FIRST_OFFSET, testRecord.sourceOffset().get(OFFSET_KEY));
    assertEquals(0, testRecord.headers().size());
    verifyAll();
}
assertEquals(TOPIC, records.get(0).topic());
assertEquals("partial line finished", records.get(0).value());
assertEquals(Collections.singletonMap(FileStreamSourceTask.FILENAME_FIELD, tempFile.getAbsolutePath()),
             records.get(0).sourcePartition());
assertEquals(Collections.singletonMap(FileStreamSourceTask.POSITION_FIELD, 22L), records.get(0).sourceOffset());
assertEquals(null, task.poll());

assertEquals(4, records.size());
assertEquals("line1", records.get(0).value());
assertEquals(Collections.singletonMap(FileStreamSourceTask.FILENAME_FIELD, tempFile.getAbsolutePath()),
             records.get(0).sourcePartition());
assertEquals(Collections.singletonMap(FileStreamSourceTask.POSITION_FIELD, 28L), records.get(0).sourceOffset());
assertEquals("line2", records.get(1).value());
assertEquals(Collections.singletonMap(FileStreamSourceTask.FILENAME_FIELD, tempFile.getAbsolutePath()),
             records.get(1).sourcePartition());
assertEquals(Collections.singletonMap(FileStreamSourceTask.POSITION_FIELD, 35L), records.get(1).sourceOffset());
assertEquals("line3", records.get(2).value());
assertEquals(Collections.singletonMap(FileStreamSourceTask.FILENAME_FIELD, tempFile.getAbsolutePath()),
             records.get(2).sourcePartition());
assertEquals(Collections.singletonMap(FileStreamSourceTask.POSITION_FIELD, 41L), records.get(2).sourceOffset());
assertEquals("line4", records.get(3).value());
assertEquals(Collections.singletonMap(FileStreamSourceTask.FILENAME_FIELD, tempFile.getAbsolutePath()),
             records.get(3).sourcePartition());
assertEquals(Collections.singletonMap(FileStreamSourceTask.POSITION_FIELD, 47L), records.get(3).sourceOffset());

assertEquals(1, records.size());
assertEquals("", records.get(0).value());
assertEquals(Collections.singletonMap(FileStreamSourceTask.FILENAME_FIELD, tempFile.getAbsolutePath()),
             records.get(0).sourcePartition());
assertEquals(Collections.singletonMap(FileStreamSourceTask.POSITION_FIELD, 48L), records.get(0).sourceOffset());
             testRecord.sourcePartition().get(TOPIC_PARTITION_KEY));
assertEquals(FIRST_OFFSET, testRecord.sourceOffset().get(OFFSET_KEY));
assertEquals(1, testRecord.headers().size());