@Override
public synchronized void markProcessed(SourceRecord record) throws InterruptedException {
    task.commitRecord(record);
    recordsSinceLastCommit += 1;
    offsetWriter.offset(record.sourcePartition(), record.sourceOffset());
}
long started = clock.currentTimeInMillis();
long timeout = started + commitTimeout.toMillis();
if (!offsetWriter.beginFlush()) return;
Future<Void> flush = offsetWriter.doFlush(this::completedFlush);
if (flush == null) return; // no offsets to commit
...
offsetWriter.cancelFlush();
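For context, a hedged sketch of what the elided middle of this commit sequence typically does: wait on the flush future for the time remaining in the commit window, and cancel the in-flight flush on any failure so the writer's internal state is reset for a later retry (java.util.concurrent imports assumed; completedFlush is the callback referenced above):

// Sketch only, assuming the surrounding fields (flush, timeout, clock,
// task, offsetWriter) from the snippet above.
try {
    // Wait only for the time remaining in the commit window.
    flush.get(Math.max(timeout - clock.currentTimeInMillis(), 0), TimeUnit.MILLISECONDS);
    task.commit(); // let the task release resources tied to the committed offsets
} catch (InterruptedException e) {
    Thread.currentThread().interrupt(); // preserve interrupt status for the caller
    offsetWriter.cancelFlush();         // reset the writer so a later flush can retry
} catch (ExecutionException | TimeoutException e) {
    offsetWriter.cancelFlush();
}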
@Override
public byte[] serialize(final String topic, final GenericRow genericRow) {
    if (genericRow == null) {
        return null;
    }

    final Struct struct = translator.toConnectRow(genericRow);
    try {
        return converter.fromConnectData(topic, struct.schema(), struct);
    } catch (final Exception e) {
        throw new SerializationException(
            "Error serializing row to topic " + topic + " using Converter API", e);
    }
}
offsetStore.configure(workerConfig);
offsetStore.start();

OffsetStorageWriter offsetWriter =
        new OffsetStorageWriter(offsetStore, engineName, keyConverter, valueConverter);
OffsetStorageReader offsetReader =
        new OffsetStorageReaderImpl(offsetStore, engineName, keyConverter, valueConverter);
Duration commitTimeout = Duration.ofMillis(config.getLong(OFFSET_COMMIT_TIMEOUT_MS));

// ... the engine runs with this reader/writer pair; the store is stopped on shutdown
offsetStore.stop();
keyConverter.configure(
        config.subset(EmbeddedEngine.INTERNAL_KEY_CONVERTER_CLASS.name() + ".", true).asMap(), true);
Converter valueConverter = config.getInstance(EmbeddedEngine.INTERNAL_VALUE_CONVERTER_CLASS, Converter.class);
valueConverter.configure(
        config.subset(EmbeddedEngine.INTERNAL_VALUE_CONVERTER_CLASS.name() + ".", true).asMap(), false);

WorkerConfig workerConfig = new EmbeddedConfig(embeddedConfig);
FileOffsetBackingStore offsetStore = new FileOffsetBackingStore();
offsetStore.configure(workerConfig);
offsetStore.start();
try {
    OffsetStorageReaderImpl offsetReader =
            new OffsetStorageReaderImpl(offsetStore, engineName, keyConverter, valueConverter);
    return offsetReader.offsets(partitions);
} finally {
    offsetStore.stop();
}
Map<String, ?> lastOffset = context.offsetStorageReader().offset(partition);
long lastId = lastOffset == null ? 0L : (Long) lastOffset.get("id");
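To illustrate how a recovered offset like this is typically consumed, here is a hypothetical poll() for the same task. The "id" key matches the lookup above, while Row, fetchRowsAfter(...), and the partition/topic/valueSchema fields are made-up stand-ins for the source-specific parts:

// Hypothetical poll() that resumes from the recovered offset.
@Override
public List<SourceRecord> poll() throws InterruptedException {
    List<SourceRecord> records = new ArrayList<>();
    for (Row row : fetchRowsAfter(lastId)) {              // assumed source-specific read
        lastId = row.id();
        records.add(new SourceRecord(
                partition,                                // source partition, e.g. {"server": "db1"}
                Collections.singletonMap("id", lastId),   // source offset recovered on restart
                topic, null, valueSchema, row.toStruct()));
    }
    return records;
}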
/**
 * Loads the connector's persistent offset (if present) via the given loader.
 */
@Override
protected OffsetContext getPreviousOffset(OffsetContext.Loader loader) {
    Map<String, ?> partition = loader.getPartition();
    Map<String, Object> previousOffset = context.offsetStorageReader()
            .offsets(Collections.singleton(partition))
            .get(partition);
    if (previousOffset != null) {
        OffsetContext offsetContext = loader.load(previousOffset);
        LOGGER.info("Found previous offset {}", offsetContext);
        return offsetContext;
    } else {
        return null;
    }
}
private KafkaOffsetBackingStore createKafkaOffsetBackingStore(WorkerConfig workerConfig) {
    KafkaOffsetBackingStore kafkaOffsetBackingStore = new KafkaOffsetBackingStore();
    kafkaOffsetBackingStore.configure(workerConfig);
    kafkaOffsetBackingStore.start();
    return kafkaOffsetBackingStore;
}
@SuppressWarnings("unchecked") @Override public GenericRow deserialize(final String topic, final byte[] bytes) { try { final SchemaAndValue schemaAndValue = converter.toConnectData(topic, bytes); return connectToKsqlTranslator.toKsqlRow(schemaAndValue.schema(), schemaAndValue.value()); } catch (final Exception e) { recordLogger.error( ProcessingLogMessageFactory.deserializationErrorMsg(e, Optional.ofNullable(bytes))); throw e; } }
assert this.clock != null;

keyConverter = config.getInstance(INTERNAL_KEY_CONVERTER_CLASS, Converter.class, () -> this.classLoader);
keyConverter.configure(config.subset(INTERNAL_KEY_CONVERTER_CLASS.name() + ".", true).asMap(), true);
valueConverter = config.getInstance(INTERNAL_VALUE_CONVERTER_CLASS, Converter.class, () -> this.classLoader);
valueConverter.configure(config.subset(INTERNAL_VALUE_CONVERTER_CLASS.name() + ".", true).asMap(), false);
/**
 * Get the string encoding.
 *
 * @return the encoding; never null
 */
public String encoding() {
    return getString(ENCODING_CONFIG);
}
@Override
public void onConnectorConfigUpdate(String connector) {
    // TODO: move connector configuration update handling here to be consistent with
    // the semantics of the config backing store
    synchronized (StandaloneHerder.this) {
        configState = configBackingStore.snapshot();
    }
}
Map<String, Object> existingOffset = context.offsetStorageReader().offset(sourceInfo.partition());
LoggingContext.PreviousContext previousContext = taskContext.configureLoggingContext(CONTEXT_NAME);
try {
@Before
public void setup() {
    connectDeserializer = new KsqlConnectDeserializer(converter, dataTranslator, recordLogger);

    when(converter.toConnectData(any(), any())).thenReturn(new SchemaAndValue(schema, value));
    when(dataTranslator.toKsqlRow(any(), any())).thenReturn(genericRow);
}
boolean snapshotEventsAreInserts = true;
Map<String, String> partition = Collect.hashMapOf(SourceInfo.SERVER_PARTITION_KEY, serverName);
Map<String, ?> offsets = getRestartOffset(context.offsetStorageReader().offset(partition));
final SourceInfo source;
if (offsets != null) {
context.offsetStorageReader().offsets(partitions).forEach(source::setOffsetFor);
@Test
public void shouldDeserializeRecordsCorrectly() {
    // When:
    final GenericRow deserialized = connectDeserializer.deserialize(TOPIC, BYTES);

    // Then:
    verify(converter, times(1)).toConnectData(TOPIC, BYTES);
    verify(dataTranslator, times(1)).toKsqlRow(schema, value);
    assertThat(deserialized, sameInstance(genericRow));
}
@Test
public void shouldLogOnError() {
    // Given:
    final RuntimeException error = new RuntimeException("bad");
    reset(converter);
    when(converter.toConnectData(any(), any())).thenThrow(error);

    // When:
    try {
        connectDeserializer.deserialize(TOPIC, BYTES);
        fail("deserialize should have thrown");
    } catch (final RuntimeException caught) {
        assertThat(caught, sameInstance(error));
    }

    // Then:
    SerdeTestUtils.shouldLogError(
        recordLogger,
        ProcessingLogMessageFactory.deserializationErrorMsg(error, Optional.ofNullable(BYTES)).get()
    );
}