assert this.clock != null;

// Internal key converter, configured with isKey = true.
keyConverter = config.getInstance(INTERNAL_KEY_CONVERTER_CLASS, Converter.class, () -> this.classLoader);
keyConverter.configure(config.subset(INTERNAL_KEY_CONVERTER_CLASS.name() + ".", true).asMap(), true);

// Internal value converter, configured with isKey = false.
valueConverter = config.getInstance(INTERNAL_VALUE_CONVERTER_CLASS, Converter.class, () -> this.classLoader);
Configuration valueConverterConfig = config;
valueConverter.configure(valueConverterConfig.subset(INTERNAL_VALUE_CONVERTER_CLASS.name() + ".", true).asMap(), false);
@Override
public byte[] serialize(final String topic, final GenericRow genericRow) {
  if (genericRow == null) {
    return null;
  }

  final Struct struct = translator.toConnectRow(genericRow);
  try {
    return converter.fromConnectData(topic, struct.schema(), struct);
  } catch (final Exception e) {
    throw new SerializationException(
        "Error serializing row to topic " + topic + " using Converter API", e);
  }
}
@SuppressWarnings("unchecked") @Override public GenericRow deserialize(final String topic, final byte[] bytes) { try { final SchemaAndValue schemaAndValue = converter.toConnectData(topic, bytes); return connectToKsqlTranslator.toKsqlRow(schemaAndValue.schema(), schemaAndValue.value()); } catch (final Exception e) { recordLogger.error( ProcessingLogMessageFactory.deserializationErrorMsg(e, Optional.ofNullable(bytes))); throw e; } }
// Offset keys are written schemaless as a [namespace, key] list, so a null schema is passed.
byte[] keySerialized = keyConverter.fromConnectData(namespace, null, Arrays.asList(namespace, key));
ByteBuffer keyBuffer = (keySerialized != null) ? ByteBuffer.wrap(keySerialized) : null;
serializedToOriginal.put(keyBuffer, key);

// Deserialize the stored value (if any) and verify it has the expected offset format.
SchemaAndValue deserializedSchemaAndValue =
    valueConverter.toConnectData(namespace, rawEntry.getValue() != null ? rawEntry.getValue().array() : null);
Object deserializedValue = deserializedSchemaAndValue.value();
OffsetUtils.validateFormat(deserializedValue);
@Override
public Optional<String> getKey() {
  byte[] keyBytes = keyConverter.fromConnectData(
      srcRecord.topic(), srcRecord.keySchema(), srcRecord.key());
  // Records without a key serialize to null; report those as an absent key instead of throwing.
  return Optional.ofNullable(keyBytes).map(bytes -> Base64.getEncoder().encodeToString(bytes));
}
@Before
public void setup() {
  connectDeserializer = new KsqlConnectDeserializer(converter, dataTranslator, recordLogger);

  when(converter.toConnectData(any(), any())).thenReturn(new SchemaAndValue(schema, value));
  when(dataTranslator.toKsqlRow(any(), any())).thenReturn(genericRow);
}
keyConverter.configure(
    config.subset(EmbeddedEngine.INTERNAL_KEY_CONVERTER_CLASS.name() + ".", true).asMap(), true);
Converter valueConverter = config.getInstance(EmbeddedEngine.INTERNAL_VALUE_CONVERTER_CLASS, Converter.class);
Configuration valueConverterConfig = config;
valueConverter.configure(
    valueConverterConfig.subset(EmbeddedEngine.INTERNAL_VALUE_CONVERTER_CLASS.name() + ".", true).asMap(), false);
/**
 * Write this connector configuration to persistent storage and wait until it has been acknowledged
 * and read back by tailing the Kafka log with a consumer.
 *
 * @param connector  name of the connector to write data for
 * @param properties the configuration to write
 */
@Override
public void putConnectorConfig(String connector, Map<String, String> properties) {
  log.debug("Writing connector configuration for connector '{}'", connector);
  Struct connectConfig = new Struct(CONNECTOR_CONFIGURATION_V0);
  connectConfig.put("properties", properties);
  byte[] serializedConfig = converter.fromConnectData(topic, CONNECTOR_CONFIGURATION_V0, connectConfig);
  updateConnectorConfig(connector, serializedConfig);
}
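CONNECTOR_CONFIGURATION_V0 is defined elsewhere in this store. As a rough sketch (an assumption about its shape, not the verbatim definition), a wrapper schema holding the connector's string-to-string configuration could be declared like this:

// Assumed shape only: a single "properties" field carrying the connector configuration map
// that putConnectorConfig() wraps in a Struct above.
public static final Schema CONNECTOR_CONFIGURATION_V0 = SchemaBuilder.struct()
    .field("properties", SchemaBuilder.map(Schema.STRING_SCHEMA, Schema.OPTIONAL_STRING_SCHEMA).build())
    .build();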
@Test
public void shouldDeserializeRecordsCorrectly() {
  // When:
  final GenericRow deserialized = connectDeserializer.deserialize(TOPIC, BYTES);

  // Then:
  verify(converter, times(1)).toConnectData(TOPIC, BYTES);
  verify(dataTranslator, times(1)).toKsqlRow(schema, value);
  assertThat(deserialized, sameInstance(genericRow));
}
private Converter createConverter(Map<String, String> parameters, String classKey, String propertyKey, boolean isKey)
    throws ClassNotFoundException, IllegalAccessException, InstantiationException, IOException {
  Converter ret = (Converter) Class.forName(parameters.get(classKey)).newInstance();
  ret.configure(propertiesToMap(parameters.get(propertyKey)), isKey);
  return ret;
}
OffsetUtils.validateFormat(entry.getValue());
byte[] key = keyConverter.fromConnectData(namespace, null, Arrays.asList(namespace, entry.getKey()));
ByteBuffer keyBuffer = (key != null) ? ByteBuffer.wrap(key) : null;
byte[] value = valueConverter.fromConnectData(namespace, null, entry.getValue());
ByteBuffer valueBuffer = (value != null) ? ByteBuffer.wrap(value) : null;
offsetsSerialized.put(keyBuffer, valueBuffer);
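As a sketch of what that schemaless offset round trip produces (the converter choice, namespace, and partition/offset maps below are hypothetical): keys come back as a List and values as a Map whose integral numbers are Longs, which is the shape the toOffsetInfo() helper further down unpacks.

import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.apache.kafka.connect.json.JsonConverter;

public class OffsetFormatSketch {
  public static void main(String[] args) {
    // Internal converters are typically JsonConverter with schema envelopes disabled.
    JsonConverter converter = new JsonConverter();
    converter.configure(Collections.singletonMap("schemas.enable", "false"), true);

    // Hypothetical source partition and offset maps for a connector named "my-connector".
    Map<String, Object> partition = new HashMap<>();
    partition.put("topic", "orders");
    partition.put("partition", 0);
    Map<String, Object> offset = Collections.singletonMap("offset", 42L);

    // Keys are serialized schemaless as a [namespace, partition] list, values as the offset map.
    byte[] keyBytes = converter.fromConnectData("my-connector", null, Arrays.asList("my-connector", partition));
    byte[] valueBytes = converter.fromConnectData("my-connector", null, offset);

    // Reading them back yields a List for the key and a Map for the value.
    List<?> keyList = (List<?>) converter.toConnectData("my-connector", keyBytes).value();
    Map<?, ?> valueMap = (Map<?, ?>) converter.toConnectData("my-connector", valueBytes).value();
    System.out.println(keyList + " -> " + valueMap.get("offset"));
  }
}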
@Test
public void shouldLogOnError() {
  // Given:
  final RuntimeException error = new RuntimeException("bad");
  reset(converter);
  when(converter.toConnectData(any(), any())).thenThrow(error);

  // When:
  try {
    connectDeserializer.deserialize(TOPIC, BYTES);
    fail("deserialize should have thrown");
  } catch (final RuntimeException caught) {
    assertThat(caught, sameInstance(error));
  }

  // Then:
  SerdeTestUtils.shouldLogError(
      recordLogger,
      ProcessingLogMessageFactory.deserializationErrorMsg(error, Optional.ofNullable(BYTES)).get()
  );
}
converter.configure(subKeys, isKey);
@Override
public Dataset<Row> getBatch(Option<Offset> start, Offset end) {
  return sqlContext.createDataFrame(
      sharedSourceTaskContext
          .read(start.isDefined() ? Optional.of(start.get()) : Optional.empty(), end)
          .stream()
          .map(record -> new GenericRow(new Object[] {
              record.topic(),
              record.kafkaPartition(),
              keyConverter.fromConnectData(record.topic(), record.keySchema(), record.key()),
              valueConverter.fromConnectData(record.topic(), record.valueSchema(), record.value())
          }))
          .collect(Collectors.toList()),
      DATA_SCHEMA);
}
@SuppressWarnings("unchecked") private OffsetInfo toOffsetInfo(Map.Entry<ByteBuffer, ByteBuffer> offsetEntry) { Object kafkaConnectOffsetKey = internalConverter.toConnectData(null, offsetEntry.getKey().array()).value(); // Deserialize the internal Mirus offset format List<Object> keyList = (List) kafkaConnectOffsetKey; Map<String, Object> parts = (Map) keyList.get(1); ByteBuffer val = offsetEntry.getValue(); Long offset = null; // Handle null-valued records, i.e. tombstone messages if (val != null) { Object kafkaConnectOffsetValue = internalConverter.toConnectData(null, val.array()).value(); Map<String, Object> valueMap = (Map) kafkaConnectOffsetValue; offset = (Long) valueMap.get("offset"); } return new OffsetInfo( (String) keyList.get(0), (String) parts.get("topic"), ((Long) parts.get("partition")), offset); } }
plugin.configure(converterConfig, isKeyConverter);
return plugin;
private void writeAll(Collection<SinkRecord> records) {
  metrics.hist(records.size(), "putSize", tags);
  try (Metrics.StopTimer ignored = metrics.time("writeAll", tags)) {
    writer.write(
        format.writeBatch(
            records.stream()
                .map(record -> new ProducerRecord<>(
                    record.topic(),
                    record.kafkaPartition(),
                    keyConverter
                        .map(c -> c.fromConnectData(record.topic(), record.keySchema(), record.key()))
                        .orElse(null),
                    valueConverter.fromConnectData(record.topic(), record.valueSchema(), record.value()))))
            .collect(toList()),
        records.size());
  } catch (IOException e) {
    throw new RetriableException("Failed to write to buffer", e);
  }
}
private ConnectorStatus parseConnectorStatus(String connector, byte[] data) {
  try {
    SchemaAndValue schemaAndValue = converter.toConnectData(topic, data);
    if (!(schemaAndValue.value() instanceof Map)) {
      log.error("Invalid connector status type {}", schemaAndValue.value().getClass());
      return null;
    }

    @SuppressWarnings("unchecked")
    Map<String, Object> statusMap = (Map<String, Object>) schemaAndValue.value();
    TaskStatus.State state = TaskStatus.State.valueOf((String) statusMap.get(STATE_KEY_NAME));
    String trace = (String) statusMap.get(TRACE_KEY_NAME);
    String workerUrl = (String) statusMap.get(WORKER_ID_KEY_NAME);
    int generation = ((Long) statusMap.get(GENERATION_KEY_NAME)).intValue();
    return new ConnectorStatus(connector, state, trace, workerUrl, generation);
  } catch (Exception e) {
    log.error("Failed to deserialize connector status", e);
    return null;
  }
}
private static MirusOffsetTool newOffsetTool(Args args) throws IOException {
  // This needs to be the admin topic properties.
  // By default these are in the worker properties file, as this has the admin producer and
  // consumer settings. Separating these might be wise - also useful for storing state in
  // source cluster if it proves necessary.
  final Map<String, String> properties =
      !args.propertiesFile.isEmpty()
          ? Utils.propsToStringMap(Utils.loadProps(args.propertiesFile))
          : Collections.emptyMap();
  final DistributedConfig config = new DistributedConfig(properties);
  final KafkaOffsetBackingStore offsetBackingStore = new KafkaOffsetBackingStore();
  offsetBackingStore.configure(config);

  // Avoid initializing the entire Kafka Connect plugin system by assuming the
  // internal.[key|value].converter is org.apache.kafka.connect.json.JsonConverter
  final Converter internalConverter = new JsonConverter();
  internalConverter.configure(config.originalsWithPrefix("internal.key.converter."), true);

  final OffsetSetter offsetSetter = new OffsetSetter(internalConverter, offsetBackingStore);
  final OffsetFetcher offsetFetcher = new OffsetFetcher(config, internalConverter);
  final OffsetSerDe offsetSerDe = OffsetSerDeFactory.create(args.format);

  return new MirusOffsetTool(args, offsetFetcher, offsetSetter, offsetSerDe);
}
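The tool above relies on the worker's internal converters being schemaless JSON. As a hedged sketch of what the prefixed configuration passed to configure() would typically contain when the (older-style, since-deprecated) internal.key.converter.schemas.enable property is set to false in the worker file:

// Assumption for illustration: config.originalsWithPrefix("internal.key.converter.") yields
// the prefix-stripped entries, here just the flag that disables JSON schema envelopes.
Map<String, Object> internalConverterConfig = new HashMap<>();
internalConverterConfig.put("schemas.enable", "false");
internalConverter.configure(internalConverterConfig, true);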
@Override
public byte[] getValue() {
  return valueConverter.fromConnectData(
      srcRecord.topic(), srcRecord.valueSchema(), srcRecord.value());
}