@Override
public byte[] serialize(final String topic, final GenericRow genericRow) {
    if (genericRow == null) {
        return null;
    }

    final Struct struct = translator.toConnectRow(genericRow);
    try {
        return converter.fromConnectData(topic, struct.schema(), struct);
    } catch (final Exception e) {
        throw new SerializationException(
            "Error serializing row to topic " + topic + " using Converter API", e);
    }
}
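For context, here is a minimal standalone sketch of the Converter API the serializer above delegates to, assuming Kafka Connect's bundled JsonConverter; the topic name, schema, and field names are illustrative only and not taken from the snippet.

import java.util.Collections;
import org.apache.kafka.connect.data.Schema;
import org.apache.kafka.connect.data.SchemaBuilder;
import org.apache.kafka.connect.data.Struct;
import org.apache.kafka.connect.json.JsonConverter;

public class JsonConverterSketch {
    public static void main(String[] args) {
        // Configure the converter for value (not key) conversion; "schemas.enable"
        // controls whether the JSON payload embeds the schema next to the data.
        JsonConverter converter = new JsonConverter();
        converter.configure(Collections.singletonMap("schemas.enable", "false"), false);

        // Illustrative schema and row; real callers derive these from their own data model.
        Schema schema = SchemaBuilder.struct()
            .field("id", Schema.INT64_SCHEMA)
            .field("name", Schema.STRING_SCHEMA)
            .build();
        Struct row = new Struct(schema).put("id", 1L).put("name", "alice");

        // fromConnectData turns a Connect schema + value into the serialized bytes
        // that get written to the Kafka topic.
        byte[] payload = converter.fromConnectData("users", schema, row);
        System.out.println(new String(payload));
    }
}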
@Override
public Optional<String> getKey() {
    byte[] keyBytes = keyConverter.fromConnectData(
        srcRecord.topic(), srcRecord.keySchema(), srcRecord.key());
    // A record without a key converts to null bytes; return an empty Optional
    // rather than passing null to the Base64 encoder.
    return Optional.ofNullable(keyBytes)
        .map(bytes -> Base64.getEncoder().encodeToString(bytes));
}
/**
 * Write this connector configuration to persistent storage and wait until it has been
 * acknowledged and read back by tailing the Kafka log with a consumer.
 *
 * @param connector  name of the connector to write data for
 * @param properties the configuration to write
 */
@Override
public void putConnectorConfig(String connector, Map<String, String> properties) {
    log.debug("Writing connector configuration for connector '{}'", connector);
    Struct connectConfig = new Struct(CONNECTOR_CONFIGURATION_V0);
    connectConfig.put("properties", properties);
    byte[] serializedConfig = converter.fromConnectData(topic, CONNECTOR_CONFIGURATION_V0, connectConfig);
    updateConnectorConfig(connector, serializedConfig);
}
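The CONNECTOR_CONFIGURATION_V0 schema is not shown in this excerpt. As a hedged sketch, a struct with a single map-typed "properties" field along the following lines would match how the Struct is populated above; the nullability of the map values is an assumption, not something stated in the snippet.

// Sketch only: one map-typed "properties" field, mirroring connectConfig.put("properties", properties).
public static final Schema CONNECTOR_CONFIGURATION_V0 = SchemaBuilder.struct()
    .field("properties", SchemaBuilder.map(
        Schema.STRING_SCHEMA,
        Schema.OPTIONAL_STRING_SCHEMA).build())
    .build();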
@Override
public Dataset<Row> getBatch(Option<Offset> start, Offset end) {
    return sqlContext.createDataFrame(
        sharedSourceTaskContext
            .read(start.isDefined() ? Optional.of(start.get()) : Optional.empty(), end)
            .stream()
            .map(record -> new GenericRow(new Object[]{
                record.topic(),
                record.kafkaPartition(),
                keyConverter.fromConnectData(record.topic(), record.keySchema(), record.key()),
                valueConverter.fromConnectData(record.topic(), record.valueSchema(), record.value())
            }))
            .collect(Collectors.toList()),
        DATA_SCHEMA);
}
OffsetUtils.validateFormat(entry.getValue());
byte[] key = keyConverter.fromConnectData(namespace, null, Arrays.asList(namespace, entry.getKey()));
ByteBuffer keyBuffer = (key != null) ? ByteBuffer.wrap(key) : null;
byte[] value = valueConverter.fromConnectData(namespace, null, entry.getValue());
ByteBuffer valueBuffer = (value != null) ? ByteBuffer.wrap(value) : null;
offsetsSerialized.put(keyBuffer, valueBuffer);
private void writeAll(Collection<SinkRecord> records) {
    metrics.hist(records.size(), "putSize", tags);
    try (Metrics.StopTimer ignored = metrics.time("writeAll", tags)) {
        writer.write(
            format.writeBatch(
                records.stream()
                    .map(record -> new ProducerRecord<>(
                        record.topic(),
                        record.kafkaPartition(),
                        keyConverter
                            .map(c -> c.fromConnectData(record.topic(), record.keySchema(), record.key()))
                            .orElse(null),
                        valueConverter.fromConnectData(record.topic(), record.valueSchema(), record.value())
                    ))
            ).collect(toList()),
            records.size());
    } catch (IOException e) {
        throw new RetriableException("Failed to write to buffer", e);
    }
}
@Override
public byte[] getValue() {
    return valueConverter.fromConnectData(
        srcRecord.topic(), srcRecord.valueSchema(), srcRecord.value());
}
byte[] keySerialized = keyConverter.fromConnectData(namespace, null, Arrays.asList(namespace, key));
ByteBuffer keyBuffer = (keySerialized != null) ? ByteBuffer.wrap(keySerialized) : null;
serializedToOriginal.put(keyBuffer, key);
@Override
public void putTargetState(String connector, TargetState state) {
    Struct connectTargetState = new Struct(TARGET_STATE_V0);
    connectTargetState.put("state", state.name());
    byte[] serializedTargetState = converter.fromConnectData(topic, TARGET_STATE_V0, connectTargetState);
    log.debug("Writing target state {} for connector {}", state, connector);
    configLog.send(TARGET_STATE_KEY(connector), serializedTargetState);
}
// Serialize and write the per-task configuration record.
Struct connectConfig = new Struct(TASK_CONFIGURATION_V0);
connectConfig.put("properties", taskConfig);
byte[] serializedTaskConfig = converter.fromConnectData(topic, TASK_CONFIGURATION_V0, connectConfig);
log.debug("Writing configuration for connector '{}' task {}", connector, index);
ConnectorTaskId connectorTaskId = new ConnectorTaskId(connector, index);
configLog.send(TASK_KEY(connectorTaskId), serializedTaskConfig);

// Once all task configs are written, serialize and send the commit record,
// which records how many tasks the connector now has.
Struct commitMessage = new Struct(CONNECTOR_TASKS_COMMIT_V0);
commitMessage.put("tasks", taskCount);
byte[] serializedCommit = converter.fromConnectData(topic, CONNECTOR_TASKS_COMMIT_V0, commitMessage);
log.debug("Writing commit for connector '{}' with {} tasks.", connector, taskCount);
configLog.send(COMMIT_TASKS_KEY(connector), serializedCommit);
/**
 * Convert the source record into a producer record.
 *
 * @param record the transformed record
 * @return the producer record, which can be sent to Kafka. Null is returned if the input is null
 *         or if an error was encountered during any of the converter stages.
 */
private ProducerRecord<byte[], byte[]> convertTransformedRecord(SourceRecord record) {
    if (record == null) {
        return null;
    }

    RecordHeaders headers = retryWithToleranceOperator.execute(
        () -> convertHeaderFor(record), Stage.HEADER_CONVERTER, headerConverter.getClass());

    byte[] key = retryWithToleranceOperator.execute(
        () -> keyConverter.fromConnectData(record.topic(), record.keySchema(), record.key()),
        Stage.KEY_CONVERTER, keyConverter.getClass());

    byte[] value = retryWithToleranceOperator.execute(
        () -> valueConverter.fromConnectData(record.topic(), record.valueSchema(), record.value()),
        Stage.VALUE_CONVERTER, valueConverter.getClass());

    if (retryWithToleranceOperator.failed()) {
        return null;
    }

    return new ProducerRecord<>(record.topic(), record.kafkaPartition(),
        ConnectUtils.checkAndConvertTimestamp(record.timestamp()), key, value, headers);
}
private byte[] serialize(AbstractStatus status) {
    Struct struct = new Struct(STATUS_SCHEMA_V0);
    struct.put(STATE_KEY_NAME, status.state().name());
    if (status.trace() != null) {
        struct.put(TRACE_KEY_NAME, status.trace());
    }
    struct.put(WORKER_ID_KEY_NAME, status.workerId());
    struct.put(GENERATION_KEY_NAME, status.generation());
    return converter.fromConnectData(topic, STATUS_SCHEMA_V0, struct);
}
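For completeness, a hedged sketch of the read path: Converter.toConnectData reverses fromConnectData. Whether the decoded value comes back as a Struct or as a plain Map depends on the converter configuration (for JsonConverter, on schemas.enable), so the cast below assumes a schema-carrying configuration; statusBytes is a hypothetical byte array read back from the status topic.

// Sketch only: decode a status record previously produced by serialize() above.
// Assumes the converter embeds schemas, so value() is a Struct; with a schemaless
// JsonConverter it would be a Map<String, Object> instead.
SchemaAndValue schemaAndValue = converter.toConnectData(topic, statusBytes);
Struct statusStruct = (Struct) schemaAndValue.value();
String state = statusStruct.getString(STATE_KEY_NAME);
String workerId = statusStruct.getString(WORKER_ID_KEY_NAME);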