@SuppressWarnings("unchecked") @Override public GenericRow deserialize(final String topic, final byte[] bytes) { try { final SchemaAndValue schemaAndValue = converter.toConnectData(topic, bytes); return connectToKsqlTranslator.toKsqlRow(schemaAndValue.schema(), schemaAndValue.value()); } catch (final Exception e) { recordLogger.error( ProcessingLogMessageFactory.deserializationErrorMsg(e, Optional.ofNullable(bytes))); throw e; } }
@Before
public void setup() {
  connectDeserializer = new KsqlConnectDeserializer(
      converter,
      dataTranslator,
      recordLogger
  );

  when(converter.toConnectData(any(), any())).thenReturn(new SchemaAndValue(schema, value));
  when(dataTranslator.toKsqlRow(any(), any())).thenReturn(genericRow);
}
@Test
public void shouldDeserializeRecordsCorrectly() {
  // When:
  final GenericRow deserialized = connectDeserializer.deserialize(TOPIC, BYTES);

  // Then:
  verify(converter, times(1)).toConnectData(TOPIC, BYTES);
  verify(dataTranslator, times(1)).toKsqlRow(schema, value);
  assertThat(deserialized, sameInstance(genericRow));
}
@Test
public void shouldLogOnError() {
  // Given:
  final RuntimeException error = new RuntimeException("bad");
  reset(converter);
  when(converter.toConnectData(any(), any())).thenThrow(error);

  // When:
  try {
    connectDeserializer.deserialize(TOPIC, BYTES);
    fail("deserialize should have thrown");
  } catch (final RuntimeException caught) {
    assertThat(caught, sameInstance(error));
  }

  SerdeTestUtils.shouldLogError(
      recordLogger,
      ProcessingLogMessageFactory.deserializationErrorMsg(error, Optional.ofNullable(BYTES)).get()
  );
}
}
@SuppressWarnings("unchecked") private OffsetInfo toOffsetInfo(Map.Entry<ByteBuffer, ByteBuffer> offsetEntry) { Object kafkaConnectOffsetKey = internalConverter.toConnectData(null, offsetEntry.getKey().array()).value(); // Deserialize the internal Mirus offset format List<Object> keyList = (List) kafkaConnectOffsetKey; Map<String, Object> parts = (Map) keyList.get(1); ByteBuffer val = offsetEntry.getValue(); Long offset = null; // Handle null-valued records, i.e. tombstone messages if (val != null) { Object kafkaConnectOffsetValue = internalConverter.toConnectData(null, val.array()).value(); Map<String, Object> valueMap = (Map) kafkaConnectOffsetValue; offset = (Long) valueMap.get("offset"); } return new OffsetInfo( (String) keyList.get(0), (String) parts.get("topic"), ((Long) parts.get("partition")), offset); } }
private ConnectorStatus parseConnectorStatus(String connector, byte[] data) {
    try {
        SchemaAndValue schemaAndValue = converter.toConnectData(topic, data);
        if (!(schemaAndValue.value() instanceof Map)) {
            log.error("Invalid connector status type {}", schemaAndValue.value().getClass());
            return null;
        }

        @SuppressWarnings("unchecked")
        Map<String, Object> statusMap = (Map<String, Object>) schemaAndValue.value();
        TaskStatus.State state = TaskStatus.State.valueOf((String) statusMap.get(STATE_KEY_NAME));
        String trace = (String) statusMap.get(TRACE_KEY_NAME);
        String workerUrl = (String) statusMap.get(WORKER_ID_KEY_NAME);
        int generation = ((Long) statusMap.get(GENERATION_KEY_NAME)).intValue();
        return new ConnectorStatus(connector, state, trace, workerUrl, generation);
    } catch (Exception e) {
        log.error("Failed to deserialize connector status", e);
        return null;
    }
}
private TaskStatus parseTaskStatus(ConnectorTaskId taskId, byte[] data) {
    try {
        SchemaAndValue schemaAndValue = converter.toConnectData(topic, data);
        if (!(schemaAndValue.value() instanceof Map)) {
            log.error("Invalid connector status type {}", schemaAndValue.value().getClass());
            return null;
        }

        @SuppressWarnings("unchecked")
        Map<String, Object> statusMap = (Map<String, Object>) schemaAndValue.value();
        TaskStatus.State state = TaskStatus.State.valueOf((String) statusMap.get(STATE_KEY_NAME));
        String trace = (String) statusMap.get(TRACE_KEY_NAME);
        String workerUrl = (String) statusMap.get(WORKER_ID_KEY_NAME);
        int generation = ((Long) statusMap.get(GENERATION_KEY_NAME)).intValue();
        return new TaskStatus(taskId, state, workerUrl, generation, trace);
    } catch (Exception e) {
        log.error("Failed to deserialize task status", e);
        return null;
    }
}
SchemaAndValue deserializedSchemaAndValue = valueConverter.toConnectData(
    namespace, rawEntry.getValue() != null ? rawEntry.getValue().array() : null);
Object deserializedValue = deserializedSchemaAndValue.value();
OffsetUtils.validateFormat(deserializedValue);
private List<SourceRecord> getSourceRecords(List<SourceRecord> results) throws InterruptedException {
  while (!reader.hasNext() && !stopped.get()) {
    log.debug("Blocking until new S3 files are available.");
    // sleep and block here until new files are available
    Thread.sleep(s3PollInterval);
    readFromStoredOffsets();
  }

  if (stopped.get()) {
    return results;
  }

  for (int i = 0; reader.hasNext() && i < maxPoll && !stopped.get(); i++) {
    S3SourceRecord record = reader.next();
    updateOffsets(record.file(), record.offset());
    String topic = topicMapping.computeIfAbsent(record.topic(), this::remapTopic);

    // The reader returns raw bytes, so we can run the key and value through the configured
    // converters to generate the "real" source record.
    Optional<SchemaAndValue> key = keyConverter.map(c -> c.toConnectData(topic, record.key()));
    SchemaAndValue value = valueConverter.toConnectData(topic, record.value());

    results.add(new SourceRecord(
        record.file().asMap(),
        record.offset().asMap(),
        topic,
        record.partition(),
        key.map(SchemaAndValue::schema).orElse(null),
        key.map(SchemaAndValue::value).orElse(null),
        value.schema(),
        value.value()));
  }

  log.debug("{} returning {} records.", name(), results.size());
  return results;
}
try {
    value = converter.toConnectData(topic, record.value());
} catch (DataException e) {
    log.error("Failed to convert config data to Kafka Connect format: ", e);
private SinkRecord convertAndTransformRecord(final ConsumerRecord<byte[], byte[]> msg) {
    SchemaAndValue keyAndSchema = retryWithToleranceOperator.execute(
            () -> keyConverter.toConnectData(msg.topic(), msg.key()),
            Stage.KEY_CONVERTER, keyConverter.getClass());

    SchemaAndValue valueAndSchema = retryWithToleranceOperator.execute(
            () -> valueConverter.toConnectData(msg.topic(), msg.value()),
            Stage.VALUE_CONVERTER, valueConverter.getClass());

    Headers headers = retryWithToleranceOperator.execute(
            () -> convertHeadersFor(msg), Stage.HEADER_CONVERTER, headerConverter.getClass());

    if (retryWithToleranceOperator.failed()) {
        return null;
    }

    Long timestamp = ConnectUtils.checkAndConvertTimestamp(msg.timestamp());
    SinkRecord origRecord = new SinkRecord(msg.topic(), msg.partition(),
            keyAndSchema.schema(), keyAndSchema.value(),
            valueAndSchema.schema(), valueAndSchema.value(),
            msg.offset(),
            timestamp,
            msg.timestampType(),
            headers);
    log.trace("{} Applying transformations to record in topic '{}' partition {} at offset {} "
                    + "and timestamp {} with key {} and value {}",
            this, msg.topic(), msg.partition(), msg.offset(), timestamp,
            keyAndSchema.value(), valueAndSchema.value());
    return transformationChain.apply(origRecord);
}
SchemaAndValue keyAndSchema = this.keyConverter.toConnectData(topic, consumerRecord.key());
SchemaAndValue valueAndSchema = this.valueConverter.toConnectData(topic, consumerRecord.value());
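// Every snippet above funnels raw Kafka bytes through Converter.toConnectData(topic, bytes)
// to obtain a SchemaAndValue. A minimal standalone sketch of that call, assuming the stock
// org.apache.kafka.connect.json.JsonConverter with schema envelopes disabled (the class name,
// topic, and payload below are illustrative only):

import java.nio.charset.StandardCharsets;
import java.util.Map;
import org.apache.kafka.connect.data.SchemaAndValue;
import org.apache.kafka.connect.json.JsonConverter;

public final class ToConnectDataExample {
    public static void main(String[] args) {
        JsonConverter converter = new JsonConverter();
        // "schemas.enable=false": payloads are plain JSON with no embedded schema envelope.
        converter.configure(Map.of("schemas.enable", "false"), false);

        byte[] raw = "{\"id\":42,\"name\":\"example\"}".getBytes(StandardCharsets.UTF_8);

        // Same call pattern as the snippets above: topic name + raw bytes in, SchemaAndValue out.
        // With schemas disabled, the schema is null and the value is the parsed JSON as Java objects.
        SchemaAndValue schemaAndValue = converter.toConnectData("example-topic", raw);
        System.out.println(schemaAndValue.schema()); // null
        System.out.println(schemaAndValue.value());  // e.g. {id=42, name=example}
    }
}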