private void addHeaders(final FlowFile flowFile, final Map<String, String> additionalAttributes, final ProducerRecord<?, ?> record) {
    if (attributeNameRegex == null) {
        return;
    }

    final Headers headers = record.headers();
    for (final Map.Entry<String, String> entry : flowFile.getAttributes().entrySet()) {
        if (attributeNameRegex.matcher(entry.getKey()).matches()) {
            headers.add(entry.getKey(), entry.getValue().getBytes(headerCharacterSet));
        }
    }

    for (final Map.Entry<String, String> entry : additionalAttributes.entrySet()) {
        if (attributeNameRegex.matcher(entry.getKey()).matches()) {
            headers.add(entry.getKey(), entry.getValue().getBytes(headerCharacterSet));
        }
    }
}
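A standalone sketch of the same regex-driven header filtering, runnable without the enclosing NiFi processor. The pattern, topic, and attribute map are hypothetical stand-ins for the processor's attributeNameRegex, headerCharacterSet, and FlowFile attributes.

import java.nio.charset.StandardCharsets;
import java.util.Map;
import java.util.regex.Pattern;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.header.Headers;

public class HeaderFilterExample {
    public static void main(String[] args) {
        // Hypothetical pattern standing in for the processor's attributeNameRegex.
        Pattern attributeNameRegex = Pattern.compile("kafka\\.header\\..*");
        ProducerRecord<byte[], byte[]> record = new ProducerRecord<>("my-topic", new byte[0]);
        Headers headers = record.headers();
        Map<String, String> attributes = Map.of(
                "kafka.header.traceId", "abc-123",  // matches: copied into a header
                "filename", "data.csv");            // no match: skipped
        for (Map.Entry<String, String> entry : attributes.entrySet()) {
            if (attributeNameRegex.matcher(entry.getKey()).matches()) {
                headers.add(entry.getKey(), entry.getValue().getBytes(StandardCharsets.UTF_8));
            }
        }
        headers.forEach(header -> System.out.println(header.key()));
    }
}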
/**
 * Computes the partition for the given record.
 */
private int partition(ProducerRecord<K, V> record, Cluster cluster) {
    Integer partition = record.partition();
    String topic = record.topic();
    if (partition != null) {
        // The caller supplied an explicit partition; validate and use it.
        List<PartitionInfo> partitions = cluster.partitionsForTopic(topic);
        int numPartitions = partitions.size();
        if (partition < 0 || partition >= numPartitions)
            throw new IllegalArgumentException("Invalid partition given with record: " + partition
                    + " is not in the range [0..." + numPartitions + "].");
        return partition;
    }
    byte[] keyBytes = keySerializer.serialize(topic, record.headers(), record.key());
    byte[] valueBytes = valueSerializer.serialize(topic, record.headers(), record.value());
    return this.partitioner.partition(topic, record.key(), keyBytes, record.value(), valueBytes, cluster);
}
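To make the two branches concrete: with an explicit partition the method only validates the range, and only when no partition is given does the configured partitioner run over the serialized key and value. A minimal sketch (topic name hypothetical):

import org.apache.kafka.clients.producer.ProducerRecord;

public class ExplicitPartitionExample {
    public static void main(String[] args) {
        // Explicit partition: partition(...) above returns it as-is after the
        // range check, without consulting the Partitioner.
        ProducerRecord<String, String> explicit = new ProducerRecord<>("my-topic", 7, "key", "value");
        System.out.println(explicit.partition()); // 7 (IllegalArgumentException at send time if out of range)

        // No partition given: record.partition() is null, so the configured
        // Partitioner decides, using the serialized key and value bytes.
        ProducerRecord<String, String> auto = new ProducerRecord<>("my-topic", "key", "value");
        System.out.println(auto.partition()); // null
    }
}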
record.headers().add(new RecordHeader("test", "header2".getBytes())); producer.send(record, null); record.headers().add(new RecordHeader("test", "test".getBytes())); fail("Expected IllegalStateException to be raised"); } catch (IllegalStateException ise) { assertArrayEquals(record.headers().lastHeader("test").value(), "header2".getBytes()); verify(valueSerializer).serialize(topic, record.headers(), value); verify(keySerializer).serialize(topic, record.headers(), key);
byte[] serializedKey;
try {
    serializedKey = keySerializer.serialize(record.topic(), record.headers(), record.key());
} catch (ClassCastException cce) {
    throw new SerializationException("Can't convert key of class " + record.key().getClass().getName() +
            " to class " + producerConfig.getClass(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG).getName() +
            " specified in key.serializer", cce);
}
byte[] serializedValue;
try {
    serializedValue = valueSerializer.serialize(record.topic(), record.headers(), record.value());
} catch (ClassCastException cce) {
    throw new SerializationException("Can't convert value of class " + record.value().getClass().getName() +
            " to class " + producerConfig.getClass(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG).getName() +
            " specified in value.serializer", cce);
}
// ... (partition selection elided) ...
tp = new TopicPartition(record.topic(), partition);
setReadOnly(record.headers());
Header[] headers = record.headers().toArray();
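Since serialization runs before setReadOnly(record.headers()), a serializer may still add headers of its own via the three-argument overload. A sketch of such a serializer, assuming kafka-clients 2.x where the (topic, headers, data) overload and default configure/close exist; the class and header name are hypothetical:

import java.nio.charset.StandardCharsets;
import org.apache.kafka.common.header.Headers;
import org.apache.kafka.common.serialization.Serializer;

public class StampingStringSerializer implements Serializer<String> {
    @Override
    public byte[] serialize(String topic, String data) {
        return data == null ? null : data.getBytes(StandardCharsets.UTF_8);
    }

    @Override
    public byte[] serialize(String topic, Headers headers, String data) {
        // Headers are still mutable here; doSend(...) only marks them
        // read-only after both serializers have run.
        headers.add("serialized-by", "StampingStringSerializer".getBytes(StandardCharsets.UTF_8));
        return serialize(topic, data);
    }
}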
@SuppressWarnings("unchecked") @Override public ListenableFuture<SendResult<K, V>> send(Message<?> message) { ProducerRecord<?, ?> producerRecord = this.messageConverter.fromMessage(message, this.defaultTopic); if (!producerRecord.headers().iterator().hasNext()) { // possibly no Jackson byte[] correlationId = message.getHeaders().get(KafkaHeaders.CORRELATION_ID, byte[].class); if (correlationId != null) { producerRecord.headers().add(KafkaHeaders.CORRELATION_ID, correlationId); } } return doSend((ProducerRecord<K, V>) producerRecord); }
Assert.notNull(correlationId, "the created 'correlationId' cannot be null");
boolean hasReplyTopic = false;
Headers headers = record.headers();
Iterator<Header> iterator = headers.iterator();
while (iterator.hasNext() && !hasReplyTopic) {
    Header header = iterator.next();
    if (header.key().equals(KafkaHeaders.REPLY_TOPIC)) {
        hasReplyTopic = true;
    }
}
@Test
public void testTimeout() throws Exception {
    ReplyingKafkaTemplate<Integer, String, String> template = createTemplate(A_REPLY);
    try {
        template.setReplyTimeout(1);
        ProducerRecord<Integer, String> record = new ProducerRecord<>(A_REQUEST, "fiz");
        record.headers().add(new RecordHeader(KafkaHeaders.REPLY_TOPIC, A_REPLY.getBytes()));
        RequestReplyFuture<Integer, String, String> future = template.sendAndReceive(record);
        future.getSendFuture().get(10, TimeUnit.SECONDS); // send ok
        try {
            future.get(30, TimeUnit.SECONDS);
            fail("Expected Exception");
        }
        catch (InterruptedException e) {
            Thread.currentThread().interrupt();
            throw e;
        }
        catch (ExecutionException e) {
            assertThat(e).hasCauseExactlyInstanceOf(KafkaException.class).hasMessageContaining("Reply timed out");
        }
    }
    finally {
        template.stop();
    }
}
private ProducerRecord<byte[], byte[]> createRecord(String topic, String message) {
    ProducerRecord<byte[], byte[]> producerRecord = new ProducerRecord<>(topic, message.getBytes(StandardCharsets.UTF_8));
    producerRecord.headers().add(KafkaUtils.MESSAGE_ID_KEY, UUID.randomUUID().toString().getBytes(StandardCharsets.UTF_8));
    return producerRecord;
}
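The consuming side might recover the id like this; a sketch assuming KafkaUtils.MESSAGE_ID_KEY is the same String constant used by createRecord(...) above.

import java.nio.charset.StandardCharsets;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.common.header.Header;

public class MessageIdReader {
    static String messageId(ConsumerRecord<byte[], byte[]> record) {
        Header header = record.headers().lastHeader(KafkaUtils.MESSAGE_ID_KEY);
        return header == null ? null : new String(header.value(), StandardCharsets.UTF_8);
    }
}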
private void addHeaders(final FlowFile flowFile, final Map<String, String> additionalAttributes, final ProducerRecord<?, ?> record) { if (attributeNameRegex == null) { return; } final Headers headers = record.headers(); for (final Map.Entry<String, String> entry : flowFile.getAttributes().entrySet()) { if (attributeNameRegex.matcher(entry.getKey()).matches()) { headers.add(entry.getKey(), entry.getValue().getBytes(headerCharacterSet)); } } for (final Map.Entry<String, String> entry : additionalAttributes.entrySet()) { if (attributeNameRegex.matcher(entry.getKey()).matches()) { headers.add(entry.getKey(), entry.getValue().getBytes(headerCharacterSet)); } } }
private void addHeaders(final FlowFile flowFile, final Map<String, String> additionalAttributes, final ProducerRecord<?, ?> record) { if (attributeNameRegex == null) { return; } final Headers headers = record.headers(); for (final Map.Entry<String, String> entry : flowFile.getAttributes().entrySet()) { if (attributeNameRegex.matcher(entry.getKey()).matches()) { headers.add(entry.getKey(), entry.getValue().getBytes(headerCharacterSet)); } } for (final Map.Entry<String, String> entry : additionalAttributes.entrySet()) { if (attributeNameRegex.matcher(entry.getKey()).matches()) { headers.add(entry.getKey(), entry.getValue().getBytes(headerCharacterSet)); } } }
static <K, V> Scope buildAndInjectSpan(ProducerRecord<K, V> record, Tracer tracer,
        BiFunction<String, ProducerRecord, String> producerSpanNameProvider) {
    Tracer.SpanBuilder spanBuilder = tracer.buildSpan(producerSpanNameProvider.apply("send", record))
            .withTag(Tags.SPAN_KIND.getKey(), Tags.SPAN_KIND_PRODUCER);

    SpanContext spanContext = TracingKafkaUtils.extract(record.headers(), tracer);
    if (spanContext != null) {
        spanBuilder.asChildOf(spanContext);
    }

    Scope scope = spanBuilder.startActive(false);
    SpanDecorator.onSend(record, scope.span());

    try {
        TracingKafkaUtils.inject(scope.span().context(), record.headers(), tracer);
    } catch (Exception e) {
        // This can happen if the headers are read-only (i.e. the record is being sent a second time).
        logger.error("failed to inject span context. sending record second time?", e);
    }

    return scope;
}
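One way a wrapper might drive this method, finishing the span from the send callback as opentracing-kafka-client's TracingCallback does. This is a sketch against the same OpenTracing 0.31-style Scope API used above, with hypothetical producer and span-name choices.

import io.opentracing.Scope;
import io.opentracing.Tracer;
import org.apache.kafka.clients.producer.Producer;
import org.apache.kafka.clients.producer.ProducerRecord;

public class TracedSendExample {
    static void tracedSend(Producer<String, String> producer, Tracer tracer,
                           ProducerRecord<String, String> record) {
        Scope scope = buildAndInjectSpan(record, tracer, (operation, rec) -> operation + "_" + rec.topic());
        try {
            // Finish the span when the broker acknowledges (or rejects) the record.
            producer.send(record, (metadata, exception) -> scope.span().finish());
        }
        finally {
            scope.close(); // startActive(false): closing the scope does not finish the span
        }
    }
}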
/**
 * Converts a {@link ProducerRecord} into a {@link SenderRecord} to send to Kafka.
 *
 * @param record the producer record to send to Kafka
 * @param correlationMetadata additional correlation metadata that is not sent to Kafka, but is
 *        included in the response to match {@link SenderResult} to this record
 * @return a new sender record that can be sent to Kafka using {@link KafkaSender#send(org.reactivestreams.Publisher)}
 */
public static <K, V, T> SenderRecord<K, V, T> create(ProducerRecord<K, V> record, T correlationMetadata) {
    return new SenderRecord<>(record.topic(), record.partition(), record.timestamp(), record.key(),
            record.value(), correlationMetadata, record.headers());
}
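Usage might look like the following sketch, reusing the record's key as the correlation metadata so each SenderResult can be matched back to its record; the sender, topic, and key are hypothetical.

import org.apache.kafka.clients.producer.ProducerRecord;
import reactor.core.publisher.Flux;
import reactor.kafka.sender.KafkaSender;
import reactor.kafka.sender.SenderRecord;

public class SenderRecordExample {
    static void sendOne(KafkaSender<Integer, String> sender) {
        ProducerRecord<Integer, String> record = new ProducerRecord<>("my-topic", 1, "value");
        Flux<SenderRecord<Integer, String, Integer>> outbound =
                Flux.just(SenderRecord.create(record, record.key()));
        sender.send(outbound)
              .doOnNext(result -> System.out.println("acked, correlation=" + result.correlationMetadata()))
              .subscribe();
    }
}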
@Test
public void testStreamProduce(TestContext ctx) throws Exception {
    String topicName = "testStreamProduce";
    Properties config = kafkaCluster.useTo().getProducerProperties("testStreamProduce_producer");
    config.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
    config.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
    producer = producer(Vertx.vertx(), config);
    producer.exceptionHandler(ctx::fail);
    int numMessages = 100000;
    for (int i = 0; i < numMessages; i++) {
        ProducerRecord<String, String> record = new ProducerRecord<>(topicName, 0, "key-" + i, "value-" + i);
        record.headers().add("header_key", ("header_value-" + i).getBytes());
        producer.write(record);
    }
    assertReceiveMessages(ctx, topicName, numMessages);
}
void populateContextHeaders(ProducerRecord<byte[], byte[]> producerRecord, ProcessingContext context) {
    Headers headers = producerRecord.headers();
    if (context.consumerRecord() != null) {
        headers.add(ERROR_HEADER_ORIG_TOPIC, toBytes(context.consumerRecord().topic()));
        headers.add(ERROR_HEADER_ORIG_PARTITION, toBytes(context.consumerRecord().partition()));
        headers.add(ERROR_HEADER_ORIG_OFFSET, toBytes(context.consumerRecord().offset()));
    }

    headers.add(ERROR_HEADER_CONNECTOR_NAME, toBytes(connectorTaskId.connector()));
    headers.add(ERROR_HEADER_TASK_ID, toBytes(String.valueOf(connectorTaskId.task())));
    headers.add(ERROR_HEADER_STAGE, toBytes(context.stage().name()));
    headers.add(ERROR_HEADER_EXECUTING_CLASS, toBytes(context.executingClass().getName()));

    if (context.error() != null) {
        headers.add(ERROR_HEADER_EXCEPTION, toBytes(context.error().getClass().getName()));
        headers.add(ERROR_HEADER_EXCEPTION_MESSAGE, toBytes(context.error().getMessage()));
        byte[] trace;
        if ((trace = stacktrace(context.error())) != null) {
            headers.add(ERROR_HEADER_EXCEPTION_STACK_TRACE, trace);
        }
    }
}
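On the consuming side of the dead-letter topic, the same header names can be read back. A sketch that assumes these headers were written as UTF-8 strings by toBytes(...), which holds for the string-valued headers above.

import java.nio.charset.StandardCharsets;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.common.header.Header;

public class DlqHeaderReader {
    // e.g. headerValue(record, ERROR_HEADER_ORIG_TOPIC) to recover the original topic
    static String headerValue(ConsumerRecord<byte[], byte[]> record, String key) {
        Header header = record.headers().lastHeader(key);
        return header == null ? null : new String(header.value(), StandardCharsets.UTF_8);
    }
}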