this.retries = retries; this.time = time; this.sensors = new SenderMetrics(metricsRegistry, metadata, client, time); this.requestTimeoutMs = requestTimeoutMs; this.retryBackoffMs = retryBackoffMs;
completeBatch(batch, partResp, correlationId, now, receivedTimeMs + produceResponse.throttleTimeMs()); this.sensors.recordLatency(response.destination(), response.requestLatencyMs()); } else {
sensors.updateProduceRequestMetrics(batches);
sensors.updateProduceRequestMetrics(requests);
/** * Complete or retry the given batch of records. * @param batch The record batch * @param error The error (or null if none) * @param baseOffset The base offset assigned to the records if successful * @param correlationId The correlation id for the request * @param now The current POSIX time stamp in milliseconds */ private void completeBatch(RecordBatch batch, Errors error, long baseOffset, long correlationId, long now) { if (error != Errors.NONE && canRetry(batch, error)) { // retry log.warn("Got error produce response with correlation id {} on topic-partition {}, retrying ({} attempts left). Error: {}", correlationId, batch.topicPartition, this.retries - batch.attempts - 1, error); this.accumulator.reenqueue(batch, now); this.sensors.recordRetries(batch.topicPartition.topic(), batch.recordCount); } else { // tell the user the result of their request batch.done(baseOffset, error.exception()); this.accumulator.deallocate(batch); if (error != Errors.NONE) this.sensors.recordErrors(batch.topicPartition.topic(), batch.recordCount); } if (error.exception() instanceof InvalidMetadataException) metadata.requestUpdate(); }
/** * Handle a produce response */ private void handleResponse(ClientResponse response, long now) { int correlationId = response.request().request().header().correlationId(); log.trace("Received produce response from node {} with correlation id {}", response.request().request().destination(), correlationId); @SuppressWarnings("unchecked") Map<TopicPartition, RecordBatch> batches = (Map<TopicPartition, RecordBatch>) response.request().attachment(); // if we have a response, parse it if (response.hasResponse()) { ProduceResponse produceResponse = new ProduceResponse(response.responseBody()); for (Map.Entry<TopicPartition, ProduceResponse.PartitionResponse> entry : produceResponse.responses().entrySet()) { TopicPartition tp = entry.getKey(); ProduceResponse.PartitionResponse partResp = entry.getValue(); Errors error = Errors.forCode(partResp.errorCode); RecordBatch batch = batches.get(tp); completeBatch(batch, error, partResp.baseOffset, correlationId, now); } this.sensors.recordLatency(response.request().request().destination(), response.requestLatencyMs()); } else { // this is the acks = 0 case, just complete all requests for (RecordBatch batch : batches.values()) completeBatch(batch, Errors.NONE, -1L, correlationId, now); } }
/**
 * Create a sender that drains batches from the given accumulator and transmits
 * them via the given network client.
 *
 * @param client the network client for broker I/O
 * @param metadata the current cluster metadata
 * @param accumulator the accumulator batches are drained from
 * @param maxRequestSize the maximum size of a produce request in bytes
 * @param acks the number of acknowledgements to require per request
 * @param retries how many times a failed batch may be re-sent
 * @param requestTimeout how long to await a response in milliseconds
 * @param metrics the registry for sender metrics
 * @param time the clock abstraction
 * @param clientId the client id attached to requests
 */
public Sender(KafkaClient client, Metadata metadata, RecordAccumulator accumulator, int maxRequestSize, short acks, int retries, int requestTimeout, Metrics metrics, Time time, String clientId) {
    this.client = client;
    this.metadata = metadata;
    this.accumulator = accumulator;
    this.maxRequestSize = maxRequestSize;
    this.requestTimeout = requestTimeout;
    this.acks = acks;
    this.retries = retries;
    this.clientId = clientId;
    this.time = time;
    this.sensors = new SenderMetrics(metrics);
    this.running = true;
}
this.accumulator.splitAndReenqueue(batch); this.accumulator.deallocate(batch); this.sensors.recordBatchSplit(); } else if (error != Errors.NONE) { if (canRetry(batch, response, now)) {
this.sensors.recordErrors(batch.topicPartition.topic(), batch.recordCount);
public void updateProduceRequestMetrics(Map<Integer, List<ProducerBatch>> batches) { long now = time.milliseconds(); for (List<ProducerBatch> nodeBatch : batches.values()) { int records = 0; for (ProducerBatch batch : nodeBatch) { // register all per-topic metrics at once String topic = batch.topicPartition.topic(); maybeRegisterTopicMetrics(topic); // per-topic record send rate String topicRecordsCountName = "topic." + topic + ".records-per-batch"; Sensor topicRecordCount = Utils.notNull(this.metrics.getSensor(topicRecordsCountName)); topicRecordCount.record(batch.recordCount); // per-topic bytes send rate String topicByteRateName = "topic." + topic + ".bytes"; Sensor topicByteRate = Utils.notNull(this.metrics.getSensor(topicByteRateName)); topicByteRate.record(batch.estimatedSizeInBytes()); // per-topic compression rate String topicCompressionRateName = "topic." + topic + ".compression-rate"; Sensor topicCompressionRate = Utils.notNull(this.metrics.getSensor(topicCompressionRateName)); topicCompressionRate.record(batch.compressionRatio()); // global metrics this.batchSizeSensor.record(batch.estimatedSizeInBytes(), now); this.queueTimeSensor.record(batch.queueTimeMs(), now); this.compressionRateSensor.record(batch.compressionRatio()); this.maxRecordSizeSensor.record(batch.maxRecordSize, now); records += batch.recordCount; } this.recordsPerRequestSensor.record(records, now); } }
/**
 * Put a failed batch back into the accumulator for another delivery attempt
 * and update the in-flight/retry bookkeeping accordingly.
 *
 * @param batch the batch to retry
 * @param currentTimeMs the current time in milliseconds
 */
private void reenqueueBatch(ProducerBatch batch, long currentTimeMs) {
    // hand the batch back to the accumulator so it is picked up by a later drain
    this.accumulator.reenqueue(batch, currentTimeMs);
    // it is no longer awaiting a response, so drop it from the in-flight set
    maybeRemoveFromInflightBatches(batch);
    this.sensors.recordRetries(batch.topicPartition.topic(), batch.recordCount);
}