/**
 * Fetches the last committed offset for {@code partition}, blocking for at most
 * {@code timeout}. Pure delegation to the wrapped consumer.
 *
 * @param partition the topic-partition to query
 * @param timeout   maximum time to block waiting for the broker
 * @return the committed offset and metadata; may be {@code null} when nothing
 *         has been committed for this partition
 */
public OffsetAndMetadata committed(TopicPartition partition, Duration timeout) {
    return delegate.committed(partition, timeout);
}
/**
 * Fetches the last committed offset for {@code partition} using the default
 * timeout. Pure delegation to the wrapped consumer.
 *
 * @param partition the topic-partition to query
 * @return the committed offset and metadata; may be {@code null} when nothing
 *         has been committed for this partition
 */
@Override
public OffsetAndMetadata committed(TopicPartition partition) {
    return delegate.committed(partition);
}
/**
 * Rewinds the consumer for {@code topicPartition} to the last tracked
 * uncommitted offset (falling back to the broker's committed offset) so the
 * in-flight records are redelivered.
 *
 * Fixed: {@code kafkaConsumer.committed(...)} may return {@code null} when the
 * partition has never been committed, which previously threw an NPE at
 * {@code offsetAndMetadata.offset()}. Now falls back to offset 0 and guards the
 * whole rollback with a warn-and-continue handler, matching the other
 * {@code rollback} implementations in this file.
 */
private void rollback(final TopicPartition topicPartition) {
    try {
        OffsetAndMetadata offsetAndMetadata = uncommittedOffsetsMap.get(topicPartition);
        if (offsetAndMetadata == null) {
            offsetAndMetadata = kafkaConsumer.committed(topicPartition);
        }
        // No tracked or committed offset: rewind to the start of the partition.
        final long offset = offsetAndMetadata == null ? 0L : offsetAndMetadata.offset();
        kafkaConsumer.seek(topicPartition, offset);
    } catch (final Exception rollbackException) {
        logger.warn("Attempted to rollback Kafka message offset but was unable to do so", rollbackException);
    }
}
/**
 * Rewinds the consumer for {@code topicPartition} to the last tracked
 * uncommitted offset, falling back to the broker-committed offset, and finally
 * to 0 when neither exists. Any failure is logged and swallowed so rollback is
 * best-effort.
 */
private void rollback(final TopicPartition topicPartition) {
    try {
        OffsetAndMetadata resumePoint = uncommittedOffsetsMap.get(topicPartition);
        if (resumePoint == null) {
            // Nothing tracked locally; ask the broker for the committed position.
            resumePoint = kafkaConsumer.committed(topicPartition);
        }
        final long seekTarget;
        if (resumePoint == null) {
            seekTarget = 0L; // never committed: start from the beginning
        } else {
            seekTarget = resumePoint.offset();
        }
        kafkaConsumer.seek(topicPartition, seekTarget);
    } catch (final Exception rollbackException) {
        logger.warn("Attempted to rollback Kafka message offset but was unable to do so", rollbackException);
    }
}
/**
 * Best-effort rollback: seeks {@code topicPartition} back to the last known
 * uncommitted offset, then the broker-committed offset, then 0. Failures are
 * logged at WARN and otherwise ignored.
 */
private void rollback(final TopicPartition topicPartition) {
    try {
        OffsetAndMetadata known = uncommittedOffsetsMap.get(topicPartition);
        if (known == null) {
            known = kafkaConsumer.committed(topicPartition); // may be null: never committed
        }
        final long target = (known != null) ? known.offset() : 0L;
        kafkaConsumer.seek(topicPartition, target);
    } catch (final Exception rollbackException) {
        logger.warn("Attempted to rollback Kafka message offset but was unable to do so", rollbackException);
    }
}
/**
 * Seeks {@code topicPartition} back to the offset of the oldest uncommitted
 * record (or the broker-committed offset, or 0 when neither is available).
 * Exceptions during rollback are logged and suppressed.
 */
private void rollback(final TopicPartition topicPartition) {
    try {
        final OffsetAndMetadata tracked = uncommittedOffsetsMap.get(topicPartition);
        // Fall back to the broker when nothing is tracked locally; both may be null.
        final OffsetAndMetadata effective =
                (tracked != null) ? tracked : kafkaConsumer.committed(topicPartition);
        kafkaConsumer.seek(topicPartition, effective == null ? 0L : effective.offset());
    } catch (final Exception rollbackException) {
        logger.warn("Attempted to rollback Kafka message offset but was unable to do so", rollbackException);
    }
}
// Batch listener used to verify per-record acknowledgement behavior: counts down
// latch17 once per received record, and counts down latch18 when the committed
// offset for the record's partition matches the expected value (1 for topic
// "annotated26", 3 for "annotated27"). NOTE(review): consumer.committed(..) can
// return null when nothing is committed yet — relies on a commit having happened
// before this listener runs; confirm against the test setup.
@KafkaListener(id = "batchAckListener", topics = { "annotated26", "annotated27" }, containerFactory = "batchFactory") public void batchAckListener(List<String> in, @Header(KafkaHeaders.RECEIVED_PARTITION_ID) List<Integer> partitions, @Header(KafkaHeaders.RECEIVED_TOPIC) List<String> topics, Consumer<?, ?> consumer) { for (int i = 0; i < topics.size(); i++) { this.latch17.countDown(); String topic = topics.get(i); if ("annotated26".equals(topic) && consumer.committed( new org.apache.kafka.common.TopicPartition(topic, partitions.get(i))).offset() == 1) { this.latch18.countDown(); } else if ("annotated27".equals(topic) && consumer.committed( new org.apache.kafka.common.TopicPartition(topic, partitions.get(i))).offset() == 3) { this.latch18.countDown(); } } }
/**
 * Returns the last committed offset for {@code partition} by delegating to the
 * underlying consumer.
 *
 * @param partition the topic-partition to query
 * @return the committed offset and metadata; may be {@code null} when nothing
 *         has been committed
 */
@Override
public OffsetAndMetadata committed(TopicPartition partition) {
    return consumer.committed(partition);
}
/**
 * Returns the last committed offset for {@code topicPartition}, blocking for at
 * most {@code duration}. Delegates to the underlying consumer.
 *
 * @param topicPartition the topic-partition to query
 * @param duration       maximum time to block
 * @return the committed offset and metadata; may be {@code null} when nothing
 *         has been committed
 */
@Override
public OffsetAndMetadata committed(TopicPartition topicPartition, Duration duration) {
    return consumer.committed(topicPartition, duration);
}
/**
 * Bounded-time lookup of the last committed offset for {@code topicPartition};
 * straight pass-through to the wrapped consumer.
 *
 * @param topicPartition partition whose committed position is requested
 * @param duration       upper bound on how long the call may block
 * @return committed offset/metadata, or {@code null} if no commit exists
 */
@Override
public OffsetAndMetadata committed(TopicPartition topicPartition, Duration duration) {
    return consumer.committed(topicPartition, duration);
}
/**
 * Pass-through to the wrapped consumer's committed-offset lookup for
 * {@code partition}.
 *
 * @param partition partition whose committed position is requested
 * @return committed offset/metadata, or {@code null} if no commit exists
 */
@Override
public OffsetAndMetadata committed(TopicPartition partition) {
    return consumer.committed(partition);
}
/**
 * Delegates the committed-offset lookup for {@code partition} to the backing
 * {@code kafkaConsumer}.
 *
 * @param partition partition whose committed position is requested
 * @return committed offset/metadata, or {@code null} if no commit exists
 */
@Override
public OffsetAndMetadata committed(TopicPartition partition) {
    return kafkaConsumer.committed(partition);
}
/**
 * Delegates the bounded-time committed-offset lookup for {@code partition} to
 * the backing {@code kafkaConsumer}.
 *
 * @param partition partition whose committed position is requested
 * @param timeout   maximum time to block
 * @return committed offset/metadata, or {@code null} if no commit exists
 */
@Override
public OffsetAndMetadata committed(TopicPartition partition, Duration timeout) {
    return kafkaConsumer.committed(partition, timeout);
}
/**
 * Looks up the committed offset for {@code partition} via the delegate,
 * blocking for at most {@code timeout}.
 *
 * @param partition partition whose committed position is requested
 * @param timeout   upper bound on blocking time
 * @return committed offset/metadata, or {@code null} if no commit exists
 */
public OffsetAndMetadata committed(TopicPartition partition, Duration timeout) {
    return delegate.committed(partition, timeout);
}
/**
 * Asynchronously fetches the last committed offset for {@code topicPartition}
 * on the consumer's worker task and reports the result through
 * {@code handler}.
 *
 * @param topicPartition partition whose committed position is requested
 * @param handler        callback completed with the committed offset/metadata
 */
@Override
public void committed(TopicPartition topicPartition, Handler<AsyncResult<OffsetAndMetadata>> handler) {
    this.submitTask((consumer, future) -> {
        final OffsetAndMetadata committedOffset = consumer.committed(topicPartition);
        // future may be null when the caller did not ask for a completion signal.
        if (future != null) {
            future.complete(committedOffset);
        }
    }, handler);
}
/**
 * Returns the committed offset for {@code tp} only when a commit exists AND it
 * carries non-empty metadata; otherwise returns {@code null} to signal that no
 * "safe" offset is recorded.
 *
 * @param tp partition to query
 * @return the committed offset, or {@code null} when absent or metadata-less
 */
@Override
public Long committedSafeOffset(TopicPartition tp) {
    final OffsetAndMetadata committed = _kafkaConsumer.committed(tp);
    if (committed == null || committed.metadata().isEmpty()) {
        return null; // no commit yet, or commit lacks the metadata marking it safe
    }
    return committed.offset();
}
/**
 * Rewinds the consumer for {@code topicPartition} to the last tracked
 * uncommitted offset (falling back to the broker's committed offset) so the
 * in-flight records are redelivered.
 *
 * Fixed: {@code kafkaConsumer.committed(...)} may return {@code null} when the
 * partition has never been committed, which previously threw an NPE at
 * {@code offsetAndMetadata.offset()}. Now falls back to offset 0 and guards the
 * whole rollback with a warn-and-continue handler, matching the other
 * {@code rollback} implementations in this file.
 */
private void rollback(final TopicPartition topicPartition) {
    try {
        OffsetAndMetadata offsetAndMetadata = uncommittedOffsetsMap.get(topicPartition);
        if (offsetAndMetadata == null) {
            offsetAndMetadata = kafkaConsumer.committed(topicPartition);
        }
        // No tracked or committed offset: rewind to the start of the partition.
        final long offset = offsetAndMetadata == null ? 0L : offsetAndMetadata.offset();
        kafkaConsumer.seek(topicPartition, offset);
    } catch (final Exception rollbackException) {
        logger.warn("Attempted to rollback Kafka message offset but was unable to do so", rollbackException);
    }
}
/**
 * Best-effort rewind of {@code topicPartition}: prefers the locally tracked
 * uncommitted offset, then the broker-committed offset, then 0. Any exception
 * is logged at WARN and suppressed.
 */
private void rollback(final TopicPartition topicPartition) {
    try {
        OffsetAndMetadata restorePoint = uncommittedOffsetsMap.get(topicPartition);
        if (restorePoint == null) {
            restorePoint = kafkaConsumer.committed(topicPartition); // null when never committed
        }
        final long rewindTo = (restorePoint == null) ? 0L : restorePoint.offset();
        kafkaConsumer.seek(topicPartition, rewindTo);
    } catch (final Exception rollbackException) {
        logger.warn("Attempted to rollback Kafka message offset but was unable to do so", rollbackException);
    }
}
/**
 * Seeks {@code topicPartition} back to the last known position so unprocessed
 * records are redelivered: the tracked uncommitted offset if present, else the
 * broker-committed offset, else offset 0. Rollback failures never propagate;
 * they are logged and ignored.
 */
private void rollback(final TopicPartition topicPartition) {
    try {
        final OffsetAndMetadata tracked = uncommittedOffsetsMap.get(topicPartition);
        final OffsetAndMetadata chosen =
                (tracked != null) ? tracked : kafkaConsumer.committed(topicPartition);
        kafkaConsumer.seek(topicPartition, chosen == null ? 0L : chosen.offset());
    } catch (final Exception rollbackException) {
        logger.warn("Attempted to rollback Kafka message offset but was unable to do so", rollbackException);
    }
}
/**
 * Sums the committed offsets across all partitions of {@code topic}.
 *
 * Fixed: the blocking timeout was built with {@code Duration.ofSeconds(receiveTimeoutMillis)},
 * interpreting a milliseconds value as seconds and inflating the wait ~1000x;
 * it now uses {@code Duration.ofMillis}.
 *
 * @param receiver the receiver whose consumer is queried
 * @return total of all positive committed offsets (partitions with no commit
 *         contribute 0)
 */
private long committedCount(KafkaReceiver<Integer, String> receiver) {
    long committed = 0;
    for (int j = 0; j < partitions; j++) {
        TopicPartition p = new TopicPartition(topic, j);
        OffsetAndMetadata offset = receiver.doOnConsumer(c -> c.committed(p))
                .block(Duration.ofMillis(receiveTimeoutMillis));
        // committed(..) returns null when the partition has never been committed.
        if (offset != null && offset.offset() > 0) {
            committed += offset.offset();
        }
    }
    return committed;
}