/**
 * Creates a custom consumer event carrying a user-supplied {@code function} to be applied to the
 * underlying Kafka {@link org.apache.kafka.clients.consumer.Consumer}; the result is presumably
 * delivered to the caller through {@code monoSink} when the event runs — confirm against the event loop.
 */
CustomEvent(Function<org.apache.kafka.clients.consumer.Consumer<K, V>, ? extends T> function, MonoSink<T> monoSink) { super(EventType.CUSTOM); this.function = function; this.monoSink = monoSink; } @Override // NOTE(review): this @Override annotates the next method, which lies outside the visible chunk.
/**
 * Creates a custom consumer event carrying a user-supplied {@code function} to be applied to the
 * underlying Kafka {@link org.apache.kafka.clients.consumer.Consumer}, signalling its result via {@code monoSink}.
 * NOTE(review): byte-identical to the preceding line — looks like a chunking artifact or an accidental
 * duplicate definition; verify against the full file (two identical constructors would not compile).
 */
CustomEvent(Function<org.apache.kafka.clients.consumer.Consumer<K, V>, ? extends T> function, MonoSink<T> monoSink) { super(EventType.CUSTOM); this.function = function; this.monoSink = monoSink; } @Override // NOTE(review): this @Override annotates the next method, outside the visible chunk.
/**
 * Synchronously commits the given offsets to the mock cluster.
 * <p>
 * If a test has queued an injected failure in {@code commitExceptions}, the next commit throws it
 * instead of committing. Otherwise every (partition, metadata) pair is recorded against this
 * consumer's group id.
 *
 * @param offsets offsets to commit, keyed by partition
 * @throws KafkaException when an injected commit failure is pending
 */
@Override
public void commitSync(Map<TopicPartition, OffsetAndMetadata> offsets) {
    // Drain at most one injected failure per commit attempt.
    KafkaException injected = commitExceptions.poll();
    if (injected != null) {
        throw injected;
    }
    offsets.forEach((partition, metadata) ->
            cluster.commitOffset(receiverOptions.groupId(), partition, metadata.offset()));
}
/**
 * Wraps a polled batch so that, after all records have been emitted, the batch's offsets are sent
 * to the broker inside the current transaction, and the awaiting-transaction flag is cleared once
 * the resulting sequence terminates (normally or with an error).
 *
 * @param transactionManager transaction manager used to send the consumed offsets
 * @param records            records from the latest poll; an empty batch yields an empty Flux
 * @return the records followed by the transactional offset-send completion signal
 */
private Flux<ConsumerRecord<K, V>> transactionalRecords(TransactionManager transactionManager, ConsumerRecords<K, V> records) {
    if (records.isEmpty()) {
        return Flux.empty();
    }
    // Accumulate the latest offset seen for each partition in this batch.
    CommittableBatch batch = new CommittableBatch();
    records.forEach(record ->
            batch.updateOffset(new TopicPartition(record.topic(), record.partition()), record.offset()));
    Flux<ConsumerRecord<K, V>> data = Flux.fromIterable(records);
    return data
            .concatWith(transactionManager.sendOffsets(batch.getAndClearOffsets().offsets(), receiverOptions.groupId()))
            .doAfterTerminate(() -> awaitingTransaction.set(false));
}
/**
 * Emits a polled batch of records and then, within the active transaction, sends the batch's
 * per-partition offsets for this consumer group; clears {@code awaitingTransaction} when the
 * combined sequence terminates.
 * NOTE(review): byte-identical to the preceding method in this chunk — verify against the full
 * file whether this is a chunking artifact.
 *
 * @param transactionManager used to send consumed offsets transactionally
 * @param records            batch from the latest poll
 * @return record flux followed by the offset-send completion; empty flux for an empty batch
 */
private Flux<ConsumerRecord<K, V>> transactionalRecords(TransactionManager transactionManager, ConsumerRecords<K, V> records) {
    if (records.isEmpty()) {
        return Flux.empty();
    }
    CommittableBatch offsetBatch = new CommittableBatch();
    // Record each record's offset under its partition; later offsets overwrite earlier ones.
    for (ConsumerRecord<K, V> record : records) {
        TopicPartition partition = new TopicPartition(record.topic(), record.partition());
        offsetBatch.updateOffset(partition, record.offset());
    }
    return Flux.fromIterable(records)
            .concatWith(transactionManager.sendOffsets(offsetBatch.getAndClearOffsets().offsets(), receiverOptions.groupId()))
            .doAfterTerminate(() -> awaitingTransaction.set(false));
}
/**
 * Applies the current partition assignment: notifies the rebalance callback (if any), then seeds
 * each assigned partition's position from the group's committed offset, falling back to the
 * {@code auto.offset.reset} policy ("latest" by default) when no commit exists.
 *
 * @throws KafkaException when no offset is committed and the reset policy is neither
 *                        "earliest" nor "latest"
 */
private void doAssign() {
    if (!assignment.isEmpty() && rebalanceCallback != null) {
        rebalanceCallback.onPartitionsAssigned(assignment);
    }
    for (TopicPartition partition : assignment) {
        Long offset = cluster.committedOffset(receiverOptions.groupId(), partition);
        if (offset == null) {
            offset = resetOffset(partition);
        }
        // Keep any position already established for this partition.
        offsets.putIfAbsent(partition, offset);
    }
}

/** Resolves the starting offset for {@code partition} from the auto.offset.reset policy. */
private long resetOffset(TopicPartition partition) {
    String reset = (String) receiverOptions.consumerProperties()
            .getOrDefault(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "latest");
    if ("earliest".equals(reset)) {
        return 0L;
    }
    if ("latest".equals(reset)) {
        // End of the mock log for this partition.
        return cluster.log(partition).size();
    }
    throw new KafkaException("Offset not available");
}
/**
 * Returns the last committed offset for {@code partition} in this consumer's group, or
 * {@code null} when nothing has been committed. Guards the lookup with acquire/release,
 * mirroring the real consumer's single-threaded access check.
 *
 * @param partition partition to query
 * @return committed offset wrapped in {@link OffsetAndMetadata}, or {@code null} if absent
 */
@Override
public OffsetAndMetadata committed(TopicPartition partition) {
    acquire();
    try {
        Long committedOffset = cluster.committedOffset(receiverOptions.groupId(), partition);
        if (committedOffset == null) {
            return null;
        }
        return new OffsetAndMetadata(committedOffset);
    } finally {
        release();
    }
}