/**
 * Seek to the first offset for the given partition.
 * <p>
 * Fire-and-forget overload: delegates to the underlying consumer without a completion callback.
 * @param topicPartition topic partition for which to seek
 * @return current KafkaConsumer instance, for fluent chaining
 */
public io.vertx.rxjava.kafka.client.consumer.KafkaConsumer<K, V> seekToBeginning(TopicPartition topicPartition) {
  delegate.seekToBeginning(topicPartition);
  return this;
}
/**
 * Seek to the first offset for the given partition.
 * <p>
 * Fire-and-forget overload: delegates to the underlying consumer without a completion callback.
 * @param topicPartition topic partition for which to seek
 * @return current KafkaConsumer instance, for fluent chaining
 */
public io.vertx.rxjava.kafka.client.consumer.KafkaConsumer<K, V> seekToBeginning(TopicPartition topicPartition) {
  delegate.seekToBeginning(topicPartition);
  return this;
}
/**
 * Seek to the first offset for each of the given partitions.
 * <p>
 * Fire-and-forget overload: delegates to the underlying consumer without a completion callback.
 * @param topicPartitions topic partitions for which to seek
 * @return current KafkaConsumer instance, for fluent chaining
 */
public io.vertx.rxjava.kafka.client.consumer.KafkaConsumer<K, V> seekToBeginning(Set<TopicPartition> topicPartitions) {
  delegate.seekToBeginning(topicPartitions);
  return this;
}
/**
 * Seek to the first offset for each of the given partitions.
 * <p>
 * Fire-and-forget overload: delegates to the underlying consumer without a completion callback.
 * @param topicPartitions topic partitions for which to seek
 * @return current KafkaConsumer instance, for fluent chaining
 */
public io.vertx.rxjava.kafka.client.consumer.KafkaConsumer<K, V> seekToBeginning(Set<TopicPartition> topicPartitions) {
  delegate.seekToBeginning(topicPartitions);
  return this;
}
/**
 * Seek to the first offset for each of the given partitions.
 * <p>
 * Due to internal buffering of messages, the record handler will
 * continue to observe messages fetched with respect to the old offset
 * until some time <em>after</em> the given <code>completionHandler</code>
 * is called. In contrast, once the given <code>completionHandler</code>
 * is called the {@link io.vertx.rxjava.kafka.client.consumer.KafkaConsumer#batchHandler} will only see messages
 * consistent with the new offset.
 * @param topicPartitions topic partitions for which to seek
 * @param completionHandler handler called on operation completed
 * @return current KafkaConsumer instance, for fluent chaining
 */
public io.vertx.rxjava.kafka.client.consumer.KafkaConsumer<K, V> seekToBeginning(Set<TopicPartition> topicPartitions, Handler<AsyncResult<Void>> completionHandler) {
  delegate.seekToBeginning(topicPartitions, completionHandler);
  return this;
}
/**
 * Seek to the first offset for the given partition.
 * <p>
 * Due to internal buffering of messages, the record handler will
 * continue to observe messages fetched with respect to the old offset
 * until some time <em>after</em> the given <code>completionHandler</code>
 * is called. In contrast, once the given <code>completionHandler</code>
 * is called the {@link io.vertx.rxjava.kafka.client.consumer.KafkaConsumer#batchHandler} will only see messages
 * consistent with the new offset.
 * @param topicPartition topic partition for which to seek
 * @param completionHandler handler called on operation completed
 * @return current KafkaConsumer instance, for fluent chaining
 */
public io.vertx.rxjava.kafka.client.consumer.KafkaConsumer<K, V> seekToBeginning(TopicPartition topicPartition, Handler<AsyncResult<Void>> completionHandler) {
  delegate.seekToBeginning(topicPartition, completionHandler);
  return this;
}
/**
 * Seek to the first offset for the given partition.
 * <p>
 * Due to internal buffering of messages, the record handler will
 * continue to observe messages fetched with respect to the old offset
 * until some time <em>after</em> the given <code>completionHandler</code>
 * is called. In contrast, once the given <code>completionHandler</code>
 * is called the {@link io.vertx.rxjava.kafka.client.consumer.KafkaConsumer#batchHandler} will only see messages
 * consistent with the new offset.
 * @param topicPartition topic partition for which to seek
 * @param completionHandler handler called on operation completed
 * @return current KafkaConsumer instance, for fluent chaining
 */
public io.vertx.rxjava.kafka.client.consumer.KafkaConsumer<K, V> seekToBeginning(TopicPartition topicPartition, Handler<AsyncResult<Void>> completionHandler) {
  delegate.seekToBeginning(topicPartition, completionHandler);
  return this;
}
/**
 * Seek to the first offset for each of the given partitions.
 * <p>
 * Due to internal buffering of messages, the record handler will
 * continue to observe messages fetched with respect to the old offset
 * until some time <em>after</em> the given <code>completionHandler</code>
 * is called. In contrast, once the given <code>completionHandler</code>
 * is called the {@link io.vertx.rxjava.kafka.client.consumer.KafkaConsumer#batchHandler} will only see messages
 * consistent with the new offset.
 * @param topicPartitions topic partitions for which to seek
 * @param completionHandler handler called on operation completed
 * @return current KafkaConsumer instance, for fluent chaining
 */
public io.vertx.rxjava.kafka.client.consumer.KafkaConsumer<K, V> seekToBeginning(Set<TopicPartition> topicPartitions, Handler<AsyncResult<Void>> completionHandler) {
  delegate.seekToBeginning(topicPartitions, completionHandler);
  return this;
}
// Generated dynamic-language glue: converts each Map<String, Object> element of topicPartitions
// into a TopicPartition (via JsonObject) before delegating to the core consumer's seekToBeginning,
// then returns the receiver for chaining. Null set and null elements are passed through as null.
// NOTE(review): this line ends with the opening of the next generated method; its body is
// completed elsewhere in the file — do not reformat across the boundary.
public static io.vertx.kafka.client.consumer.KafkaConsumer<java.lang.Object,java.lang.Object> seekToBeginning(io.vertx.kafka.client.consumer.KafkaConsumer<Object, Object> j_receiver, java.util.Set<java.util.Map<String, Object>> topicPartitions) { io.vertx.core.impl.ConversionHelper.fromObject(j_receiver.seekToBeginning(topicPartitions != null ? topicPartitions.stream().map(elt -> elt != null ? new io.vertx.kafka.client.common.TopicPartition(io.vertx.core.impl.ConversionHelper.toJsonObject(elt)) : null).collect(java.util.stream.Collectors.toSet()) : null)); return j_receiver; } public static io.vertx.kafka.client.consumer.KafkaConsumer<java.lang.Object,java.lang.Object> seekToBeginning(io.vertx.kafka.client.consumer.KafkaConsumer<Object, Object> j_receiver, java.util.Map<String, Object> topicPartition, io.vertx.core.Handler<io.vertx.core.AsyncResult<java.lang.Void>> completionHandler) {
// Generated dynamic-language glue: converts each Map<String, Object> element of topicPartitions
// into a TopicPartition, delegates to the async seekToBeginning overload, and adapts the
// completion handler so the AsyncResult payload is mapped through ConversionHelper.fromObject.
// Null set, null elements and a null handler are all passed through as null.
// NOTE(review): this line ends with the opening of the next generated method (seekToEnd); its
// body is completed elsewhere in the file — do not reformat across the boundary.
public static io.vertx.kafka.client.consumer.KafkaConsumer<java.lang.Object,java.lang.Object> seekToBeginning(io.vertx.kafka.client.consumer.KafkaConsumer<Object, Object> j_receiver, java.util.Set<java.util.Map<String, Object>> topicPartitions, io.vertx.core.Handler<io.vertx.core.AsyncResult<java.lang.Void>> completionHandler) { io.vertx.core.impl.ConversionHelper.fromObject(j_receiver.seekToBeginning(topicPartitions != null ? topicPartitions.stream().map(elt -> elt != null ? new io.vertx.kafka.client.common.TopicPartition(io.vertx.core.impl.ConversionHelper.toJsonObject(elt)) : null).collect(java.util.stream.Collectors.toSet()) : null, completionHandler != null ? new io.vertx.core.Handler<io.vertx.core.AsyncResult<java.lang.Void>>() { public void handle(io.vertx.core.AsyncResult<java.lang.Void> ar) { completionHandler.handle(ar.map(event -> io.vertx.core.impl.ConversionHelper.fromObject(event))); } } : null)); return j_receiver; } public static io.vertx.kafka.client.consumer.KafkaConsumer<java.lang.Object,java.lang.Object> seekToEnd(io.vertx.kafka.client.consumer.KafkaConsumer<Object, Object> j_receiver, java.util.Map<String, Object> topicPartition) {
// Generated dynamic-language glue: converts the Map<String, Object> topicPartition into a
// TopicPartition (via JsonObject), delegates to the async seekToBeginning overload, and adapts
// the completion handler so the AsyncResult payload is mapped through ConversionHelper.fromObject.
// A null partition map and a null handler are passed through as null.
// NOTE(review): this line ends with the opening of the next generated method; its body is
// completed elsewhere in the file — do not reformat across the boundary.
public static io.vertx.kafka.client.consumer.KafkaConsumer<java.lang.Object,java.lang.Object> seekToBeginning(io.vertx.kafka.client.consumer.KafkaConsumer<Object, Object> j_receiver, java.util.Map<String, Object> topicPartition, io.vertx.core.Handler<io.vertx.core.AsyncResult<java.lang.Void>> completionHandler) { io.vertx.core.impl.ConversionHelper.fromObject(j_receiver.seekToBeginning(topicPartition != null ? new io.vertx.kafka.client.common.TopicPartition(io.vertx.core.impl.ConversionHelper.toJsonObject(topicPartition)) : null, completionHandler != null ? new io.vertx.core.Handler<io.vertx.core.AsyncResult<java.lang.Void>>() { public void handle(io.vertx.core.AsyncResult<java.lang.Void> ar) { completionHandler.handle(ar.map(event -> io.vertx.core.impl.ConversionHelper.fromObject(event))); } } : null)); return j_receiver; } public static io.vertx.kafka.client.consumer.KafkaConsumer<java.lang.Object,java.lang.Object> seekToBeginning(io.vertx.kafka.client.consumer.KafkaConsumer<Object, Object> j_receiver, java.util.Set<java.util.Map<String, Object>> topicPartitions, io.vertx.core.Handler<io.vertx.core.AsyncResult<java.lang.Void>> completionHandler) {
// Generated dynamic-language glue: converts the Map<String, Object> topicPartition into a
// TopicPartition (via JsonObject) before delegating to the core consumer's seekToBeginning,
// then returns the receiver for chaining. A null partition map is passed through as null.
// NOTE(review): this line ends with the opening of the next generated method; its body is
// completed elsewhere in the file — do not reformat across the boundary.
public static io.vertx.kafka.client.consumer.KafkaConsumer<java.lang.Object,java.lang.Object> seekToBeginning(io.vertx.kafka.client.consumer.KafkaConsumer<Object, Object> j_receiver, java.util.Map<String, Object> topicPartition) { io.vertx.core.impl.ConversionHelper.fromObject(j_receiver.seekToBeginning(topicPartition != null ? new io.vertx.kafka.client.common.TopicPartition(io.vertx.core.impl.ConversionHelper.toJsonObject(topicPartition)) : null)); return j_receiver; } public static io.vertx.kafka.client.consumer.KafkaConsumer<java.lang.Object,java.lang.Object> seekToBeginning(io.vertx.kafka.client.consumer.KafkaConsumer<Object, Object> j_receiver, java.util.Set<java.util.Map<String, Object>> topicPartitions) {
@Test public void testPollTimeout(TestContext ctx) throws Exception { Async async = ctx.async(); String topicName = "testPollTimeout"; Properties config = kafkaCluster.useTo().getConsumerProperties(topicName, topicName, OffsetResetStrategy.EARLIEST); config.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class); config.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class); io.vertx.kafka.client.common.TopicPartition topicPartition = new io.vertx.kafka.client.common.TopicPartition(topicName, 0); KafkaConsumer<Object, Object> consumerWithCustomTimeout = KafkaConsumer.create(vertx, config); int pollingTimeout = 1500; // Set the polling timeout to 1500 ms (default is 1000) consumerWithCustomTimeout.pollTimeout(pollingTimeout); // Subscribe to the empty topic (we want the poll() call to timeout!) consumerWithCustomTimeout.subscribe(topicName, subscribeRes -> { consumerWithCustomTimeout.handler(rec -> {}); // Consumer will now immediately poll once long beforeSeek = System.currentTimeMillis(); consumerWithCustomTimeout.seekToBeginning(topicPartition, seekRes -> { long durationWShortTimeout = System.currentTimeMillis() - beforeSeek; ctx.assertTrue(durationWShortTimeout >= pollingTimeout, "Operation must take at least as long as the polling timeout"); consumerWithCustomTimeout.close(); async.countDown(); }); }); }
@Test public void testPollTimeout(TestContext ctx) throws Exception { Async async = ctx.async(); String topicName = "testPollTimeout"; Properties config = kafkaCluster.useTo().getConsumerProperties(topicName, topicName, OffsetResetStrategy.EARLIEST); config.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class); config.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class); io.vertx.kafka.client.common.TopicPartition topicPartition = new io.vertx.kafka.client.common.TopicPartition(topicName, 0); KafkaConsumer<Object, Object> consumerWithCustomTimeout = KafkaConsumer.create(vertx, config); int pollingTimeout = 1500; // Set the polling timeout to 1500 ms (default is 1000) consumerWithCustomTimeout.pollTimeout(pollingTimeout); // Subscribe to the empty topic (we want the poll() call to timeout!) consumerWithCustomTimeout.subscribe(topicName, subscribeRes -> { consumerWithCustomTimeout.handler(rec -> {}); // Consumer will now immediately poll once long beforeSeek = System.currentTimeMillis(); consumerWithCustomTimeout.seekToBeginning(topicPartition, seekRes -> { long durationWShortTimeout = System.currentTimeMillis() - beforeSeek; ctx.assertTrue(durationWShortTimeout >= pollingTimeout, "Operation must take at least as long as the polling timeout"); consumerWithCustomTimeout.close(); async.countDown(); }); }); }