/**
 * Builds the listener container for the configured topic.
 *
 * <p>Ack mode is MANUAL_IMMEDIATE: the listener must acknowledge each record
 * itself, and the commit happens immediately on acknowledgement.
 *
 * @param consumerFactory factory used to create the underlying Kafka consumer.
 * @param config source of the topic name to subscribe to.
 * @return a configured, not-yet-started listener container.
 */
@Bean
public KafkaMessageListenerContainer<String, String> container(
        ConsumerFactory<String, String> consumerFactory, ConfigProperties config) {
    ContainerProperties props = new ContainerProperties(config.getTopic());
    props.setAckMode(AckMode.MANUAL_IMMEDIATE);
    props.setMessageListener(listener());
    return new KafkaMessageListenerContainer<>(consumerFactory, props);
}
/**
 * Configure whether records whose listener threw an exception are still
 * acknowledged by the container.
 * @param ackOnError {@code true} to acknowledge records that caused an exception.
 * @return this spec, for method chaining.
 * @see ContainerProperties#setAckOnError(boolean)
 */
public KafkaMessageListenerContainerSpec<K, V> ackOnError(boolean ackOnError) {
    ContainerProperties properties = this.container.getContainerProperties();
    properties.setAckOnError(ackOnError);
    return this;
}
/**
 * Configure the maximum time the consumer blocks waiting for records on each poll.
 * @param pollTimeout the poll timeout in milliseconds (container default: 1000).
 * @return this spec, for method chaining.
 * @see ContainerProperties#setPollTimeout(long)
 */
public KafkaMessageListenerContainerSpec<K, V> pollTimeout(long pollTimeout) {
    ContainerProperties properties = this.container.getContainerProperties();
    properties.setPollTimeout(pollTimeout);
    return this;
}
/**
 * Creates a listener container subscribed to {@code topic} with the given listener.
 *
 * <p>Fix: the bean name was previously {@code topic + "message-listener"} with no
 * separator (e.g. {@code seedsmessage-listener}); a hyphen now separates the topic
 * from the suffix so container/thread names are readable.
 *
 * @param topic the topic to subscribe to; also used to derive the bean name.
 * @param listener the message listener invoked for each consumed record.
 * @return a configured (not started) container named {@code <topic>-message-listener}.
 */
private KafkaMessageListenerContainer<Long, Seed> runContainer(String topic,
        MessageListener<Long, Seed> listener) {
    ContainerProperties containerProps = new ContainerProperties(topic);
    containerProps.setMessageListener(listener);
    KafkaMessageListenerContainer<Long, Seed> tmp = createContainer(containerProps);
    // Hyphen separator added: previously the topic and suffix were fused together.
    tmp.setBeanName(topic + "-message-listener");
    return tmp;
}
@Bean("operationsKafkaListenerContainerFactory") public ConcurrentKafkaListenerContainerFactory<String, Operation> operationsKafkaListenerContainerFactory( ConsumerFactory<String, Operation> consumerFactory, PlatformTransactionManager platformTransactionManager) { ConcurrentKafkaListenerContainerFactory<String, Operation> factory = new ConcurrentKafkaListenerContainerFactory<>(); factory.setConsumerFactory(consumerFactory); RetryTemplate retryTemplate = new RetryTemplate(); factory.setRetryTemplate(retryTemplate); factory.setConcurrency(eventApisConfiguration.getEventBus().getConsumer().getOperationSchedulerPoolSize()); ThreadPoolTaskScheduler scheduler = new ThreadPoolTaskScheduler(); scheduler.setPoolSize(eventApisConfiguration.getEventBus().getConsumer().getOperationSchedulerPoolSize()); scheduler.setBeanName("OperationsFactory-Scheduler"); scheduler.initialize(); factory.getContainerProperties().setScheduler(scheduler); ThreadPoolTaskScheduler consumerScheduler = new ThreadPoolTaskScheduler(); consumerScheduler.setPoolSize(eventApisConfiguration.getEventBus().getConsumer().getOperationSchedulerPoolSize()); consumerScheduler.setBeanName("OperationsFactory-ConsumerScheduler"); consumerScheduler.initialize(); factory.getContainerProperties().setPollTimeout(3000L); factory.getContainerProperties().setAckOnError(false); factory.getContainerProperties().setConsumerTaskExecutor(consumerScheduler); factory.getContainerProperties().setAckMode(AbstractMessageListenerContainer.AckMode.RECORD); factory.getContainerProperties().setTransactionManager(platformTransactionManager); return factory; }
final ContainerProperties containerProperties = anonymous || extendedConsumerProperties.getExtension().isAutoRebalanceEnabled() ? new ContainerProperties(destination.getName()) : new ContainerProperties(topicPartitionInitialOffsets); if (this.transactionManager != null) { containerProperties.setTransactionManager(this.transactionManager); if (!extendedConsumerProperties.getExtension().isAutoCommitOffset()) { messageListenerContainer.getContainerProperties() .setAckMode(AbstractMessageListenerContainer.AckMode.MANUAL); messageListenerContainer.getContainerProperties().setAckOnError(false); .setAckOnError(isAutoCommitOnError(extendedConsumerProperties));
/**
 * Creates a spec that subscribes the container to the given topics.
 * @param consumerFactory the consumer factory used to create the underlying container.
 * @param topics one or more topic names to subscribe to.
 */
KafkaMessageListenerContainerSpec(ConsumerFactory<K, V> consumerFactory, String... topics) {
    this(consumerFactory, new ContainerProperties(topics));
}
@Bean({"eventsKafkaListenerContainerFactory", "kafkaListenerContainerFactory"}) public ConcurrentKafkaListenerContainerFactory<String, PublishedEventWrapper> eventsKafkaListenerContainerFactory( EventMessageConverter eventMessageConverter, ConsumerFactory<String, PublishedEventWrapper> consumerFactory) { ConcurrentKafkaListenerContainerFactory<String, PublishedEventWrapper> factory = new ConcurrentKafkaListenerContainerFactory<>(); factory.setConsumerFactory(consumerFactory); factory.setConcurrency(eventApisConfiguration.getEventBus().getConsumer().getEventConcurrency()); factory.setMessageConverter(eventMessageConverter); factory.getContainerProperties().setPollTimeout(3000); ThreadPoolTaskScheduler scheduler = new ThreadPoolTaskScheduler(); scheduler.setPoolSize(eventApisConfiguration.getEventBus().getConsumer().getEventSchedulerPoolSize()); scheduler.setBeanName("EventsFactory-Scheduler"); scheduler.initialize(); factory.getContainerProperties().setScheduler(scheduler); factory.getContainerProperties().setAckMode(AbstractMessageListenerContainer.AckMode.RECORD); return factory; }
/**
 * Configure the ack mode used when auto-ack (in the configuration properties)
 * is false.
 * <ul>
 * <li>RECORD: ack after each record has been passed to the listener.</li>
 * <li>BATCH: ack after each batch of records received from the consumer has
 * been passed to the listener.</li>
 * <li>TIME: ack after this number of milliseconds (should be greater than
 * {@code #setPollTimeout(long) pollTimeout}).</li>
 * <li>COUNT: ack after at least this number of records have been received.</li>
 * <li>MANUAL: the listener is responsible for acking — use an
 * {@link AcknowledgingMessageListener}.</li>
 * </ul>
 * @param ackMode the {@link AbstractMessageListenerContainer.AckMode}; default BATCH.
 * @return this spec, for method chaining.
 * @see AbstractMessageListenerContainer.AckMode
 */
public KafkaMessageListenerContainerSpec<K, V> ackMode(AbstractMessageListenerContainer.AckMode ackMode) {
    ContainerProperties properties = this.container.getContainerProperties();
    properties.setAckMode(ackMode);
    return this;
}
/**
 * Configure the executor that runs the consumer polling threads.
 * @param consumerTaskExecutor the executor to poll the consumer with.
 * @return this spec, for method chaining.
 * @see ContainerProperties#setConsumerTaskExecutor(AsyncListenableTaskExecutor)
 */
public KafkaMessageListenerContainerSpec<K, V> consumerTaskExecutor(
        AsyncListenableTaskExecutor consumerTaskExecutor) {
    ContainerProperties properties = this.container.getContainerProperties();
    properties.setConsumerTaskExecutor(consumerTaskExecutor);
    return this;
}
/**
 * Configure how many outstanding records trigger an offset commit when
 * {@link AbstractMessageListenerContainer.AckMode#COUNT} or
 * {@link AbstractMessageListenerContainer.AckMode#COUNT_TIME} is in use.
 * @param count the record count threshold.
 * @return this spec, for method chaining.
 * @see ContainerProperties#setAckCount(int)
 */
public KafkaMessageListenerContainerSpec<K, V> ackCount(int count) {
    ContainerProperties properties = this.container.getContainerProperties();
    properties.setAckCount(count);
    return this;
}
/**
 * Builds the listener container for the configured topic using the default
 * (container-default) ack mode.
 *
 * @param consumerFactory factory used to create the underlying Kafka consumer.
 * @param config source of the topic name to subscribe to.
 * @return a configured, not-yet-started listener container.
 */
@Bean
public KafkaMessageListenerContainer<String, String> container(
        ConsumerFactory<String, String> consumerFactory, ConfigProperties config) {
    ContainerProperties listenerProps = new ContainerProperties(config.getTopic());
    listenerProps.setMessageListener(listener());
    return new KafkaMessageListenerContainer<>(consumerFactory, listenerProps);
}
/**
 * Creates a spec bound to explicit topic/partition assignments with initial offsets
 * (manual assignment instead of subscription).
 * @param consumerFactory the consumer factory used to create the underlying container.
 * @param topicPartitions the partitions (with initial offsets) to assign.
 */
KafkaMessageListenerContainerSpec(ConsumerFactory<K, V> consumerFactory,
        TopicPartitionInitialOffset... topicPartitions) {
    this(consumerFactory, new ContainerProperties(topicPartitions));
}
/**
 * Configure the ack mode applied when auto-ack (in the configuration
 * properties) is disabled. Options:
 * <ul>
 * <li>RECORD: ack each record after it is passed to the listener.</li>
 * <li>BATCH: ack each batch of records after it is passed to the listener.</li>
 * <li>TIME: ack after this number of milliseconds (should be greater than
 * {@code #setPollTimeout(long) pollTimeout}).</li>
 * <li>COUNT: ack once at least this number of records has been received.</li>
 * <li>MANUAL: acking is the listener's responsibility — use an
 * {@link AcknowledgingMessageListener}.</li>
 * </ul>
 * @param ackMode the {@link AbstractMessageListenerContainer.AckMode}; default BATCH.
 * @return this spec, for method chaining.
 * @see AbstractMessageListenerContainer.AckMode
 */
public KafkaMessageListenerContainerSpec<K, V> ackMode(AbstractMessageListenerContainer.AckMode ackMode) {
    ContainerProperties containerProperties = this.container.getContainerProperties();
    containerProperties.setAckMode(ackMode);
    return this;
}
/**
 * Configure the executor used for the threads that poll the consumer.
 * @param consumerTaskExecutor the polling executor.
 * @return this spec, for method chaining.
 * @see ContainerProperties#setConsumerTaskExecutor(AsyncListenableTaskExecutor)
 */
public KafkaMessageListenerContainerSpec<K, V> consumerTaskExecutor(
        AsyncListenableTaskExecutor consumerTaskExecutor) {
    ContainerProperties containerProperties = this.container.getContainerProperties();
    containerProperties.setConsumerTaskExecutor(consumerTaskExecutor);
    return this;
}
/**
 * Configure the outstanding-record count after which offsets are committed
 * when {@link AbstractMessageListenerContainer.AckMode#COUNT} or
 * {@link AbstractMessageListenerContainer.AckMode#COUNT_TIME} is active.
 * @param count the threshold number of records.
 * @return this spec, for method chaining.
 * @see ContainerProperties#setAckCount(int)
 */
public KafkaMessageListenerContainerSpec<K, V> ackCount(int count) {
    ContainerProperties containerProperties = this.container.getContainerProperties();
    containerProperties.setAckCount(count);
    return this;
}
@Bean(name = "messageListenerContainer") public ConcurrentMessageListenerContainer<String, PublishedEventWrapper> messageListenerContainer() { Map<String, Object> consumerProperties = eventApisConfiguration.getEventBus().buildConsumerProperties(); consumerProperties.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, false); consumerProperties.put(ConsumerConfig.METADATA_MAX_AGE_CONFIG, 3000); DefaultKafkaConsumerFactory<String, PublishedEventWrapper> consumerFactory = new DefaultKafkaConsumerFactory<>(consumerProperties, new StringDeserializer(), new JsonDeserializer<>(PublishedEventWrapper.class)); ContainerProperties containerProperties = new ContainerProperties(Pattern.compile(eventTopicRegexStr)); containerProperties.setMessageListener(new MultipleEventMessageListener(eventMessageListeners)); containerProperties.setAckMode(AbstractMessageListenerContainer.AckMode.BATCH); ConcurrentMessageListenerContainer<String, PublishedEventWrapper> messageListenerContainer = new ConcurrentMessageListenerContainer<>(consumerFactory, containerProperties); messageListenerContainer.setBeanName("emon-events"); return messageListenerContainer; }
consumerProps.put("auto.offset.reset", "earliest"); DefaultKafkaConsumerFactory<Integer, String> cf = new DefaultKafkaConsumerFactory<>(consumerProps); ContainerProperties containerProps = new ContainerProperties("messages"); containerProps.setMessageListener((AcknowledgingMessageListener<Integer, String>) (message, ack) -> { LOGGER.info("Receiving: " + message); try {
/**
 * Creates a spec that assigns the container to the given topic/partitions with
 * their initial offsets, bypassing group-managed subscription.
 * @param consumerFactory the consumer factory used to create the underlying container.
 * @param topicPartitions the partitions (with initial offsets) to assign.
 */
KafkaMessageListenerContainerSpec(ConsumerFactory<K, V> consumerFactory,
        TopicPartitionInitialOffset... topicPartitions) {
    this(consumerFactory, new ContainerProperties(topicPartitions));
}
/**
 * Default listener container factory for raw {@code ByteBuffer} payloads.
 *
 * <p>Ack mode is MANUAL: listeners receive an {@code Acknowledgment} and must
 * commit offsets explicitly.
 *
 * @return the configured concurrent listener container factory.
 */
@Bean
public KafkaListenerContainerFactory<ConcurrentMessageListenerContainer<ByteBuffer, ByteBuffer>> kafkaListenerContainerFactory() {
    ConcurrentKafkaListenerContainerFactory<ByteBuffer, ByteBuffer> listenerFactory =
            new ConcurrentKafkaListenerContainerFactory<>();
    listenerFactory.setConsumerFactory(consumerFactory());
    listenerFactory.getContainerProperties()
            .setAckMode(AbstractMessageListenerContainer.AckMode.MANUAL);
    return listenerFactory;
}