@Override
protected boolean getIsAutoCommitEnabled() {
	// Auto-committing offsets requires the legacy-consumer enable flag; bail out
	// early so the interval is not even parsed when the feature is switched off.
	if (!PropertiesUtil.getBoolean(kafkaProperties, "auto.commit.enable", true)) {
		return false;
	}
	// A non-positive commit interval also disables auto-committing.
	return PropertiesUtil.getLong(kafkaProperties, "auto.commit.interval.ms", 60000) > 0;
}
public Kafka08PartitionDiscoverer(
		KafkaTopicsDescriptor topicsDescriptor,
		int indexOfThisSubtask,
		int numParallelSubtasks,
		Properties kafkaProperties) {

	super(topicsDescriptor, indexOfThisSubtask, numParallelSubtasks);

	checkNotNull(kafkaProperties);

	// The comma-separated bootstrap server list is mandatory; fail fast when it is missing.
	String bootstrapServers = kafkaProperties.getProperty(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG);
	checkArgument(bootstrapServers != null && !bootstrapServers.isEmpty(),
		"Configuration property %s not set", ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG);
	this.seedBrokerAddresses = bootstrapServers.split(",");

	// Stagger which seed broker each subtask contacts first, so startup load is
	// spread across the brokers instead of all subtasks hitting the first entry.
	this.currentContactSeedBrokerIndex = indexOfThisSubtask % seedBrokerAddresses.length;

	this.soTimeout = getInt(kafkaProperties, "socket.timeout.ms", 30000);
	this.bufferSize = getInt(kafkaProperties, "socket.receive.buffer.bytes", 65536);
	this.numRetries = getInt(kafkaProperties, GET_PARTITIONS_RETRIES_KEY, DEFAULT_GET_PARTITIONS_RETRIES);
}
/**
 * Reads a long value from the given properties, falling back to the default when
 * the key is absent or its value cannot be parsed. A malformed value is reported
 * through the supplied logger instead of being propagated to the caller.
 *
 * @param config properties to read from
 * @param key key to look up in the properties
 * @param defaultValue value returned when the key is missing or malformed
 * @param logger logger used to warn about a malformed value
 * @return the parsed long, or {@code defaultValue}
 */
public static long getLong(Properties config, String key, long defaultValue, Logger logger) {
	long result;
	try {
		result = getLong(config, key, defaultValue);
	} catch (IllegalArgumentException iae) {
		// Swallow deliberately: this overload is documented to only log parse failures.
		logger.warn(iae.getMessage());
		result = defaultValue;
	}
	return result;
}
@Override protected AbstractFetcher<T, ?> createFetcher( SourceContext<T> sourceContext, Map<KafkaTopicPartition, Long> assignedPartitionsWithInitialOffsets, SerializedValue<AssignerWithPeriodicWatermarks<T>> watermarksPeriodic, SerializedValue<AssignerWithPunctuatedWatermarks<T>> watermarksPunctuated, StreamingRuntimeContext runtimeContext, OffsetCommitMode offsetCommitMode, MetricGroup consumerMetricGroup, boolean useMetrics) throws Exception { long autoCommitInterval = (offsetCommitMode == OffsetCommitMode.KAFKA_PERIODIC) ? PropertiesUtil.getLong(kafkaProperties, "auto.commit.interval.ms", 60000) : -1; // this disables the periodic offset committer thread in the fetcher return new Kafka08Fetcher<>( sourceContext, assignedPartitionsWithInitialOffsets, watermarksPeriodic, watermarksPunctuated, runtimeContext, deserializer, kafkaProperties, autoCommitInterval, consumerMetricGroup, useMetrics); }
@Override
protected boolean getIsAutoCommitEnabled() {
	// Auto-committing requires the enable flag; return early so the interval is
	// not parsed at all when the feature is switched off (preserves short-circuit).
	if (!getBoolean(properties, ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, true)) {
		return false;
	}
	// A non-positive commit interval also disables auto-committing.
	return PropertiesUtil.getLong(properties, ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, 5000) > 0;
}
/**
 * Reads a long value from the given properties, falling back to the default when
 * the key is absent or its value cannot be parsed. A malformed value is reported
 * through the supplied logger instead of being propagated to the caller.
 *
 * @param config properties to read from
 * @param key key to look up in the properties
 * @param defaultValue value returned when the key is missing or malformed
 * @param logger logger used to warn about a malformed value
 * @return the parsed long, or {@code defaultValue}
 */
public static long getLong(Properties config, String key, long defaultValue, Logger logger) {
	long result;
	try {
		result = getLong(config, key, defaultValue);
	} catch (IllegalArgumentException iae) {
		// Swallow deliberately: this overload is documented to only log parse failures.
		logger.warn(iae.getMessage());
		result = defaultValue;
	}
	return result;
}
public SimpleConsumerThread( Kafka08Fetcher<T> owner, ExceptionProxy errorHandler, Properties config, Node broker, List<KafkaTopicPartitionState<TopicAndPartition>> seedPartitions, ClosableBlockingQueue<KafkaTopicPartitionState<TopicAndPartition>> unassignedPartitions, KeyedDeserializationSchema<T> deserializer, long invalidOffsetBehavior) { this.owner = owner; this.errorHandler = errorHandler; this.broker = broker; // all partitions should have been assigned a starting offset by the fetcher checkAllPartitionsHaveDefinedStartingOffsets(seedPartitions); this.partitions = seedPartitions; this.deserializer = requireNonNull(deserializer); this.unassignedPartitions = requireNonNull(unassignedPartitions); this.newPartitionsQueue = new ClosableBlockingQueue<>(); this.invalidOffsetBehavior = invalidOffsetBehavior; // these are the actual configuration values of Kafka + their original default values. this.soTimeout = getInt(config, "socket.timeout.ms", 30000); this.minBytes = getInt(config, "fetch.min.bytes", 1); this.maxWait = getInt(config, "fetch.wait.max.ms", 100); this.fetchSize = getInt(config, "fetch.message.max.bytes", 1048576); this.bufferSize = getInt(config, "socket.receive.buffer.bytes", 65536); this.reconnectLimit = getInt(config, "flink.simple-consumer-reconnectLimit", 3); String groupId = config.getProperty("group.id", "flink-kafka-consumer-legacy-" + broker.id()); this.clientId = config.getProperty("client.id", groupId); }
@Override
protected boolean getIsAutoCommitEnabled() {
	// Auto-committing requires the enable flag; return early so the interval is
	// not parsed at all when the feature is switched off (preserves short-circuit).
	if (!getBoolean(properties, ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, true)) {
		return false;
	}
	// A non-positive commit interval also disables auto-committing.
	return PropertiesUtil.getLong(properties, ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, 5000) > 0;
}
/**
 * Reads a long value from the given properties, falling back to the default when
 * the key is absent or its value cannot be parsed. A malformed value is reported
 * through the supplied logger instead of being propagated to the caller.
 *
 * @param config properties to read from
 * @param key key to look up in the properties
 * @param defaultValue value returned when the key is missing or malformed
 * @param logger logger used to warn about a malformed value
 * @return the parsed long, or {@code defaultValue}
 */
public static long getLong(Properties config, String key, long defaultValue, Logger logger) {
	long result;
	try {
		result = getLong(config, key, defaultValue);
	} catch (IllegalArgumentException iae) {
		// Swallow deliberately: this overload is documented to only log parse failures.
		logger.warn(iae.getMessage());
		result = defaultValue;
	}
	return result;
}
public Kafka08PartitionDiscoverer(
		KafkaTopicsDescriptor topicsDescriptor,
		int indexOfThisSubtask,
		int numParallelSubtasks,
		Properties kafkaProperties) {

	super(topicsDescriptor, indexOfThisSubtask, numParallelSubtasks);

	checkNotNull(kafkaProperties);

	// The comma-separated bootstrap server list is mandatory; fail fast when it is missing.
	String bootstrapServers = kafkaProperties.getProperty(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG);
	checkArgument(bootstrapServers != null && !bootstrapServers.isEmpty(),
		"Configuration property %s not set", ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG);
	this.seedBrokerAddresses = bootstrapServers.split(",");

	// Stagger which seed broker each subtask contacts first, so startup load is
	// spread across the brokers instead of all subtasks hitting the first entry.
	this.currentContactSeedBrokerIndex = indexOfThisSubtask % seedBrokerAddresses.length;

	this.soTimeout = getInt(kafkaProperties, "socket.timeout.ms", 30000);
	this.bufferSize = getInt(kafkaProperties, "socket.receive.buffer.bytes", 65536);
	this.numRetries = getInt(kafkaProperties, GET_PARTITIONS_RETRIES_KEY, DEFAULT_GET_PARTITIONS_RETRIES);
}
/**
 * Common constructor backing the public {@code FlinkKafkaConsumer} constructors.
 *
 * <p>Forwards topic/pattern, deserializer, the partition-discovery interval and
 * the metrics flag to the base class, then configures the poll timeout from the
 * user-supplied properties.
 *
 * @param topics fixed list of topics to subscribe to, or {@code null} when a pattern is used
 * @param subscriptionPattern topic pattern to subscribe to, or {@code null} when a fixed list is used
 * @param deserializer schema used to turn Kafka byte records into Java objects
 * @param props user-supplied consumer properties; must not be {@code null}
 * @throws IllegalArgumentException if the configured poll timeout is not a parsable long
 */
private FlinkKafkaConsumer(
		List<String> topics,
		Pattern subscriptionPattern,
		KeyedDeserializationSchema<T> deserializer,
		Properties props) {

	super(
		topics,
		subscriptionPattern,
		deserializer,
		getLong(
			checkNotNull(props, "props"),
			KEY_PARTITION_DISCOVERY_INTERVAL_MILLIS, PARTITION_DISCOVERY_DISABLED),
		!getBoolean(props, KEY_DISABLE_METRICS, false));

	this.properties = props;
	setDeserializer(this.properties);

	// configure the polling timeout, falling back to the default when the key is absent
	try {
		if (properties.containsKey(KEY_POLL_TIMEOUT)) {
			this.pollTimeout = Long.parseLong(properties.getProperty(KEY_POLL_TIMEOUT));
		} else {
			this.pollTimeout = DEFAULT_POLL_TIMEOUT;
		}
	} catch (NumberFormatException e) {
		// Narrowed from catch (Exception): Long.parseLong is the only statement in the
		// try block that can throw (including for a null value), and the broad catch
		// could have masked unrelated programming errors.
		throw new IllegalArgumentException("Cannot parse poll timeout for '" + KEY_POLL_TIMEOUT + '\'', e);
	}
}
@Override protected AbstractFetcher<T, ?> createFetcher( SourceContext<T> sourceContext, Map<KafkaTopicPartition, Long> assignedPartitionsWithInitialOffsets, SerializedValue<AssignerWithPeriodicWatermarks<T>> watermarksPeriodic, SerializedValue<AssignerWithPunctuatedWatermarks<T>> watermarksPunctuated, StreamingRuntimeContext runtimeContext, OffsetCommitMode offsetCommitMode, MetricGroup consumerMetricGroup, boolean useMetrics) throws Exception { long autoCommitInterval = (offsetCommitMode == OffsetCommitMode.KAFKA_PERIODIC) ? PropertiesUtil.getLong(kafkaProperties, "auto.commit.interval.ms", 60000) : -1; // this disables the periodic offset committer thread in the fetcher return new Kafka08Fetcher<>( sourceContext, assignedPartitionsWithInitialOffsets, watermarksPeriodic, watermarksPunctuated, runtimeContext, deserializer, kafkaProperties, autoCommitInterval, consumerMetricGroup, useMetrics); }
public SimpleConsumerThread(
		Kafka08Fetcher<T> owner,
		ExceptionProxy errorHandler,
		Properties config,
		Node broker,
		List<KafkaTopicPartitionState<TopicAndPartition>> seedPartitions,
		ClosableBlockingQueue<KafkaTopicPartitionState<TopicAndPartition>> unassignedPartitions,
		KeyedDeserializationSchema<T> deserializer,
		long invalidOffsetBehavior) {

	this.owner = owner;
	this.errorHandler = errorHandler;
	this.broker = broker;

	// The fetcher must have resolved a concrete start offset for every seed partition.
	checkAllPartitionsHaveDefinedStartingOffsets(seedPartitions);
	this.partitions = seedPartitions;

	this.deserializer = requireNonNull(deserializer);
	this.unassignedPartitions = requireNonNull(unassignedPartitions);
	this.newPartitionsQueue = new ClosableBlockingQueue<>();
	this.invalidOffsetBehavior = invalidOffsetBehavior;

	// Kafka's own configuration keys, initialized with Kafka's original default values.
	this.soTimeout = getInt(config, "socket.timeout.ms", 30000);
	this.minBytes = getInt(config, "fetch.min.bytes", 1);
	this.maxWait = getInt(config, "fetch.wait.max.ms", 100);
	this.fetchSize = getInt(config, "fetch.message.max.bytes", 1048576);
	this.bufferSize = getInt(config, "socket.receive.buffer.bytes", 65536);

	// Flink-specific knob bounding how often we reconnect to this broker.
	this.reconnectLimit = getInt(config, "flink.simple-consumer-reconnectLimit", 3);
}
/**
 * Common constructor backing the public {@code FlinkKafkaConsumer09} constructors.
 *
 * <p>Forwards topic/pattern, deserializer, the partition-discovery interval and
 * the metrics flag to the base class, then configures the poll timeout from the
 * user-supplied properties.
 *
 * @param topics fixed list of topics to subscribe to, or {@code null} when a pattern is used
 * @param subscriptionPattern topic pattern to subscribe to, or {@code null} when a fixed list is used
 * @param deserializer schema used to turn Kafka byte records into Java objects
 * @param props user-supplied consumer properties; must not be {@code null}
 * @throws IllegalArgumentException if the configured poll timeout is not a parsable long
 */
private FlinkKafkaConsumer09(
		List<String> topics,
		Pattern subscriptionPattern,
		KeyedDeserializationSchema<T> deserializer,
		Properties props) {

	super(
		topics,
		subscriptionPattern,
		deserializer,
		getLong(
			checkNotNull(props, "props"),
			KEY_PARTITION_DISCOVERY_INTERVAL_MILLIS, PARTITION_DISCOVERY_DISABLED),
		!getBoolean(props, KEY_DISABLE_METRICS, false));

	this.properties = props;
	setDeserializer(this.properties);

	// configure the polling timeout, falling back to the default when the key is absent
	try {
		if (properties.containsKey(KEY_POLL_TIMEOUT)) {
			this.pollTimeout = Long.parseLong(properties.getProperty(KEY_POLL_TIMEOUT));
		} else {
			this.pollTimeout = DEFAULT_POLL_TIMEOUT;
		}
	} catch (NumberFormatException e) {
		// Narrowed from catch (Exception): Long.parseLong is the only statement in the
		// try block that can throw (including for a null value), and the broad catch
		// could have masked unrelated programming errors.
		throw new IllegalArgumentException("Cannot parse poll timeout for '" + KEY_POLL_TIMEOUT + '\'', e);
	}
}
@Override protected AbstractFetcher<T, ?> createFetcher( SourceContext<T> sourceContext, Map<KafkaTopicPartition, Long> assignedPartitionsWithInitialOffsets, SerializedValue<AssignerWithPeriodicWatermarks<T>> watermarksPeriodic, SerializedValue<AssignerWithPunctuatedWatermarks<T>> watermarksPunctuated, StreamingRuntimeContext runtimeContext, OffsetCommitMode offsetCommitMode, MetricGroup consumerMetricGroup, boolean useMetrics) throws Exception { long autoCommitInterval = (offsetCommitMode == OffsetCommitMode.KAFKA_PERIODIC) ? PropertiesUtil.getLong(kafkaProperties, "auto.commit.interval.ms", 60000) : -1; // this disables the periodic offset committer thread in the fetcher return new Kafka08Fetcher<>( sourceContext, assignedPartitionsWithInitialOffsets, watermarksPeriodic, watermarksPunctuated, runtimeContext, deserializer, kafkaProperties, autoCommitInterval, consumerMetricGroup, useMetrics); }
public Kafka08PartitionDiscoverer(
		KafkaTopicsDescriptor topicsDescriptor,
		int indexOfThisSubtask,
		int numParallelSubtasks,
		Properties kafkaProperties) {

	super(topicsDescriptor, indexOfThisSubtask, numParallelSubtasks);

	checkNotNull(kafkaProperties);

	// The comma-separated bootstrap server list is mandatory; fail fast when it is missing.
	String bootstrapServers = kafkaProperties.getProperty(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG);
	checkArgument(bootstrapServers != null && !bootstrapServers.isEmpty(),
		"Configuration property %s not set", ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG);
	this.seedBrokerAddresses = bootstrapServers.split(",");

	// Stagger which seed broker each subtask contacts first, so startup load is
	// spread across the brokers instead of all subtasks hitting the first entry.
	this.currentContactSeedBrokerIndex = indexOfThisSubtask % seedBrokerAddresses.length;

	this.soTimeout = getInt(kafkaProperties, "socket.timeout.ms", 30000);
	this.bufferSize = getInt(kafkaProperties, "socket.receive.buffer.bytes", 65536);
	this.numRetries = getInt(kafkaProperties, GET_PARTITIONS_RETRIES_KEY, DEFAULT_GET_PARTITIONS_RETRIES);
}
/**
 * Common constructor backing the public {@code FlinkKafkaConsumer08} constructors.
 *
 * <p>Forwards topic/pattern, deserializer, the partition-discovery interval and the
 * metrics flag to the base class, then eagerly validates the user-supplied properties
 * so misconfiguration fails at job construction rather than at runtime.
 *
 * @param topics fixed list of topics to subscribe to; presumably {@code null} when a
 *        pattern is used — confirm against the base-class contract
 * @param subscriptionPattern topic pattern to subscribe to; presumably {@code null}
 *        when a fixed topic list is used
 * @param deserializer schema used to turn Kafka byte records into Java objects
 * @param props user-supplied consumer properties; must not be {@code null}
 */
private FlinkKafkaConsumer08(
		List<String> topics,
		Pattern subscriptionPattern,
		KeyedDeserializationSchema<T> deserializer,
		Properties props) {

	super(
		topics,
		subscriptionPattern,
		deserializer,
		getLong(
			checkNotNull(props, "props"),
			KEY_PARTITION_DISCOVERY_INTERVAL_MILLIS, PARTITION_DISCOVERY_DISABLED),
		!getBoolean(props, KEY_DISABLE_METRICS, false));

	this.kafkaProperties = props;

	// validate the zookeeper properties
	validateZooKeeperConfig(props);

	// eagerly check for invalid "auto.offset.reset" values before launching the job
	validateAutoOffsetResetValue(props);
}
public SimpleConsumerThread( Kafka08Fetcher<T> owner, ExceptionProxy errorHandler, Properties config, Node broker, List<KafkaTopicPartitionState<TopicAndPartition>> seedPartitions, ClosableBlockingQueue<KafkaTopicPartitionState<TopicAndPartition>> unassignedPartitions, KeyedDeserializationSchema<T> deserializer, long invalidOffsetBehavior) { this.owner = owner; this.errorHandler = errorHandler; this.broker = broker; // all partitions should have been assigned a starting offset by the fetcher checkAllPartitionsHaveDefinedStartingOffsets(seedPartitions); this.partitions = seedPartitions; this.deserializer = requireNonNull(deserializer); this.unassignedPartitions = requireNonNull(unassignedPartitions); this.newPartitionsQueue = new ClosableBlockingQueue<>(); this.invalidOffsetBehavior = invalidOffsetBehavior; // these are the actual configuration values of Kafka + their original default values. this.soTimeout = getInt(config, "socket.timeout.ms", 30000); this.minBytes = getInt(config, "fetch.min.bytes", 1); this.maxWait = getInt(config, "fetch.wait.max.ms", 100); this.fetchSize = getInt(config, "fetch.message.max.bytes", 1048576); this.bufferSize = getInt(config, "socket.receive.buffer.bytes", 65536); this.reconnectLimit = getInt(config, "flink.simple-consumer-reconnectLimit", 3); String groupId = config.getProperty("group.id", "flink-kafka-consumer-legacy-" + broker.id()); this.clientId = config.getProperty("client.id", groupId); }
@Override
protected boolean getIsAutoCommitEnabled() {
	// Auto-committing requires the enable flag; return early so the interval is
	// not parsed at all when the feature is switched off (preserves short-circuit).
	if (!getBoolean(properties, ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, true)) {
		return false;
	}
	// A non-positive commit interval also disables auto-committing.
	return PropertiesUtil.getLong(properties, ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, 5000) > 0;
}
public SimpleConsumerThread( Kafka08Fetcher<T> owner, ExceptionProxy errorHandler, Properties config, Node broker, List<KafkaTopicPartitionState<TopicAndPartition>> seedPartitions, ClosableBlockingQueue<KafkaTopicPartitionState<TopicAndPartition>> unassignedPartitions, KeyedDeserializationSchema<T> deserializer, long invalidOffsetBehavior) { this.owner = owner; this.errorHandler = errorHandler; this.broker = broker; // all partitions should have been assigned a starting offset by the fetcher checkAllPartitionsHaveDefinedStartingOffsets(seedPartitions); this.partitions = seedPartitions; this.deserializer = requireNonNull(deserializer); this.unassignedPartitions = requireNonNull(unassignedPartitions); this.newPartitionsQueue = new ClosableBlockingQueue<>(); this.invalidOffsetBehavior = invalidOffsetBehavior; // these are the actual configuration values of Kafka + their original default values. this.soTimeout = getInt(config, "socket.timeout.ms", 30000); this.minBytes = getInt(config, "fetch.min.bytes", 1); this.maxWait = getInt(config, "fetch.wait.max.ms", 100); this.fetchSize = getInt(config, "fetch.message.max.bytes", 1048576); this.bufferSize = getInt(config, "socket.receive.buffer.bytes", 65536); this.reconnectLimit = getInt(config, "flink.simple-consumer-reconnectLimit", 3); String groupId = config.getProperty("group.id", "flink-kafka-consumer-legacy-" + broker.id()); this.clientId = config.getProperty("client.id", groupId); }