private void setFetchThresholds() { // get the thresholds, and set defaults if not defined. KafkaConfig kafkaConfig = new KafkaConfig(config); Option<String> fetchThresholdOption = kafkaConfig.getConsumerFetchThreshold(systemName); long fetchThreshold = FETCH_THRESHOLD; if (fetchThresholdOption.isDefined()) { fetchThreshold = Long.valueOf(fetchThresholdOption.get()); } Option<String> fetchThresholdBytesOption = kafkaConfig.getConsumerFetchThresholdBytes(systemName); long fetchThresholdBytes = FETCH_THRESHOLD_BYTES; if (fetchThresholdBytesOption.isDefined()) { fetchThresholdBytes = Long.valueOf(fetchThresholdBytesOption.get()); } int numPartitions = topicPartitionsToSSP.size(); if (numPartitions != topicPartitionsToOffset.size()) { throw new SamzaException("topicPartitionsToSSP.size() doesn't match topicPartitionsToOffset.size()"); } if (numPartitions > 0) { perPartitionFetchThreshold = fetchThreshold / numPartitions; if (fetchThresholdBytesEnabled) { // currently this feature cannot be enabled, because we do not have the size of the messages available. // messages get double buffered, hence divide by 2 perPartitionFetchThresholdBytes = (fetchThresholdBytes / 2) / numPartitions; } } LOG.info("{}: fetchThresholdBytes = {}; fetchThreshold={}; numPartitions={}, perPartitionFetchThreshold={}, perPartitionFetchThresholdBytes(0 if disabled)={}", this, fetchThresholdBytes, fetchThreshold, numPartitions, perPartitionFetchThreshold, perPartitionFetchThresholdBytes); }
/**
 * Create a KafkaSystemConsumer for the provided {@code systemName}
 * @param kafkaConsumer kafka Consumer object to be used by this system consumer
 * @param systemName system name for which we create the consumer
 * @param config application config
 * @param clientId clientId from the kafka consumer to be used in the KafkaConsumerProxy
 * @param metrics metrics for this KafkaSystemConsumer
 * @param clock system clock
 */
public KafkaSystemConsumer(Consumer<K, V> kafkaConsumer, String systemName, Config config, String clientId,
    KafkaSystemConsumerMetrics metrics, Clock clock) {
  super(metrics.registry(), clock, metrics.getClass().getName());

  this.kafkaConsumer = kafkaConsumer;
  this.systemName = systemName;
  this.config = config;
  this.clientId = clientId;
  this.metrics = metrics;

  fetchThresholdBytesEnabled = new KafkaConfig(config).isConsumerFetchThresholdBytesEnabled(systemName);

  // Sink through which the proxy hands polled messages back to this consumer.
  messageSink = new KafkaConsumerMessageSink();

  // The proxy performs the actual message reading; its metric name is "<system>-<clientId>".
  proxy = new KafkaConsumerProxy(kafkaConsumer, systemName, clientId, messageSink, metrics,
      systemName + "-" + clientId);
  LOG.info("{}: Created KafkaConsumerProxy {} ", this, proxy);
}
private void setFetchThresholds() { // get the thresholds, and set defaults if not defined. KafkaConfig kafkaConfig = new KafkaConfig(config); Option<String> fetchThresholdOption = kafkaConfig.getConsumerFetchThreshold(systemName); long fetchThreshold = FETCH_THRESHOLD; if (fetchThresholdOption.isDefined()) { fetchThreshold = Long.valueOf(fetchThresholdOption.get()); } Option<String> fetchThresholdBytesOption = kafkaConfig.getConsumerFetchThresholdBytes(systemName); long fetchThresholdBytes = FETCH_THRESHOLD_BYTES; if (fetchThresholdBytesOption.isDefined()) { fetchThresholdBytes = Long.valueOf(fetchThresholdBytesOption.get()); } int numPartitions = topicPartitionsToSSP.size(); if (numPartitions != topicPartitionsToOffset.size()) { throw new SamzaException("topicPartitionsToSSP.size() doesn't match topicPartitionsToOffset.size()"); } if (numPartitions > 0) { perPartitionFetchThreshold = fetchThreshold / numPartitions; if (fetchThresholdBytesEnabled) { // currently this feature cannot be enabled, because we do not have the size of the messages available. // messages get double buffered, hence divide by 2 perPartitionFetchThresholdBytes = (fetchThresholdBytes / 2) / numPartitions; } } LOG.info("{}: fetchThresholdBytes = {}; fetchThreshold={}; numPartitions={}, perPartitionFetchThreshold={}, perPartitionFetchThresholdBytes(0 if disabled)={}", this, fetchThresholdBytes, fetchThreshold, numPartitions, perPartitionFetchThreshold, perPartitionFetchThresholdBytes); }
private void setFetchThresholds() { // get the thresholds, and set defaults if not defined. KafkaConfig kafkaConfig = new KafkaConfig(config); Option<String> fetchThresholdOption = kafkaConfig.getConsumerFetchThreshold(systemName); long fetchThreshold = FETCH_THRESHOLD; if (fetchThresholdOption.isDefined()) { fetchThreshold = Long.valueOf(fetchThresholdOption.get()); } Option<String> fetchThresholdBytesOption = kafkaConfig.getConsumerFetchThresholdBytes(systemName); long fetchThresholdBytes = FETCH_THRESHOLD_BYTES; if (fetchThresholdBytesOption.isDefined()) { fetchThresholdBytes = Long.valueOf(fetchThresholdBytesOption.get()); } int numPartitions = topicPartitionsToSSP.size(); if (numPartitions != topicPartitionsToOffset.size()) { throw new SamzaException("topicPartitionsToSSP.size() doesn't match topicPartitionsToOffset.size()"); } if (numPartitions > 0) { perPartitionFetchThreshold = fetchThreshold / numPartitions; if (fetchThresholdBytesEnabled) { // currently this feature cannot be enabled, because we do not have the size of the messages available. // messages get double buffered, hence divide by 2 perPartitionFetchThresholdBytes = (fetchThresholdBytes / 2) / numPartitions; } } LOG.info("{}: fetchThresholdBytes = {}; fetchThreshold={}; numPartitions={}, perPartitionFetchThreshold={}, perPartitionFetchThresholdBytes(0 if disabled)={}", this, fetchThresholdBytes, fetchThreshold, numPartitions, perPartitionFetchThreshold, perPartitionFetchThresholdBytes); }
/**
 * Create a KafkaSystemConsumer for the provided {@code systemName}
 * @param kafkaConsumer kafka Consumer object to be used by this system consumer
 * @param systemName system name for which we create the consumer
 * @param config application config
 * @param clientId clientId from the kafka consumer to be used in the KafkaConsumerProxy
 * @param metrics metrics for this KafkaSystemConsumer
 * @param clock system clock
 */
public KafkaSystemConsumer(Consumer<K, V> kafkaConsumer, String systemName, Config config, String clientId,
    KafkaSystemConsumerMetrics metrics, Clock clock) {
  super(metrics.registry(), clock, metrics.getClass().getName());

  this.kafkaConsumer = kafkaConsumer;
  this.systemName = systemName;
  this.config = config;
  this.clientId = clientId;
  this.metrics = metrics;

  fetchThresholdBytesEnabled = new KafkaConfig(config).isConsumerFetchThresholdBytesEnabled(systemName);

  // Sink through which the proxy hands polled messages back to this consumer.
  messageSink = new KafkaConsumerMessageSink();

  // The proxy performs the actual message reading; its metric name is "<system>-<clientId>".
  proxy = new KafkaConsumerProxy(kafkaConsumer, systemName, clientId, messageSink, metrics,
      systemName + "-" + clientId);
  LOG.info("{}: Created KafkaConsumerProxy {} ", this, proxy);
}
/**
 * Create a KafkaSystemConsumer for the provided {@code systemName}
 * @param kafkaConsumer kafka Consumer object to be used by this system consumer
 * @param systemName system name for which we create the consumer
 * @param config application config
 * @param clientId clientId from the kafka consumer to be used in the KafkaConsumerProxy
 * @param metrics metrics for this KafkaSystemConsumer
 * @param clock system clock
 */
public KafkaSystemConsumer(Consumer<K, V> kafkaConsumer, String systemName, Config config, String clientId,
    KafkaSystemConsumerMetrics metrics, Clock clock) {
  super(metrics.registry(), clock, metrics.getClass().getName());

  this.kafkaConsumer = kafkaConsumer;
  this.systemName = systemName;
  this.config = config;
  this.clientId = clientId;
  this.metrics = metrics;

  fetchThresholdBytesEnabled = new KafkaConfig(config).isConsumerFetchThresholdBytesEnabled(systemName);

  // Sink through which the proxy hands polled messages back to this consumer.
  messageSink = new KafkaConsumerMessageSink();

  // The proxy performs the actual message reading; its metric name is "<system>-<clientId>".
  proxy = new KafkaConsumerProxy(kafkaConsumer, systemName, clientId, messageSink, metrics,
      systemName + "-" + clientId);
  LOG.info("{}: Created KafkaConsumerProxy {} ", this, proxy);
}
// Admin client used for topic create/validate operations against the Kafka cluster.
adminClient = AdminClient.create(props);

KafkaConfig kafkaConfig = new KafkaConfig(config);
// parseInt avoids the needless boxing of Integer.valueOf
coordinatorStreamReplicationFactor = Integer.parseInt(kafkaConfig.getCoordinatorReplicationFactor());
coordinatorStreamProperties = KafkaSystemAdminUtilsScala.getCoordinatorTopicProperties(kafkaConfig);
KafkaConfig kafkaConfig = new KafkaConfig(config);
// parseInt avoids the needless boxing of Integer.valueOf
coordinatorStreamReplicationFactor = Integer.parseInt(kafkaConfig.getCoordinatorReplicationFactor());
coordinatorStreamProperties = KafkaSystemAdminUtilsScala.getCoordinatorTopicProperties(kafkaConfig);
KafkaConfig kafkaConfig = new KafkaConfig(config);
// parseInt avoids the needless boxing of Integer.valueOf
coordinatorStreamReplicationFactor = Integer.parseInt(kafkaConfig.getCoordinatorReplicationFactor());
coordinatorStreamProperties = KafkaSystemAdminUtilsScala.getCoordinatorTopicProperties(kafkaConfig);
@Test
public void testGetCheckpointTopicProperties() {
  Map<String, String> config = new HashMap<>();

  // Default (streaming) mode: compacted topic with the default segment size.
  Properties properties = new KafkaConfig(new MapConfig(config)).getCheckpointTopicProperties();
  // JUnit convention is assertEquals(expected, actual); the original had the arguments
  // reversed, which produces misleading "expected X but was Y" failure messages.
  assertEquals("compact", properties.getProperty("cleanup.policy"));
  assertEquals(String.valueOf(KafkaConfig.DEFAULT_CHECKPOINT_SEGMENT_BYTES()),
      properties.getProperty("segment.bytes"));

  // Batch mode: the checkpoint topic additionally gets a delete policy and a retention limit.
  config.put(ApplicationConfig.APP_MODE, ApplicationConfig.ApplicationMode.BATCH.name());
  properties = new KafkaConfig(new MapConfig(config)).getCheckpointTopicProperties();
  assertEquals("compact,delete", properties.getProperty("cleanup.policy"));
  assertEquals(String.valueOf(KafkaConfig.DEFAULT_CHECKPOINT_SEGMENT_BYTES()),
      properties.getProperty("segment.bytes"));
  assertEquals(String.valueOf(KafkaConfig.DEFAULT_RETENTION_MS_FOR_BATCH()),
      properties.getProperty("retention.ms"));
}
}