@Override public String formatAsText() { try (KafkaConsumer consumer = new KafkaConsumer(consumerProperties) { }) { //noinspection unchecked List<PartitionInfo> partitionsInfo = consumer.partitionsFor(topic); List<TopicPartition> topicPartitions = partitionsInfo.stream() .map(partitionInfo -> new TopicPartition(partitionInfo.topic(), partitionInfo.partition())) .collect(Collectors.toList()); Map endOffsets = consumer.endOffsets(topicPartitions); Map startOffsets = consumer.beginningOffsets(topicPartitions); return partitionsInfo.stream() .map(partitionInfo -> String.format("%s [start offset = [%s], end offset = [%s]]", partitionInfo.toString(), startOffsets.get(new TopicPartition(partitionInfo.topic(), partitionInfo.partition())), endOffsets.get(new TopicPartition(partitionInfo.topic(), partitionInfo.partition())))) .collect(Collectors.joining("\n")); } catch (Exception e) { return String.format("ERROR fetching metadata for Topic [%s], Connection String [%s], Error [%s]", topic, consumerProperties.getProperty(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG), e.getMessage()); } } }
@Test
public void testToString() {
    // Verifies that PartitionInfo.toString() renders the topic, partition number,
    // leader id, and the replica / ISR / offline broker-id lists in the expected layout.
    String topic = "sample";
    int partition = 0;

    // Fixture: three brokers — broker 0 leads, brokers 0 and 1 are in sync,
    // broker 2 is offline.
    Node leader = new Node(0, "localhost", 9092);
    Node syncedFollower = new Node(1, "localhost", 9093);
    Node offlineFollower = new Node(2, "localhost", 9094);

    Node[] replicas = new Node[] {leader, syncedFollower, offlineFollower};
    Node[] inSyncReplicas = new Node[] {leader, syncedFollower};
    Node[] offlineReplicas = new Node[] {offlineFollower};
    PartitionInfo partitionInfo =
            new PartitionInfo(topic, partition, leader, replicas, inSyncReplicas, offlineReplicas);

    String expected = String.format(
            "Partition(topic = %s, partition = %d, leader = %s, replicas = %s, isr = %s, offlineReplicas = %s)",
            topic, partition, leader.idString(), "[0,1,2]", "[0,1]", "[2]");
    Assert.assertEquals(expected, partitionInfo.toString());
}
@Override public String formatAsText() { try (KafkaConsumer consumer = new KafkaConsumer(consumerProperties) { }) { //noinspection unchecked List<PartitionInfo> partitionsInfo = consumer.partitionsFor(topic); List<TopicPartition> topicPartitions = partitionsInfo.stream() .map(partitionInfo -> new TopicPartition(partitionInfo.topic(), partitionInfo.partition())) .collect(Collectors.toList()); Map endOffsets = consumer.endOffsets(topicPartitions); Map startOffsets = consumer.beginningOffsets(topicPartitions); return partitionsInfo.stream() .map(partitionInfo -> String.format("%s [start offset = [%s], end offset = [%s]]", partitionInfo.toString(), startOffsets.get(new TopicPartition(partitionInfo.topic(), partitionInfo.partition())), endOffsets.get(new TopicPartition(partitionInfo.topic(), partitionInfo.partition())))) .collect(Collectors.joining("\n")); } catch (Exception e) { return String.format("ERROR fetching metadata for Topic [%s], Connection String [%s], Error [%s]", topic, consumerProperties.getProperty(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG), e.getMessage()); } } }
@Override
public Health call() {
    // Health probe: reports DOWN when any in-use partition has no leader.
    // Runs on a dedicated, short-lived metadata consumer (closed by
    // try-with-resources) so the binder's message-processing consumers are
    // never touched by health checks.
    try (Consumer<?, ?> metadataConsumer = consumerFactory.createConsumer()) {
        Set<String> downMessages = new HashSet<>();
        for (String topic : KafkaBinderHealthIndicator.this.binder.getTopicsInUse().keySet()) {
            List<PartitionInfo> partitionInfos = metadataConsumer.partitionsFor(topic);
            if (partitionInfos == null) {
                // partitionsFor may return null when the topic is unknown to the
                // broker (version-dependent behavior); skip it rather than fail
                // the whole probe with an NPE.
                continue;
            }
            for (PartitionInfo partitionInfo : partitionInfos) {
                // A leader id of -1 is Kafka's sentinel for "no current leader",
                // i.e. the partition cannot serve reads or writes right now.
                if (KafkaBinderHealthIndicator.this.binder.getTopicsInUse().get(topic).getPartitionInfos()
                        .contains(partitionInfo) && partitionInfo.leader().id() == -1) {
                    downMessages.add(partitionInfo.toString());
                }
            }
        }
        if (downMessages.isEmpty()) {
            return Health.up().build();
        }
        return Health.down()
                .withDetail("Following partitions in use have no leaders: ", downMessages.toString())
                .build();
    }
    catch (Exception e) {
        // Any metadata failure (broker unreachable, auth error, ...) is itself
        // a DOWN condition; attach the exception for diagnostics.
        return Health.down(e).build();
    }
}
// Flag partitions that are both in use by this binder and currently leaderless:
// Kafka reports a leader id of -1 when no broker is leading the partition,
// meaning it cannot serve reads or writes.
if (topicInformation.getPartitionInfos()
        .contains(partitionInfo) && partitionInfo.leader().id() == -1) {
    downMessages.add(partitionInfo.toString());
// Record any in-use partition whose leader id is -1 (Kafka's "no leader"
// sentinel) so the health check can surface it to the caller.
if (topicInformation.getPartitionInfos()
        .contains(partitionInfo) && partitionInfo.leader().id() == -1) {
    downMessages.add(partitionInfo.toString());