/**
 * Verifies executor behavior when a broker dies before a partition movement can complete.
 * The proposal for TP0 targets the dead broker and therefore can never finish; the test then
 * asserts the partial state left behind (no rollback is performed by the executor).
 */
@Test public void testBrokerDiesWhenMovePartitions() throws Exception {
  ZkUtils zkUtils = KafkaCruiseControlUtils.createZkUtils(zookeeper().getConnectionString());
  Map<String, TopicDescription> topicDescriptions = createTopics();
  int initialLeader0 = topicDescriptions.get(TOPIC0).partitions().get(0).leader().id();
  int initialLeader1 = topicDescriptions.get(TOPIC1).partitions().get(0).leader().id();
  // Shut down the broker that is NOT the leader of TP0's partition, i.e. the movement target below.
  _brokers.get(initialLeader0 == 0 ? 1 : 0).shutdown();
  // TP0: move its single replica from the surviving leader onto the dead broker — cannot complete.
  ExecutionProposal proposal0 =
      new ExecutionProposal(TP0, 0, initialLeader0,
                            Collections.singletonList(initialLeader0),
                            Collections.singletonList(initialLeader0 == 0 ? 1 : 0));
  // TP1: keep the same replica set but reverse the order, i.e. a leadership movement only.
  ExecutionProposal proposal1 =
      new ExecutionProposal(TP1, 0, initialLeader1,
                            Arrays.asList(initialLeader1, initialLeader1 == 0 ? 1 : 0),
                            Arrays.asList(initialLeader1 == 0 ? 1 : 0, initialLeader1));
  Collection<ExecutionProposal> proposalsToExecute = Arrays.asList(proposal0, proposal1);
  try {
    // No proposals to check: the execution is expected to stall/partially fail, so the
    // post-conditions are asserted manually below instead.
    executeAndVerifyProposals(zkUtils, proposalsToExecute, Collections.emptyList());
    // We are not doing the rollback.
    // TP0's assignment in ZK should still point at the (dead) movement target.
    assertEquals(Collections.singletonList(initialLeader0 == 0 ? 1 : 0),
                 ExecutorUtils.newAssignmentForPartition(zkUtils, TP0));
    // The dead broker (initialLeader0 == 0 ? 1 : 0) cannot lead anything, so the surviving
    // broker — which is initialLeader0 — must hold leadership of TOPIC1's partition,
    // regardless of whether initialLeader1 was the broker that was shut down.
    assertEquals(initialLeader0, zkUtils.getLeaderForPartition(TOPIC1, PARTITION).get());
  } finally {
    KafkaCruiseControlUtils.closeZkUtilsWithTimeout(zkUtils, ZK_UTILS_CLOSE_TIMEOUT_MS);
  }
}
/**
 * Executes the given proposals with a freshly built executor and, once execution finishes,
 * verifies for every proposal in {@code proposalsToCheck} that (1) the partition's replication
 * factor is unchanged, (2) all new replicas are present in ZK, and (3) leadership landed on the
 * proposed new leader.
 *
 * @param zkUtils            ZK client used to read partition assignment/leadership.
 * @param proposalsToExecute proposals handed to the executor.
 * @param proposalsToCheck   proposals whose end state is asserted (may be empty).
 */
private void executeAndVerifyProposals(ZkUtils zkUtils,
                                       Collection<ExecutionProposal> proposalsToExecute,
                                       Collection<ExecutionProposal> proposalsToCheck) {
  // Build and start the executor in non-dry-run mode.
  KafkaCruiseControlConfig config = new KafkaCruiseControlConfig(getExecutorProperties());
  Executor executor = new Executor(config, new SystemTime(), new MetricRegistry(), 86400000L, 43200000L);
  executor.setExecutionMode(false);
  executor.executeProposals(proposalsToExecute, Collections.emptySet(), null,
                            EasyMock.mock(LoadMonitor.class), null, null, null);
  // Snapshot each checked partition's replication factor BEFORE execution completes so we can
  // assert afterwards that the movement did not change it.
  Map<TopicPartition, Integer> preExecutionReplicationFactors = new HashMap<>();
  for (ExecutionProposal toCheck : proposalsToCheck) {
    int replicaCount = zkUtils.getReplicasForPartition(toCheck.topic(), toCheck.partitionId()).size();
    preExecutionReplicationFactors.put(new TopicPartition(toCheck.topic(), toCheck.partitionId()), replicaCount);
  }
  waitUntilExecutionFinishes(executor);
  for (ExecutionProposal toCheck : proposalsToCheck) {
    TopicPartition tp = new TopicPartition(toCheck.topic(), toCheck.partitionId());
    int expectedReplicationFactor = preExecutionReplicationFactors.get(tp);
    // Replication factor must be preserved by the movement.
    assertEquals("Replication factor for partition " + tp + " should be " + expectedReplicationFactor,
                 expectedReplicationFactor,
                 zkUtils.getReplicasForPartition(tp.topic(), tp.partition()).size());
    // Every proposed destination replica must now be in the partition's replica set.
    if (toCheck.hasReplicaAction()) {
      for (int brokerId : toCheck.newReplicas()) {
        assertTrue("The partition should have moved for " + tp,
                   zkUtils.getReplicasForPartition(tp.topic(), tp.partition()).contains(brokerId));
      }
    }
    // Leadership must have landed on the proposed leader.
    assertEquals("The leader should have moved for " + tp,
                 toCheck.newLeader(),
                 zkUtils.getLeaderForPartition(tp.topic(), tp.partition()).get());
  }
}
/**
 * Returns a {@link SimpleConsumer} connected to the broker that currently leads the given
 * topic partition, as recorded in ZooKeeper.
 */
private SimpleConsumer findLeaderConsumer(String topic, int partition) {
  // Look up the leader broker id in ZK, then hand back a consumer bound to that broker.
  Option<Object> leader = zkUtils.getLeaderForPartition(topic, partition);
  Integer leaderBrokerId = (Integer) leader.get();
  return getZkConsumer(leaderBrokerId);
}
/**
 * Returns the latest (log-end) offset of the given topic partition by sending an
 * {@code OffsetRequest} to the partition's leader broker via the legacy SimpleConsumer API.
 * Returns 0 when the leader is unknown or a consumer cannot be created.
 * NOTE(review): 0 is also a legal log size, so callers cannot distinguish "empty partition"
 * from "leader unavailable".
 */
private long getTopicLogSize(String topic, int pid) {
  // Ask ZK which broker currently leads this partition (Scala Option interop).
  Option<Object> o = ZkUtils.getLeaderForPartition(zkClient, topic, pid);
  if (o.isEmpty() || o.get() == null) {
    // NOTE(review): "%s" looks like printf-style placeholders; if `log` is an SLF4J logger
    // these args will not be interpolated ("{}" expected) — confirm the logging API in use.
    log.error("No broker for partition %s - %s", topic, pid);
    return 0;
  }
  // Unbox the Scala Int leader broker id into a Java Integer.
  Integer leaderId = Int.unbox(o.get());
  // Reuse a cached consumer for this broker if we already have one.
  SimpleConsumer consumer = consumerMap.get(leaderId);
  if (consumer == null) {
    consumer = createSimpleConsumer(leaderId);
  }
  // createSimpleConsumer may fail.
  if (consumer == null) {
    return 0;
  }
  // Cache (or re-cache) the consumer keyed by broker id for subsequent calls.
  consumerMap.put(leaderId, consumer);
  TopicAndPartition topicAndPartition = new TopicAndPartition(topic, pid);
  // Request the single latest offset (LatestTime, maxNumOffsets = 1).
  PartitionOffsetRequestInfo requestInfo = new PartitionOffsetRequestInfo(OffsetRequest.LatestTime(), 1);
  OffsetRequest request = new OffsetRequest(
      // Scala Map with one entry: this partition -> its offset request info.
      new Map1<TopicAndPartition, PartitionOffsetRequestInfo>(topicAndPartition, requestInfo),
      0,
      Request.OrdinaryConsumerId()
  );
  OffsetResponse response = consumer.getOffsetsBefore(request);
  // NOTE(review): response error codes are not checked here; a broker-side error would surface
  // as an exception from .get()/.head() rather than a handled failure — confirm intended.
  PartitionOffsetsResponse offsetsResponse = response.partitionErrorAndOffsets().get(topicAndPartition).get();
  // The first (and only, since we asked for 1) offset is the log-end offset.
  return scala.Long.unbox(offsetsResponse.offsets().head());
}