// NOTE(review): partial snippet — enclosing method not visible. Appears to detect partitions
// that have queued producer batches but no known leader (deque mutations guarded by its monitor).
Deque<ProducerBatch> deque = entry.getValue(); Node leader = cluster.leaderFor(part); synchronized (deque) { if (leader == null && !deque.isEmpty()) {
// NOTE(review): partial snippet — looks like it groups partitions by their current leader node,
// inserting a map entry the first time each leader is seen. Confirm against the full method.
Node node = cluster.leaderFor(entry.getKey()); if (node != null) { if (!leaders.containsKey(node))
/** * For each proposal, create a leader action task if there is a need for moving the leadership to reach expected final proposal state. * * @param proposals Execution proposals. * @param cluster Kafka cluster state. */ private void maybeAddLeaderChangeTasks(Collection<ExecutionProposal> proposals, Cluster cluster) { for (ExecutionProposal proposal : proposals) { if (proposal.hasLeaderAction()) { Node currentLeader = cluster.leaderFor(proposal.topicPartition()); if (currentLeader != null && currentLeader.id() != proposal.newLeader()) { // Get the execution Id for the leader action proposal execution; long leaderActionExecutionId = _executionId++; ExecutionTask leaderActionTask = new ExecutionTask(leaderActionExecutionId, proposal, LEADER_ACTION); _remainingLeadershipMovements.put(leaderActionExecutionId, leaderActionTask); LOG.trace("Added action {} as leader proposal {}", leaderActionExecutionId, proposal); } } } }
private void testGetOffsetsForTimesWithUnknownOffset() { client.reset(); // Ensure metadata has both partition. MetadataResponse initialMetadataUpdate = TestUtils.metadataUpdateWith(1, singletonMap(topicName, 1)); client.updateMetadata(initialMetadataUpdate); Map<TopicPartition, ListOffsetResponse.PartitionData> partitionData = new HashMap<>(); partitionData.put(tp0, new ListOffsetResponse.PartitionData(Errors.NONE, ListOffsetResponse.UNKNOWN_TIMESTAMP, ListOffsetResponse.UNKNOWN_OFFSET, Optional.empty())); client.prepareResponseFrom(new ListOffsetResponse(0, partitionData), metadata.fetch().leaderFor(tp0)); Map<TopicPartition, Long> timestampToSearch = new HashMap<>(); timestampToSearch.put(tp0, 0L); Map<TopicPartition, OffsetAndTimestamp> offsetAndTimestampMap = fetcher.offsetsByTimes(timestampToSearch, time.timer(Long.MAX_VALUE)); assertTrue(offsetAndTimestampMap.containsKey(tp0)); assertNull(offsetAndTimestampMap.get(tp0)); }
/**
 * The completeness of leadership movement depends on the task state:
 * IN_PROGRESS: done when the leader becomes the destination.
 * ABORTING or DEAD: always considered done, since the destination cannot become leader anymore.
 *
 * There should be no other task state seen here.
 */
private boolean isLeadershipMovementDone(Cluster cluster, TopicPartition tp, ExecutionTask task) {
  Node currentLeader = cluster.leaderFor(tp);
  switch (task.state()) {
    case IN_PROGRESS:
      // Done if leadership already moved to the destination, leadership was lost entirely,
      // or the destination broker dropped out of the ISR (it can no longer become leader).
      return currentLeader == null
             || currentLeader.id() == task.proposal().newLeader()
             || !isInIsr(task.proposal().newLeader(), cluster, tp);
    case ABORTING:
    case DEAD:
      return true;
    default:
      throw new IllegalStateException("Should never be here.");
  }
}
// NOTE(review): partial snippet — looks like two extraction-concatenated test fragments (the
// `updatedCluster.leaderFor(t2p0));` argument tail appears twice); each stages a ListOffset
// response for tp1 addressed to that partition's leader. Verify against the full test source.
updatedCluster.leaderFor(t2p0)); client.prepareResponseFrom(listOffsetResponse(tp1, errorForP1, offsetForP1, offsetForP1), updatedCluster.leaderFor(tp1)); updatedCluster.leaderFor(t2p0)); client.prepareResponseFrom(listOffsetResponse(tp1, Errors.NONE, offsetForP1, offsetForP1), updatedCluster.leaderFor(tp1));
// NOTE(review): partial snippet — stages ListOffset responses for tp1 and (after a metadata
// update) t2p0, each addressed to the current leader from the locally cached metadata.
metadata.fetch().leaderFor(tp0)); client.prepareResponseFrom(listOffsetResponse(tp1, Errors.NONE, 1000L, 32L), metadata.fetch().leaderFor(tp1)); client.prepareMetadataUpdate(updatedMetadata, false); client.prepareResponseFrom(listOffsetResponse(t2p0, Errors.NONE, 1000L, 54L), metadata.fetch().leaderFor(t2p0));
// NOTE(review): partial snippet — when leaderValidation is enabled, a sample is only valid if
// the broker that reported it is the current leader of the sample's partition.
boolean validLeader = true; if (leaderValidation) { Node leader = _metadata.fetch().leaderFor(sample.entity().tp()); validLeader = (leader != null) && (sample.brokerId() == leader.id()); if (!validLeader) {
// NOTE(review): partial snippet — normalizes dots in the topic name (presumably for metric
// naming; confirm in ModelUtils), then bails out with null when the partition has no leader.
Map<Integer, Map<String, Integer>> leaderDistributionStats) { TopicPartition tpWithDotHandled = ModelUtils.partitionHandleDotInTopicName(tpDotNotHandled); Node leaderNode = cluster.leaderFor(tpDotNotHandled); if (leaderNode == null) { return null;
// NOTE(review): partial snippet — flags that at least one partition with queued record batches
// currently has no known leader, so a metadata refresh is presumably needed; confirm in caller.
Deque<RecordBatch> deque = entry.getValue(); Node leader = cluster.leaderFor(part); if (leader == null) { unknownLeadersExist = true;