    private PartitionInfo createPartitionInfo(String topic, int partition) {
        return new PartitionInfo(topic, partition, null, null, null);
    }
    @Override
    public List<PartitionInfo> partitionsFor(String s) {
        String[] streams = getStreamsForTopic(s);
        List<PartitionInfo> partitions = Lists.newArrayListWithExpectedSize(streams.length);
        for (int i = 0; i < streams.length; i++) {
            // TODO: maybe add getOwner from dl write proxy to return the owner of the partition
            partitions.add(new PartitionInfo(s, i, null, null, null));
        }
        return partitions;
    }
    /**
     * Transform a topic and its PartitionMetadata into a PartitionInfo.
     *
     * @param topic the topic name
     * @param partitionMetadata the partition metadata to convert
     * @return the corresponding PartitionInfo
     */
    public static PartitionInfo partitionMetaToInfo(String topic, PartitionMetadata partitionMetadata) {
        return new PartitionInfo(
            topic,
            partitionMetadata.partition(),
            partitionMetadata.leader(),
            partitionMetadata.replicas().toArray(new Node[0]),
            partitionMetadata.isr().toArray(new Node[0]),
            partitionMetadata.offlineReplicas().toArray(new Node[0]));
    }
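    // A minimal usage sketch, not part of the original source: mapping a topic's list of
    // PartitionMetadata to PartitionInfo via the helper above. The method name
    // partitionInfosForTopic is hypothetical; it assumes only the PartitionMetadata
    // accessors already used by partitionMetaToInfo.
    public static List<PartitionInfo> partitionInfosForTopic(String topic,
                                                             List<PartitionMetadata> partitionMetadata) {
        List<PartitionInfo> infos = new ArrayList<>(partitionMetadata.size());
        for (PartitionMetadata pm : partitionMetadata) {
            infos.add(partitionMetaToInfo(topic, pm));
        }
        return infos;
    }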
    public static Cluster clusterWith(final int nodes, final Map<String, Integer> topicPartitionCounts) {
        final Node[] ns = new Node[nodes];
        for (int i = 0; i < nodes; i++)
            ns[i] = new Node(i, "localhost", 1969);
        final List<PartitionInfo> parts = new ArrayList<>();
        for (final Map.Entry<String, Integer> topicPartition : topicPartitionCounts.entrySet()) {
            final String topic = topicPartition.getKey();
            final int partitions = topicPartition.getValue();
            for (int i = 0; i < partitions; i++)
                parts.add(new PartitionInfo(topic, i, ns[i % ns.length], ns, ns));
        }
        return new Cluster("kafka-cluster", asList(ns), parts, Collections.emptySet(), Topic.INTERNAL_TOPICS);
    }
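    // A minimal usage sketch, not part of the original source: a three-node test cluster
    // hosting a single hypothetical topic "test-topic" with four partitions.
    Cluster cluster = clusterWith(3, Collections.singletonMap("test-topic", 4));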
    @Test
    public void detectBrokerWithoutLeader() {
        List<PartitionInfo> partitions = new ArrayList<>();
        Node[] node = nodes(3);
        partitions.add(new PartitionInfo(TOPIC, 0, node[0], new Node[] {node[0], node[1]}, null));
        partitions.add(new PartitionInfo(TOPIC, 1, node[0], new Node[] {node[0], node[1]}, null));
        partitions.add(new PartitionInfo(TOPIC, 2, node[1], new Node[] {node[1], node[0]}, null));
        partitions.add(new PartitionInfo(TOPIC, 3, node[1], new Node[] {node[2], node[1]}, null));
        partitions.add(new PartitionInfo(TOPIC, 4, node[1], new Node[] {node[2], node[0]}, null));

        Assert.assertFalse(TopicManagementHelper.someBrokerNotPreferredLeader(partitions, brokers(3)));
        Assert.assertTrue(TopicManagementHelper.someBrokerNotElectedLeader(partitions, brokers(3)));
    }
    @Test
    public void noDetection() {
        List<PartitionInfo> partitions = new ArrayList<>();
        Node[] node = nodes(2);
        partitions.add(new PartitionInfo(TOPIC, 0, node[0], new Node[] {node[0], node[1]}, null));
        partitions.add(new PartitionInfo(TOPIC, 1, node[0], new Node[] {node[0], node[1]}, null));
        partitions.add(new PartitionInfo(TOPIC, 2, node[1], new Node[] {node[1], node[0]}, null));
        partitions.add(new PartitionInfo(TOPIC, 3, node[1], new Node[] {node[1], node[0]}, null));

        Assert.assertFalse(TopicManagementHelper.someBrokerNotPreferredLeader(partitions, brokers(2)));
        Assert.assertFalse(TopicManagementHelper.someBrokerNotElectedLeader(partitions, brokers(2)));
    }
    @Test
    public void detectBrokerWithoutPreferredLeader() {
        List<PartitionInfo> partitions = new ArrayList<>();
        Node[] node = nodes(3);
        partitions.add(new PartitionInfo(TOPIC, 0, node[0], new Node[] {node[0], node[1]}, null));
        partitions.add(new PartitionInfo(TOPIC, 1, node[0], new Node[] {node[0], node[1]}, null));
        partitions.add(new PartitionInfo(TOPIC, 2, node[1], new Node[] {node[0], node[0]}, null));
        partitions.add(new PartitionInfo(TOPIC, 3, node[1], new Node[] {node[2], node[1]}, null));
        partitions.add(new PartitionInfo(TOPIC, 4, node[1], new Node[] {node[2], node[0]}, null));

        Assert.assertTrue(TopicManagementHelper.someBrokerNotPreferredLeader(partitions, brokers(3)));
        Assert.assertTrue(TopicManagementHelper.someBrokerNotElectedLeader(partitions, brokers(3)));
    }
}
    private Cluster getCluster() {
        Node node0 = new Node(BROKER_ID_0, "localhost", 100, "rack0");
        Node node1 = new Node(BROKER_ID_1, "localhost", 100, "rack1");
        Node[] nodes = {node0, node1};
        Set<Node> allNodes = new HashSet<>();
        allNodes.add(node0);
        allNodes.add(node1);
        Set<PartitionInfo> parts = new HashSet<>();
        parts.add(new PartitionInfo(TOPIC1, P0, node0, nodes, nodes));
        parts.add(new PartitionInfo(TOPIC1, P1, node1, nodes, nodes));
        parts.add(new PartitionInfo(TOPIC2, P0, node0, nodes, nodes));
        parts.add(new PartitionInfo(TOPIC2, P1, node0, nodes, nodes));
        return new Cluster("testCluster", allNodes, parts, Collections.emptySet(), Collections.emptySet());
    }
}
final String topicB = "topicB"; List<PartitionInfo> allPartitions = asList(new PartitionInfo(topicA, 0, node0, nodes, nodes), new PartitionInfo(topicA, 1, node1, nodes, nodes), new PartitionInfo(topicA, 2, node2, nodes, nodes), new PartitionInfo(topicB, 0, node0, nodes, nodes) ); Cluster testCluster = new Cluster("clusterId", asList(node0, node1, node2), allPartitions,
    @Test
    public void detectLowTotalNumberOfPartitions() {
        List<PartitionInfo> partitions = new ArrayList<>();
        Node[] node = nodes(3);
        partitions.add(new PartitionInfo(TOPIC, 0, node[0], new Node[] {node[0], node[1]}, null));
        partitions.add(new PartitionInfo(TOPIC, 1, node[1], new Node[] {node[1], node[0]}, null));
        partitions.add(new PartitionInfo(TOPIC, 2, node[2], new Node[] {node[2], node[0]}, null));

        Assert.assertFalse(TopicManagementHelper.someBrokerNotPreferredLeader(partitions, brokers(3)));
        Assert.assertFalse(TopicManagementHelper.someBrokerNotElectedLeader(partitions, brokers(3)));
        Assert.assertEquals(TopicManagementHelper.getReplicationFactor(partitions), 2);
    }
    private Cluster generateExpectedCluster(ExecutionProposal proposal, TopicPartition tp, boolean isLeaderMove) {
        List<Node> mockProposalReplicas = new ArrayList<>(proposal.oldReplicas().size());
        for (Integer oldId : proposal.oldReplicas()) {
            mockProposalReplicas.add(new Node(oldId, "null", -1));
        }

        Node[] isrArray = new Node[mockProposalReplicas.size()];
        isrArray = mockProposalReplicas.toArray(isrArray);

        Set<PartitionInfo> partitions = new HashSet<>();
        partitions.add(new PartitionInfo(tp.topic(), tp.partition(),
                                         mockProposalReplicas.get(isLeaderMove ? 1 : 0), isrArray, isrArray));

        return new Cluster(null, mockProposalReplicas, partitions,
                           Collections.<String>emptySet(), Collections.<String>emptySet());
    }
    private Cluster generateExpectedCluster(ExecutionProposal proposal, TopicPartition tp) {
        List<Node> expectedReplicas = new ArrayList<>(proposal.oldReplicas().size());
        expectedReplicas.add(new Node(0, "null", -1));
        expectedReplicas.add(new Node(2, "null", -1));

        Node[] isrArray = new Node[expectedReplicas.size()];
        isrArray = expectedReplicas.toArray(isrArray);

        Set<PartitionInfo> partitions = new HashSet<>();
        partitions.add(new PartitionInfo(tp.topic(), tp.partition(), expectedReplicas.get(1), isrArray, isrArray));

        return new Cluster(null, expectedReplicas, partitions, Collections.emptySet(), Collections.emptySet());
    }
    private Cluster getCluster(Collection<TopicPartition> partitions) {
        Node node0 = new Node(0, "localhost", 100, "rack0");
        Node node1 = new Node(1, "localhost", 100, "rack1");
        Node[] nodes = {node0, node1};
        Set<Node> allNodes = new HashSet<>(2);
        allNodes.add(node0);
        allNodes.add(node1);
        Set<PartitionInfo> parts = new HashSet<>(partitions.size());
        for (TopicPartition tp : partitions) {
            parts.add(new PartitionInfo(tp.topic(), tp.partition(), node0, nodes, nodes));
        }
        return new Cluster("cluster_id", allNodes, parts, Collections.emptySet(), Collections.emptySet());
    }
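    // A minimal usage sketch, not part of the original source: a two-node cluster hosting a
    // single hypothetical partition "topic"-0, with node0 as leader of every partition.
    Cluster cluster = getCluster(Collections.singleton(new TopicPartition("topic", 0)));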
    @Test
    public void testPartitioner() throws Exception {
        PartitionInfo partitionInfo0 = new PartitionInfo(topic, 0, null, null, null);
        PartitionInfo partitionInfo1 = new PartitionInfo(topic, 1, null, null, null);
        Cluster cluster = new Cluster(null, new ArrayList<Node>(0), asList(partitionInfo0, partitionInfo1),
            Collections.<String>emptySet(), Collections.<String>emptySet());
        MockProducer<String, String> producer = new MockProducer<>(cluster, true, new DefaultPartitioner(),
            new StringSerializer(), new StringSerializer());
        ProducerRecord<String, String> record = new ProducerRecord<>(topic, "key", "value");
        Future<RecordMetadata> metadata = producer.send(record);
        assertEquals("Partition should be correct", 1, metadata.get().partition());
        producer.clear();
        assertEquals("Clear should erase our history", 0, producer.history().size());
        producer.close();
    }
    /**
     * Tests that the partitions list is determinate and correctly provided to the custom partitioner.
     */
    @SuppressWarnings("unchecked")
    @Test
    public void testPartitionerInvokedWithDeterminatePartitionList() throws Exception {
        FlinkKafkaPartitioner<String> mockPartitioner = mock(FlinkKafkaPartitioner.class);

        RuntimeContext mockRuntimeContext = mock(StreamingRuntimeContext.class);
        when(mockRuntimeContext.getIndexOfThisSubtask()).thenReturn(0);
        when(mockRuntimeContext.getNumberOfParallelSubtasks()).thenReturn(1);

        // out-of-order list of 4 partitions
        List<PartitionInfo> mockPartitionsList = new ArrayList<>(4);
        mockPartitionsList.add(new PartitionInfo(DummyFlinkKafkaProducer.DUMMY_TOPIC, 3, null, null, null));
        mockPartitionsList.add(new PartitionInfo(DummyFlinkKafkaProducer.DUMMY_TOPIC, 1, null, null, null));
        mockPartitionsList.add(new PartitionInfo(DummyFlinkKafkaProducer.DUMMY_TOPIC, 0, null, null, null));
        mockPartitionsList.add(new PartitionInfo(DummyFlinkKafkaProducer.DUMMY_TOPIC, 2, null, null, null));

        final DummyFlinkKafkaProducer<String> producer = new DummyFlinkKafkaProducer<>(
            FakeStandardProducerConfig.get(),
            new KeyedSerializationSchemaWrapper<>(new SimpleStringSchema()),
            mockPartitioner);
        producer.setRuntimeContext(mockRuntimeContext);

        final KafkaProducer mockProducer = producer.getMockKafkaProducer();
        when(mockProducer.partitionsFor(anyString())).thenReturn(mockPartitionsList);
        when(mockProducer.metrics()).thenReturn(null);

        producer.open(new Configuration());
        verify(mockPartitioner, times(1)).open(0, 1);

        producer.invoke("foobar", SinkContextUtil.forTimestamp(0));
        verify(mockPartitioner, times(1)).partition(
            "foobar", null, "foobar".getBytes(), DummyFlinkKafkaProducer.DUMMY_TOPIC, new int[] {0, 1, 2, 3});
    }
    @Test
    public void testRaiseErrorWhenNoPartitionsPendingOnDrain() throws InterruptedException {
        final long pid = 13131L;
        final short epoch = 1;
        doInitTransactions(pid, epoch);
        transactionManager.beginTransaction();
        // Don't execute transactionManager.maybeAddPartitionToTransaction(tp0). This should result in an error on drain.
        accumulator.append(tp0, time.milliseconds(), "key".getBytes(), "value".getBytes(),
            Record.EMPTY_HEADERS, null, MAX_BLOCK_TIMEOUT);

        Node node1 = new Node(0, "localhost", 1111);
        PartitionInfo part1 = new PartitionInfo(topic, 0, node1, null, null);
        Cluster cluster = new Cluster(null, Collections.singletonList(node1), Collections.singletonList(part1),
            Collections.emptySet(), Collections.emptySet());
        Set<Node> nodes = new HashSet<>();
        nodes.add(node1);
        Map<Integer, List<ProducerBatch>> drainedBatches =
            accumulator.drain(cluster, nodes, Integer.MAX_VALUE, time.milliseconds());

        // We shouldn't drain batches which haven't been added to the transaction yet.
        assertTrue(drainedBatches.containsKey(node1.id()));
        assertTrue(drainedBatches.get(node1.id()).isEmpty());
    }
    @Test
    public void testToString() {
        String topic = "sample";
        int partition = 0;
        Node leader = new Node(0, "localhost", 9092);
        Node r1 = new Node(1, "localhost", 9093);
        Node r2 = new Node(2, "localhost", 9094);
        Node[] replicas = new Node[] {leader, r1, r2};
        Node[] inSyncReplicas = new Node[] {leader, r1};
        Node[] offlineReplicas = new Node[] {r2};
        PartitionInfo partitionInfo = new PartitionInfo(topic, partition, leader, replicas, inSyncReplicas, offlineReplicas);

        String expected = String.format(
            "Partition(topic = %s, partition = %d, leader = %s, replicas = %s, isr = %s, offlineReplicas = %s)",
            topic, partition, leader.idString(), "[0,1,2]", "[0,1]", "[2]");
        Assert.assertEquals(expected, partitionInfo.toString());
    }
    PartitionInfo part1 = new PartitionInfo(topic, 0, node1, null, null);
    PartitionInfo part2 = new PartitionInfo(topic, 1, node2, null, null);
    PartitionInfo part1 = new PartitionInfo(topic, 1, node1, null, null);
    Cluster cluster = new Cluster(null, Collections.singletonList(node1), Collections.singletonList(part1),
        Collections.emptySet(), Collections.emptySet());