@Deprecated
static void assignPartitions(
        final KafkaConsumer<?, ?> consumer,
        final String topic,
        final Set<Integer> partitions
) {
    // Manually assign the given partitions of the topic to this consumer,
    // bypassing consumer-group subscription and rebalancing.
    consumer.assign(
            partitions.stream()
                    .map(n -> new TopicPartition(topic, n))
                    .collect(Collectors.toList())
    );
}
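// A minimal usage sketch for the deprecated helper above, assuming a consumer built from
// hypothetical Properties `props` and a hypothetical topic named "events":
static void assignExample(final Properties props) {
    try (KafkaConsumer<byte[], byte[]> consumer = new KafkaConsumer<>(props)) {
        // Pin the consumer to partitions 0-2 of "events"; with manual assignment
        // no group rebalancing takes place.
        assignPartitions(consumer, "events", new HashSet<>(Arrays.asList(0, 1, 2)));
    }
}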
@Override
public Set<StreamPartition<Integer>> getAssignment() {
    Set<TopicPartition> topicPartitions = consumer.assignment();
    return topicPartitions.stream()
            .map(e -> new StreamPartition<>(e.topic(), e.partition()))
            .collect(Collectors.toSet());
}
@Override
public int hashCode() {
    int result = topicPart.hashCode();
    // Fold the 64-bit offset into 32 bits; equivalent to Long.hashCode(offset).
    result = 31 * result + (int) (offset ^ (offset >>> 32));
    return result;
}
} // closes the enclosing class
private BundleTracker(final ConsumerRecord<byte[], byte[]> initialRecord,
                      final TopicPartition topicPartition,
                      final String keyEncoding) {
    this.initialOffset = initialRecord.offset();
    this.partition = topicPartition.partition();
    this.topic = topicPartition.topic();
    this.key = encodeKafkaKey(initialRecord.key(), keyEncoding);
}
@Override
public void handleCompletedMetadataResponse(RequestHeader requestHeader, long now, MetadataResponse response) {
    this.metadataFetchInProgress = false;

    // If any partition has a leader with missing listeners, log a few for diagnosing broker
    // configuration issues. This could be a transient issue if listeners were added dynamically to brokers.
    List<TopicPartition> missingListenerPartitions = response.topicMetadata().stream()
            .flatMap(topicMetadata -> topicMetadata.partitionMetadata().stream()
                    .filter(partitionMetadata -> partitionMetadata.error() == Errors.LISTENER_NOT_FOUND)
                    .map(partitionMetadata -> new TopicPartition(topicMetadata.topic(), partitionMetadata.partition())))
            .collect(Collectors.toList());
    if (!missingListenerPartitions.isEmpty()) {
        int count = missingListenerPartitions.size();
        log.warn("{} partitions have leader brokers without a matching listener, including {}",
                count, missingListenerPartitions.subList(0, Math.min(10, count)));
    }

    // Check whether any topic's metadata failed to get updated.
    Map<String, Errors> errors = response.errors();
    if (!errors.isEmpty())
        log.warn("Error while fetching metadata with correlation id {} : {}", requestHeader.correlationId(), errors);

    // Don't update the cluster if there are no valid nodes... the topic we want may still be in the
    // process of being created, which means we will get errors and no nodes until it exists.
    if (response.brokers().isEmpty()) {
        log.trace("Ignoring empty metadata response with correlation id {}.", requestHeader.correlationId());
        this.metadata.failedUpdate(now, null);
    } else {
        this.metadata.update(response, now);
    }
}
private static List<TopicPartition> fetchTopicPartitions(String topic, KafkaConsumer<byte[], byte[]> consumer) {
    // This blocks for up to REQUEST_TIMEOUT_MS_CONFIG ("request.timeout.ms"), then throws
    // org.apache.kafka.common.errors.TimeoutException if the metadata cannot be fetched.
    // TODO: add retry logic, maybe.
    List<PartitionInfo> partitions = consumer.partitionsFor(topic);
    return partitions.stream()
            .map(p -> new TopicPartition(topic, p.partition()))
            .collect(Collectors.toList());
}
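// The TODO above asks for retry logic; a sketch of one way to wrap the call. The attempt
// count and linear backoff are arbitrary choices here, not part of the original code, and
// maxAttempts is assumed to be at least 1:
private static List<TopicPartition> fetchTopicPartitionsWithRetry(
        String topic, KafkaConsumer<byte[], byte[]> consumer, int maxAttempts) throws InterruptedException {
    org.apache.kafka.common.errors.TimeoutException last = null;
    for (int attempt = 1; attempt <= maxAttempts; attempt++) {
        try {
            return fetchTopicPartitions(topic, consumer);
        } catch (org.apache.kafka.common.errors.TimeoutException e) {
            last = e;
            Thread.sleep(1000L * attempt); // linear backoff; tune as needed
        }
    }
    throw last;
}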
@Override
public void seekToLatest(Set<StreamPartition<Integer>> partitions) {
    consumer.seekToEnd(partitions.stream()
            .map(e -> new TopicPartition(e.getStream(), e.getPartitionId()))
            .collect(Collectors.toList()));
}
@Override
public void seekToEarliest(Set<StreamPartition<Integer>> partitions) {
    consumer.seekToBeginning(partitions.stream()
            .map(e -> new TopicPartition(e.getStream(), e.getPartitionId()))
            .collect(Collectors.toList()));
}
/**
 * Replace the current set of maintained topics with the ones provided.
 * If topic expiry is enabled, the expiry time of the topics will be
 * reset on the next update.
 * @param topics the new collection of topics to maintain
 */
public synchronized void setTopics(Collection<String> topics) {
    Set<TopicPartition> partitionsToRemove = lastSeenLeaderEpochs.keySet().stream()
            .filter(tp -> !topics.contains(tp.topic()))
            .collect(Collectors.toSet());
    partitionsToRemove.forEach(lastSeenLeaderEpochs::remove);

    cache.retainTopics(topics);

    // Request a metadata update if the new collection contains topics we are not yet tracking.
    if (!this.topics.keySet().containsAll(topics)) {
        requestUpdateForNewTopics();
    }

    this.topics.clear();
    for (String topic : topics)
        this.topics.put(topic, TOPIC_EXPIRY_NEEDS_UPDATE);
}
@Test
public void testGetOrderedPartitionsIsConsistent() {
    KafkaTridentSpoutEmitter<String, String> emitter = new KafkaTridentSpoutEmitter<>(
        SingleTopicKafkaTridentSpoutConfiguration.createKafkaSpoutConfigBuilder(-1)
            .build(),
        topologyContextMock,
        config -> consumer, new TopicAssigner());

    Set<TopicPartition> allPartitions = new HashSet<>();
    int numPartitions = 10;
    for (int i = 0; i < numPartitions; i++) {
        allPartitions.add(new TopicPartition(SingleTopicKafkaSpoutConfiguration.TOPIC, i));
    }
    List<Map<String, Object>> serializedPartitions = allPartitions.stream()
        .map(tp -> tpSerializer.toMap(tp))
        .collect(Collectors.toList());

    List<KafkaTridentSpoutTopicPartition> orderedPartitions = emitter.getOrderedPartitions(serializedPartitions);
    assertThat("Should contain all partitions", orderedPartitions.size(), is(allPartitions.size()));

    Collections.shuffle(serializedPartitions);
    List<KafkaTridentSpoutTopicPartition> secondGetOrderedPartitions = emitter.getOrderedPartitions(serializedPartitions);
    assertThat("Ordering must be consistent", secondGetOrderedPartitions, is(orderedPartitions));

    serializedPartitions.add(tpSerializer.toMap(new TopicPartition(SingleTopicKafkaSpoutConfiguration.TOPIC, numPartitions)));
    List<KafkaTridentSpoutTopicPartition> orderedPartitionsWithNewPartition = emitter.getOrderedPartitions(serializedPartitions);
    orderedPartitionsWithNewPartition.remove(orderedPartitionsWithNewPartition.size() - 1);
    assertThat("Adding new partitions should not shuffle the existing ordering",
        orderedPartitionsWithNewPartition, is(orderedPartitions));
}
public KafkaSpoutMessageId(ConsumerRecord<?, ?> consumerRecord, boolean nullTuple) {
    this(new TopicPartition(consumerRecord.topic(), consumerRecord.partition()),
        consumerRecord.offset(),
        nullTuple);
}
@Override
public List<Message> getListWithoutAck(Long timeout, TimeUnit unit) throws CanalClientException {
    waitClientRunning();
    if (!running) {
        return Lists.newArrayList();
    }

    ConsumerRecords<String, Message> records = kafkaConsumer.poll(unit.toMillis(timeout));

    // Remember the post-poll position of each partition so a later ack can commit it.
    currentOffsets.clear();
    for (TopicPartition topicPartition : records.partitions()) {
        currentOffsets.put(topicPartition.partition(), kafkaConsumer.position(topicPartition));
    }

    if (!records.isEmpty()) {
        List<Message> messages = new ArrayList<>();
        for (ConsumerRecord<String, Message> record : records) {
            messages.add(record.value());
        }
        return messages;
    }
    return Lists.newArrayList();
}
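// A hedged usage sketch: getListWithoutAck is typically paired with an ack on success or a
// rollback on failure. The no-arg ack()/rollback() companions are an assumption here
// (suggested by the currentOffsets bookkeeping above, but not shown in this snippet):
void consumeOnce(KafkaCanalConnector connector) {
    List<Message> messages = connector.getListWithoutAck(100L, TimeUnit.MILLISECONDS);
    try {
        // handle(messages);  // hypothetical processing step
        connector.ack();      // commit the positions captured during the poll
    } catch (Exception e) {
        connector.rollback(); // seek back so the batch is re-delivered
    }
}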
@Override
public Set<TopicPartition> getAllSubscribedPartitions(Consumer<?, ?> consumer) {
    topics.clear();
    Set<TopicPartition> allPartitions = new HashSet<>();
    for (Map.Entry<String, List<PartitionInfo>> entry : consumer.listTopics().entrySet()) {
        if (pattern.matcher(entry.getKey()).matches()) {
            for (PartitionInfo partitionInfo : entry.getValue()) {
                allPartitions.add(new TopicPartition(partitionInfo.topic(), partitionInfo.partition()));
                topics.add(partitionInfo.topic());
            }
        }
    }
    return allPartitions;
}
@Override
public void setCommittedOffset(String topicName, int partition, long offset) {
    Map<TopicPartition, OffsetAndMetadata> partitionAndOffset = new HashMap<>();
    partitionAndOffset.put(new TopicPartition(topicName, partition), new OffsetAndMetadata(offset));
    offsetClient.commitSync(partitionAndOffset);
}
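// Note: the Kafka convention is that the committed offset is the *next* offset to read, so
// marking a record as processed means committing record.offset() + 1. A minimal sketch
// built on the helper above (the method name here is hypothetical):
void markProcessed(ConsumerRecord<?, ?> record) {
    setCommittedOffset(record.topic(), record.partition(), record.offset() + 1);
}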
@Test
public void shouldPreserveCommittedConsumerGroupsOffsetsOnAbortIfTransactionsAreEnabled() {
    buildMockProducer(true);
    producer.initTransactions();
    producer.beginTransaction();

    String group = "g";
    Map<TopicPartition, OffsetAndMetadata> groupCommit = new HashMap<TopicPartition, OffsetAndMetadata>() {
        {
            put(new TopicPartition(topic, 0), new OffsetAndMetadata(42L, null));
            put(new TopicPartition(topic, 1), new OffsetAndMetadata(73L, null));
        }
    };
    producer.sendOffsetsToTransaction(groupCommit, group);
    producer.commitTransaction();

    // A subsequent aborted transaction must not clobber the offsets committed above.
    producer.beginTransaction();
    producer.abortTransaction();

    Map<String, Map<TopicPartition, OffsetAndMetadata>> expectedResult = new HashMap<>();
    expectedResult.put(group, groupCommit);

    assertThat(producer.consumerGroupOffsetsHistory(), equalTo(Collections.singletonList(expectedResult)));
}
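// For contrast with the mock-based test above, a hedged sketch of the same pattern against
// a real producer. The topic name and group id are placeholders, and initTransactions()
// is assumed to have been called once beforehand:
void commitOffsetsTransactionally(KafkaProducer<byte[], byte[]> realProducer) {
    realProducer.beginTransaction();
    try {
        Map<TopicPartition, OffsetAndMetadata> offsets = Collections.singletonMap(
                new TopicPartition("topic", 0), new OffsetAndMetadata(42L));
        realProducer.sendOffsetsToTransaction(offsets, "g");
        realProducer.commitTransaction();
    } catch (KafkaException e) {
        // Offsets sent above are discarded along with the transaction.
        realProducer.abortTransaction();
    }
}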
public synchronized void addRecord(ConsumerRecord<K, V> record) {
    ensureNotClosed();
    TopicPartition tp = new TopicPartition(record.topic(), record.partition());
    Set<TopicPartition> currentAssigned = new HashSet<>(this.subscriptions.assignedPartitions());
    if (!currentAssigned.contains(tp))
        throw new IllegalStateException("Cannot add records for a partition that is not assigned to the consumer");
    List<ConsumerRecord<K, V>> recs = this.records.computeIfAbsent(tp, k -> new ArrayList<>());
    recs.add(record);
}
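// A usage sketch, assuming the method above belongs to Kafka's MockConsumer (which it
// resembles): the partition must be assigned, and its beginning offset known, before
// records can be injected and polled.
void feedMockConsumer(MockConsumer<String, String> mock) {
    TopicPartition tp = new TopicPartition("topic", 0);
    mock.assign(Collections.singletonList(tp));
    mock.updateBeginningOffsets(Collections.singletonMap(tp, 0L));
    mock.addRecord(new ConsumerRecord<>("topic", 0, 0L, "key", "value"));
    ConsumerRecords<String, String> polled = mock.poll(Duration.ofMillis(10));
    // polled now contains the injected record.
}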
private Map<TopicPartition, OffsetAndMetadata> getZookeeperOffsets(
        KafkaZkClient zkClient, KafkaConsumer<String, byte[]> consumer) {
    Map<TopicPartition, OffsetAndMetadata> offsets = new HashMap<>();
    List<PartitionInfo> partitions = consumer.partitionsFor(topicStr);
    for (PartitionInfo partition : partitions) {
        TopicPartition topicPartition = new TopicPartition(topicStr, partition.partition());
        Option<Object> optionOffset = zkClient.getConsumerOffset(groupId, topicPartition);
        if (optionOffset.nonEmpty()) {
            Long offset = (Long) optionOffset.get();
            OffsetAndMetadata offsetAndMetadata = new OffsetAndMetadata(offset);
            offsets.put(topicPartition, offsetAndMetadata);
        }
    }
    return offsets;
}
private WriteTxnMarkersRequest createWriteTxnMarkersRequest() {
    return new WriteTxnMarkersRequest.Builder(
            Collections.singletonList(new WriteTxnMarkersRequest.TxnMarkerEntry(
                    21L, (short) 42, 73, TransactionResult.ABORT,
                    Collections.singletonList(new TopicPartition("topic", 73))))).build();
}
@Override
public Long getCommittedOffset(String topicName, int partition) {
    // committed() returns null when the group has no committed offset for this partition.
    OffsetAndMetadata committed = offsetClient.committed(new TopicPartition(topicName, partition));
    return (committed != null) ? committed.offset() : null;
}
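// A hedged sketch of how the offset helpers above might be combined to resume a consumer
// from its last committed position (the consumer argument and method name are assumptions):
void resumeFromCommitted(KafkaConsumer<?, ?> consumer, String topicName, int partition) {
    TopicPartition tp = new TopicPartition(topicName, partition);
    Long committed = getCommittedOffset(topicName, partition);
    if (committed != null) {
        consumer.seek(tp, committed);
    } else {
        // Fall back to the beginning when the group has never committed for this partition.
        consumer.seekToBeginning(Collections.singletonList(tp));
    }
}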