/**
 * The partition the record was sent to.
 *
 * @return the partition id of the destination topic-partition
 */
public int partition() {
    return topicPartition.partition();
}
/** The partition id of the underlying topic-partition. */
public int partition() { return topicPart.partition(); }
/**
 * Group a list of partitions by the topic name.
 *
 * @param partitions The partitions to collect
 * @return partitions per topic
 */
public static Map<String, List<Integer>> groupPartitionsByTopic(Collection<TopicPartition> partitions) {
    Map<String, List<Integer>> byTopic = new HashMap<>();
    partitions.forEach(tp ->
        byTopic.computeIfAbsent(tp.topic(), topic -> new ArrayList<>()).add(tp.partition()));
    return byTopic;
}
/**
 * Batches (topic-partition, data) entries into per-topic buckets. A new bucket is
 * started whenever the topic differs from the previous entry's topic, so entries
 * for the same topic are merged only when they are adjacent in the iterator.
 *
 * @param iter iterator over (topic-partition, data) entries
 * @param <T>  per-partition payload type
 * @return one bucket per run of consecutive same-topic entries
 */
public static <T> List<TopicAndPartitionData<T>> batchByTopic(Iterator<Map.Entry<TopicPartition, T>> iter) {
    List<TopicAndPartitionData<T>> batches = new ArrayList<>();
    while (iter.hasNext()) {
        Map.Entry<TopicPartition, T> entry = iter.next();
        String topic = entry.getKey().topic();
        TopicAndPartitionData<T> current = batches.isEmpty() ? null : batches.get(batches.size() - 1);
        if (current == null || !current.topic.equals(topic)) {
            current = new TopicAndPartitionData<T>(topic);
            batches.add(current);
        }
        current.partitions.put(entry.getKey().partition(), entry.getValue());
    }
    return batches;
}
}
/**
 * Builds a small metadata map describing the given topic-partition.
 *
 * @param tp the topic-partition to describe
 * @return a mutable map with entries "topic" and "partition"
 */
private static Map<String, Object> tpMeta(TopicPartition tp) {
    Map<String, Object> meta = new HashMap<>();
    meta.put("topic", tp.topic());
    meta.put("partition", tp.partition());
    return meta;
}
/**
 * Group data by topic.
 *
 * @param data Data to be partitioned
 * @param <T>  Partition data type
 * @return a nested map: topic name -> (partition id -> data)
 */
public static <T> Map<String, Map<Integer, T>> groupPartitionDataByTopic(Map<TopicPartition, ? extends T> data) {
    Map<String, Map<Integer, T>> byTopic = new HashMap<>();
    data.forEach((tp, value) ->
        byTopic.computeIfAbsent(tp.topic(), topic -> new HashMap<>()).put(tp.partition(), value));
    return byTopic;
}
/**
 * Serializes the given TopicPartition to a Map so Trident can serialize it to JSON.
 *
 * @param topicPartition the topic-partition to serialize
 * @return a map holding the topic name and partition id under the configured keys
 */
public Map<String, Object> toMap(TopicPartition topicPartition) {
    Map<String, Object> serialized = new HashMap<>();
    serialized.put(TOPIC_PARTITION_TOPIC_KEY, topicPartition.topic());
    serialized.put(TOPIC_PARTITION_PARTITION_KEY, topicPartition.partition());
    return serialized;
}
/** Returns an identifier of the form {@code topic@partition}. */
@Override
public String getId() {
    return String.format("%s@%d", topicPartition.topic(), topicPartition.partition());
}
/**
 * Returns the consumer's currently assigned topic-partitions, mapped to
 * stream partitions.
 *
 * @return the set of stream partitions assigned to the underlying consumer
 */
@Override
public Set<StreamPartition<Integer>> getAssignment() {
    return consumer.assignment()
        .stream()
        .map(tp -> new StreamPartition<>(tp.topic(), tp.partition()))
        .collect(Collectors.toSet());
}
/**
 * Completes the per-replica futures from an AlterReplicaLogDirsResponse: each
 * (topic-partition, error) entry resolves the future registered for the matching
 * replica on this broker, failing the whole call if the broker answered for a
 * partition that was never requested.
 */
@Override
public void handleResponse(AbstractResponse abstractResponse) {
    AlterReplicaLogDirsResponse response = (AlterReplicaLogDirsResponse) abstractResponse;
    for (Map.Entry<TopicPartition, Errors> entry : response.responses().entrySet()) {
        TopicPartition partition = entry.getKey();
        Errors error = entry.getValue();
        TopicPartitionReplica replica =
            new TopicPartitionReplica(partition.topic(), partition.partition(), brokerId);
        KafkaFutureImpl<Void> future = futures.get(replica);
        if (future == null) {
            // The broker answered for a partition we never asked about; treat as a bug.
            handleFailure(new IllegalStateException(
                "The partition " + partition + " in the response from broker " + brokerId +
                    " is not in the request"));
        } else if (error != Errors.NONE) {
            future.completeExceptionally(error.exception());
        } else {
            future.complete(null);
        }
    }
}

@Override
/**
 * Builds the wire-format Struct for a STOP_REPLICA response at the given version:
 * the top-level error code plus one (topic, partition, error) entry per partition.
 */
@Override
protected Struct toStruct(short version) {
    Struct struct = new Struct(ApiKeys.STOP_REPLICA.responseSchema(version));
    List<Struct> partitionStructs = new ArrayList<>(responses.size());
    for (Map.Entry<TopicPartition, Errors> entry : responses.entrySet()) {
        TopicPartition tp = entry.getKey();
        Struct partitionStruct = struct.instance(PARTITIONS);
        partitionStruct.set(TOPIC_NAME, tp.topic());
        partitionStruct.set(PARTITION_ID, tp.partition());
        partitionStruct.set(ERROR_CODE, entry.getValue().code());
        partitionStructs.add(partitionStruct);
    }
    struct.set(PARTITIONS, partitionStructs.toArray());
    struct.set(ERROR_CODE, error.code());
    return struct;
}
/**
 * Builds the wire-format Struct for a LEADER_AND_ISR response at the given version:
 * the top-level error code plus one (topic, partition, error) entry per partition.
 */
@Override
protected Struct toStruct(short version) {
    Struct struct = new Struct(ApiKeys.LEADER_AND_ISR.responseSchema(version));
    List<Struct> partitionEntries = new ArrayList<>(responses.size());
    for (Map.Entry<TopicPartition, Errors> entry : responses.entrySet()) {
        TopicPartition tp = entry.getKey();
        Struct partitionEntry = struct.instance(PARTITIONS);
        partitionEntry.set(TOPIC_NAME, tp.topic());
        partitionEntry.set(PARTITION_ID, tp.partition());
        partitionEntry.set(ERROR_CODE, entry.getValue().code());
        partitionEntries.add(partitionEntry);
    }
    struct.set(PARTITIONS, partitionEntries.toArray());
    struct.set(ERROR_CODE, error.code());
    return struct;
}
/**
 * Builds the wire-format Struct for a CONTROLLED_SHUTDOWN response at the given
 * version: the top-level error code plus the set of partitions still remaining.
 */
@Override
protected Struct toStruct(short version) {
    Struct struct = new Struct(ApiKeys.CONTROLLED_SHUTDOWN.responseSchema(version));
    struct.set(ERROR_CODE, error.code());
    List<Struct> remaining = new ArrayList<>(partitionsRemaining.size());
    for (TopicPartition tp : partitionsRemaining) {
        Struct tpStruct = struct.instance(PARTITIONS_REMAINING_KEY_NAME);
        tpStruct.set(TOPIC_NAME, tp.topic());
        tpStruct.set(PARTITION_ID, tp.partition());
        remaining.add(tpStruct);
    }
    struct.set(PARTITIONS_REMAINING_KEY_NAME, remaining.toArray());
    return struct;
}
}
private void checkValues(TopicPartition deSerTP) { //assert deserialized values are same as original assertEquals("partition number should be " + partNum + " but got " + deSerTP.partition(), partNum, deSerTP.partition()); assertEquals("topic should be " + topicName + " but got " + deSerTP.topic(), topicName, deSerTP.topic()); }
/**
 * Records a per-partition "records lead" observation, lazily creating the
 * partition-level sensor (value / min / avg metrics) on first use.
 *
 * @param tp   the partition the lead was observed for
 * @param lead the lead value to record
 */
private void recordPartitionLead(TopicPartition tp, long lead) {
    this.recordsFetchLead.record(lead);

    String sensorName = partitionLeadMetricName(tp);
    Sensor sensor = this.metrics.getSensor(sensorName);
    if (sensor == null) {
        // Dots in the topic name are replaced with underscores in the metric tag.
        Map<String, String> tags = new HashMap<>(2);
        tags.put("topic", tp.topic().replace('.', '_'));
        tags.put("partition", String.valueOf(tp.partition()));

        sensor = this.metrics.sensor(sensorName);
        sensor.add(this.metrics.metricInstance(metricsRegistry.partitionRecordsLead, tags), new Value());
        sensor.add(this.metrics.metricInstance(metricsRegistry.partitionRecordsLeadMin, tags), new Min());
        sensor.add(this.metrics.metricInstance(metricsRegistry.partitionRecordsLeadAvg, tags), new Avg());
    }
    sensor.record(lead);
}
/**
 * Records a per-partition "records lag" observation, lazily creating the
 * partition-level sensor (value / max / avg metrics) on first use.
 *
 * @param tp  the partition the lag was observed for
 * @param lag the lag value to record
 */
private void recordPartitionLag(TopicPartition tp, long lag) {
    this.recordsFetchLag.record(lag);

    String sensorName = partitionLagMetricName(tp);
    Sensor sensor = this.metrics.getSensor(sensorName);
    if (sensor == null) {
        // Dots in the topic name are replaced with underscores in the metric tag.
        Map<String, String> tags = new HashMap<>(2);
        tags.put("topic", tp.topic().replace('.', '_'));
        tags.put("partition", String.valueOf(tp.partition()));

        sensor = this.metrics.sensor(sensorName);
        sensor.add(this.metrics.metricInstance(metricsRegistry.partitionRecordsLag, tags), new Value());
        sensor.add(this.metrics.metricInstance(metricsRegistry.partitionRecordsLagMax, tags), new Max());
        sensor.add(this.metrics.metricInstance(metricsRegistry.partitionRecordsLagAvg, tags), new Avg());
    }
    sensor.record(lag);
}
@Override public ConsumerRecords<K, V> onConsume(ConsumerRecords<K, V> records) { onConsumeCount++; if (throwExceptionOnConsume) throw new KafkaException("Injected exception in FilterConsumerInterceptor.onConsume."); // filters out topic/partitions with partition == FILTER_PARTITION Map<TopicPartition, List<ConsumerRecord<K, V>>> recordMap = new HashMap<>(); for (TopicPartition tp : records.partitions()) { if (tp.partition() != filterPartition) recordMap.put(tp, records.records(tp)); } return new ConsumerRecords<K, V>(recordMap); }
/**
 * Verifies that a RecordMetadata built with a missing (negative) relative offset
 * reports hasOffset() == false and an offset of -1, while the topic, partition,
 * timestamp, checksum, and serialized sizes round-trip unchanged.
 */
@Test
@SuppressWarnings("deprecation")
public void testConstructionWithMissingRelativeOffset() {
    TopicPartition topicPartition = new TopicPartition("foo", 0);
    long timestamp = 2340234L;
    int keySize = 3;
    int valueSize = 5;
    Long checksum = 908923L;

    RecordMetadata metadata =
        new RecordMetadata(topicPartition, -1L, -1L, timestamp, checksum, keySize, valueSize);

    assertEquals(topicPartition.topic(), metadata.topic());
    assertEquals(topicPartition.partition(), metadata.partition());
    assertEquals(timestamp, metadata.timestamp());
    assertFalse(metadata.hasOffset());
    assertEquals(-1L, metadata.offset());
    assertEquals(checksum.longValue(), metadata.checksum());
    assertEquals(keySize, metadata.serializedKeySize());
    assertEquals(valueSize, metadata.serializedValueSize());
}