/**
 * Builds a {@code TopicPartition} pojo from a JSON request body (used only by the
 * POST and PUT REST handlers).
 *
 * @param jsonRequest JSON string expected to contain "topic" (string) and
 *                    "numPartitions" (int)
 * @return a new TopicPartition carrying the two required fields
 * @throws IllegalArgumentException if either required field is missing
 *         (IllegalArgumentException is a RuntimeException, so existing callers
 *         catching RuntimeException are unaffected)
 */
public static TopicPartition init(String jsonRequest) {
  JSONObject jsonObject = JSON.parseObject(jsonRequest);
  if (!jsonObject.containsKey("topic")) {
    throw new IllegalArgumentException("Cannot initialize TopicPartitionInfo, missing field: topic");
  }
  if (!jsonObject.containsKey("numPartitions")) {
    throw new IllegalArgumentException(
        "Cannot initialize TopicPartitionInfo, missing field: numPartitions");
  }
  return new TopicPartition(jsonObject.getString("topic"),
      jsonObject.getIntValue("numPartitions"));
}
/**
 * Registers the given topic with the mirror maker by delegating to the
 * (topic name, partition count) overload.
 */
public synchronized void addTopicToMirrorMaker(TopicPartition topicPartitionInfo) {
  String topic = topicPartitionInfo.getTopic();
  int numPartitions = topicPartitionInfo.getPartition();
  this.addTopicToMirrorMaker(topic, numPartitions);
}
/**
 * Handles HTTP PUT requests that expand an already-whitelisted topic to a larger
 * partition count. Responds 404 when the topic is not managed by the mirror maker
 * and 500 when the request body cannot be read.
 *
 * @param entity request entity whose text body is the JSON topic descriptor
 * @return a plain-text representation describing the outcome
 */
@Override
@Put("json")
public Representation put(Representation entity) {
  try {
    TopicPartition topicPartitionInfo = TopicPartition.init(entity.getText());
    String topic = topicPartitionInfo.getTopic();
    // Expanding a topic implies it should no longer be blacklisted from auto-whitelisting.
    if (_autoTopicWhitelistingManager != null) {
      _autoTopicWhitelistingManager.removeFromBlacklist(topic);
    }
    if (!_helixMirrorMakerManager.isTopicExisted(topic)) {
      getResponse().setStatus(Status.CLIENT_ERROR_NOT_FOUND);
      return new StringRepresentation(String.format(
          "Failed to expand topic, topic: %s is not existed!", topic));
    }
    _helixMirrorMakerManager.expandTopicInMirrorMaker(topicPartitionInfo);
    return new StringRepresentation(
        String.format("Successfully expand topic: %s", topicPartitionInfo));
  } catch (IOException e) {
    LOGGER.error("Got error during processing Put request", e);
    getResponse().setStatus(Status.SERVER_ERROR_INTERNAL);
    return new StringRepresentation(
        String.format("Failed to expand topic, with exception: %s", e));
  }
}
/**
 * Looks up the cached partition info for a topic.
 *
 * @param topic topic name to look up
 * @return a defensive copy of the cached TopicPartition, or null when the topic
 *         is not in the cache
 */
public TopicPartition getTopicPartition(String topic) {
  TopicPartition cached = _topicPartitionInfoMap.get(topic);
  if (cached == null) {
    return null;
  }
  // Return a copy so callers cannot mutate the cached entry.
  return new TopicPartition(topic, cached.getPartition());
}
/**
 * Builds a POST request against the controller's /topics/ endpoint that creates
 * the given topic with the given partition count.
 *
 * @param topic         topic to create
 * @param numPartitions number of partitions for the new topic
 * @return a Restlet Request with the topic descriptor serialized as JSON
 */
public Request getTopicCreationRequestUrl(String topic, int numPartitions) {
  TopicPartition descriptor = new TopicPartition(topic, numPartitions);
  Request request = new Request(Method.POST, _baseUrl + "/topics/");
  request.setEntity(descriptor.toJSON().toJSONString(), MediaType.APPLICATION_JSON);
  return request;
}
int numPartitionsInMirrorMaker = _helixMirrorMakerManager.getIdealStateForTopic(topic).getNumPartitions(); if (numPartitionsInMirrorMaker != tp.getPartition()) { int mismatchedPartitions = Math.abs(numPartitionsInMirrorMaker - tp.getPartition()); if (_enableAutoTopicExpansion && (tp.getPartition() > numPartitionsInMirrorMaker)) { topic, numPartitionsInMirrorMaker, tp.getPartition()); _numAutoExpandedTopics.inc(); _numAutoExpandedTopicPartitions.inc(mismatchedPartitions); LOGGER.warn( "Number of partitions not matched for topic {} between mirrormaker:{} and source kafka broker: {}!", topic, numPartitionsInMirrorMaker, tp.getPartition());
TopicPartition.getWorkloadComparator(_helixMirrorMakerManager.getWorkloadInfoRetriever()))); sameTopic.add(tp); i++; while (i < partitionsToBeAssigned.size() && partitionsToBeAssigned.get(i).getTopic().equals(tp.getTopic())) { sameTopic.add(partitionsToBeAssigned.get(i)); i++;
/**
 * Sums the per-partition workload of every partition in this set.
 *
 * @param infoRetriever source of per-topic workload figures
 * @param weighter      optional per-partition weight; a null weighter means every
 *                      partition counts with weight 1.0
 * @return the accumulated workload across all partitions
 */
public TopicWorkload totalWorkload(WorkloadInfoRetriever infoRetriever,
    ITopicWorkloadWeighter weighter) {
  TopicWorkload total = new TopicWorkload(0, 0, 0);
  for (TopicPartition part : _topicPartitionSet) {
    double weight = 1.0;
    if (weighter != null) {
      weight = weighter.partitionWeight(part);
    }
    TopicWorkload tw = infoRetriever.topicWorkload(part.getTopic());
    total.add(tw.getBytesPerSecondPerPartition() * weight,
        tw.getMsgsPerSecondPerPartition() * weight);
  }
  return total;
}
/**
 * Finds topics whose partition count in the Helix ideal state disagrees with the
 * partition count observed on the source Kafka broker. Topics the source observer
 * does not know about are skipped.
 *
 * @return the set of topic names with mismatched partition counts
 */
private Set<String> getPartitionMismatchedTopics() {
  Set<String> partitionsMismatchedTopics = new HashSet<String>();
  for (String topicName : _helixMirrorMakerManager.getTopicLists()) {
    int numPartitionsInHelix =
        _helixMirrorMakerManager.getIdealStateForTopic(topicName).getNumPartitions();
    // Hoisted into a local: the original queried the observer twice per topic.
    TopicPartition srcTopicPartition = _srcKafkaTopicObserver.getTopicPartition(topicName);
    if (srcTopicPartition != null
        && numPartitionsInHelix != srcTopicPartition.getPartition()) {
      partitionsMismatchedTopics.add(topicName);
    }
  }
  return partitionsMismatchedTopics;
}
/**
 * Builds a PUT request against the controller's /topics/ endpoint that expands
 * the given topic to the given partition count.
 *
 * @param topic         topic to expand
 * @param numPartitions target number of partitions
 * @return a Restlet Request with the topic descriptor serialized as JSON
 */
public Request getTopicExpansionRequestUrl(String topic, int numPartitions) {
  TopicPartition descriptor = new TopicPartition(topic, numPartitions);
  Request request = new Request(Method.PUT, _baseUrl + "/topics/");
  request.setEntity(descriptor.toJSON().toJSONString(), MediaType.APPLICATION_JSON);
  return request;
}
}
/**
 * Returns a copy of the cached partition info for the given topic, or null when
 * the topic is not tracked. A copy is returned so the cache entry stays private.
 *
 * @param topic topic name to look up
 * @return copied TopicPartition or null if absent
 */
public TopicPartition getTopicPartition(String topic) {
  TopicPartition info = _topicPartitionInfoMap.get(topic);
  return (info == null) ? null : new TopicPartition(topic, info.getPartition());
}
/**
 * Return the lagging time if the given partition has lag.
 *
 * @param tp topic partition
 * @return the lagging time in seconds if the given partition has lag; otherwise return 0.
 */
private long getLagTime(TopicPartition tp) {
  TopicPartitionLag lagSample =
      _helixMirrorMakerManager.getOffsetMonitor().getTopicPartitionOffset(tp);
  // No sample, non-positive offsets, or a sample older than the validity window
  // all mean "no usable lag information".
  if (lagSample == null || lagSample.getLatestOffset() <= 0 || lagSample.getCommitOffset() <= 0
      || System.currentTimeMillis() - lagSample.getTimeStamp() > _offsetMaxValidTimeMillis) {
    return 0;
  }
  long offsetLag = lagSample.getLatestOffset() - lagSample.getCommitOffset();
  if (offsetLag <= _minLagOffset) {
    return 0;
  }
  // Convert the offset lag into seconds using the topic's message rate,
  // clamped to at least 1 msg/s to avoid division blow-up.
  double msgRate = _helixMirrorMakerManager.getWorkloadInfoRetriever()
      .topicWorkload(tp.getTopic()).getMsgsPerSecondPerPartition();
  msgRate = Math.max(msgRate, 1);
  double lagSeconds = offsetLag / msgRate;
  return (lagSeconds > _minLagTimeSec) ? Math.round(lagSeconds) : 0;
}
/**
 * Expands the given topic in the mirror maker by delegating to the
 * (topic name, partition count) overload.
 */
public synchronized void expandTopicInMirrorMaker(TopicPartition topicPartitionInfo) {
  String topic = topicPartitionInfo.getTopic();
  int numPartitions = topicPartitionInfo.getPartition();
  this.expandTopicInMirrorMaker(topic, numPartitions);
}
scala.collection.Map<Object, Seq<Object>> partitionsMap = partitionAssignmentForTopics.get(topic).get(); TopicPartition tp = new TopicPartition(topic, partitionsMap.size()); _topicPartitionInfoMap.put(topic, tp); } catch (Exception e) {
@Test public void testAutoTopic() { for (int i = 0; i < 10; ++i) { Assert.assertEquals(helixMirrorMakerManager.getTopicLists().size(), i); String topicName = "testTopic" + i; // Create Kafka topic KafkaStarterUtils.createTopic(topicName, KafkaStarterUtils.DEFAULT_ZK_STR); try { Thread.sleep(5000); } catch (Exception e) { } Assert.assertEquals(kafkaBrokerTopicObserver.getNumTopics(), 1 + i); for (int j = 0; j <= i; ++j) { Assert.assertTrue(kafkaBrokerTopicObserver.getAllTopics().contains("testTopic" + j)); Assert.assertEquals( kafkaBrokerTopicObserver.getTopicPartition("testTopic" + j).getPartition(), 1); } } } }
/**
 * Creates the PUT request used to ask the controller to expand {@code topic}
 * to {@code numPartitions} partitions; the body is the JSON topic descriptor.
 *
 * @param topic         topic to expand
 * @param numPartitions target partition count
 * @return the prepared Restlet request
 */
public Request getTopicExpansionRequestUrl(String topic, int numPartitions) {
  TopicPartition topicPartitionInfo = new TopicPartition(topic, numPartitions);
  String jsonBody = topicPartitionInfo.toJSON().toJSONString();
  Request request = new Request(Method.PUT, _baseUrl + "/topics/");
  request.setEntity(jsonBody, MediaType.APPLICATION_JSON);
  return request;
}
}
topicPartitionInfo = TopicPartition.init(jsonRequest); _autoTopicWhitelistingManager.removeFromBlacklist(topicPartitionInfo.getTopic()); if (_helixMirrorMakerManager.isTopicExisted(topicPartitionInfo.getTopic())) { getResponse().setStatus(Status.CLIENT_ERROR_NOT_FOUND); return new StringRepresentation(String.format( "Failed to add new topic: %s, it is already existed!", topicPartitionInfo.getTopic())); } else { _helixMirrorMakerManager.addTopicToMirrorMaker(topicPartitionInfo);
TopicWorkload tw = retriever.topicWorkload(tp.getTopic()); totalWorkload.add(weight * tw.getBytesPerSecondPerPartition(), weight * tw.getMsgsPerSecondPerPartition()); noPartitions = false; if (weight > 0) { partitionCount++; TopicWorkload tw = retriever.topicWorkload(tp.getTopic()); totalWorkload.add(weight * tw.getBytesPerSecondPerPartition(), weight * tw.getMsgsPerSecondPerPartition()); noPartitions = false; if (weighter.partitionWeight(tp) > 0) { double weight = weighter.partitionWeight(tp); TopicWorkload tw = retriever.topicWorkload(tp.getTopic()); if (tw.compareTotal(averageWorkload) > 0) { excludeInstances++; continue; TopicWorkload tpWorkload = retriever.topicWorkload(tp.getTopic()); workloadToRemove.add(weight * tpWorkload.getBytesPerSecondPerPartition(), weight * tpWorkload.getMsgsPerSecondPerPartition());
/**
 * Whitelists each candidate topic in the mirror maker: topics already managed are
 * expanded to the source partition count, new topics are added. Candidates that the
 * source observer no longer knows about are counted as errors and skipped.
 * (Method name keeps the historical "Candiate" spelling used by its callers.)
 *
 * @param candidateTopicsToWhitelist topics selected for whitelisting
 */
private void whitelistCandiateTopics(Set<String> candidateTopicsToWhitelist) {
  for (String topic : candidateTopicsToWhitelist) {
    TopicPartition tp = _srcKafkaTopicObserver.getTopicPartition(topic);
    if (tp == null) {
      // Candidate vanished from the source cluster between selection and processing.
      LOGGER.error("Shouldn't hit here, don't know why topic {} is not in src Kafka cluster",
          topic);
      _numErrorTopics.inc();
      continue;
    }
    if (_helixMirrorMakerManager.isTopicExisted(topic)) {
      LOGGER.info("Trying to expand topic: {} with {} partitions", tp.getTopic(),
          tp.getPartition());
      _helixMirrorMakerManager.expandTopicInMirrorMaker(tp);
      _numAutoExpandedTopics.inc();
    } else {
      LOGGER.info("Trying to whitelist topic: {} with {} partitions", tp.getTopic(),
          tp.getPartition());
      _helixMirrorMakerManager.addTopicToMirrorMaker(tp);
      _numWhitelistedTopics.inc();
    }
  }
}
/**
 * ZooKeeper child-change callback that incrementally reconciles the local
 * topic -> partition-count cache with the current set of topic znodes: removed
 * topics are evicted and newly added topics have their partition assignment
 * fetched from Kafka ZK. Skipped entirely when a full cache refresh just ran.
 *
 * @param parentPath    ZK path whose children changed (unused here)
 * @param currentChilds current list of child znodes (topic names)
 * @throws Exception propagated from the refresh path per the listener contract
 */
@Override
public void handleChildChange(String parentPath, List<String> currentChilds) throws Exception {
  if (!tryToRefreshCache()) {
    synchronized (_lock) {
      Set<String> newAddedTopics = new HashSet<String>(currentChilds);
      Set<String> currentServingTopics = getAllTopics();
      newAddedTopics.removeAll(currentServingTopics);
      // Evict topics that disappeared from ZK.
      for (String existedTopic : currentServingTopics) {
        if (!currentChilds.contains(existedTopic)) {
          _topicPartitionInfoMap.remove(existedTopic);
        }
      }
      scala.collection.mutable.Map<String, scala.collection.Map<Object, Seq<Object>>> partitionAssignmentForTopics =
          _zkUtils.getPartitionAssignmentForTopics(
              JavaConversions.asScalaBuffer(ImmutableList.copyOf(newAddedTopics)));
      for (String topic : newAddedTopics) {
        try {
          scala.collection.Map<Object, Seq<Object>> partitionsMap =
              partitionAssignmentForTopics.get(topic).get();
          TopicPartition tp = new TopicPartition(topic, partitionsMap.size());
          _topicPartitionInfoMap.put(topic, tp);
        } catch (Exception e) {
          // Pass the exception as the trailing throwable argument (no "{}" for it)
          // so SLF4J records the full stack trace instead of formatting e.toString().
          LOGGER.warn("Failed to get topicPartition info for {} from kafka zk", topic, e);
        }
      }
      // Adjust the gauge-style counter to match the current cache size.
      _kafkaTopicsCounter.inc(_topicPartitionInfoMap.size() - _kafkaTopicsCounter.getCount());
    }
  }
}