/**
 * Whitelists a topic in the mirror maker, unpacking the topic name and
 * partition count from the given {@link TopicPartition} holder.
 *
 * @param topicPartitionInfo carries the topic name and its number of partitions
 */
public synchronized void addTopicToMirrorMaker(TopicPartition topicPartitionInfo) {
  final String topic = topicPartitionInfo.getTopic();
  final int numPartitions = topicPartitionInfo.getPartition();
  this.addTopicToMirrorMaker(topic, numPartitions);
}
/**
 * Expands an already-whitelisted topic to a new partition count, unpacking
 * the topic name and partition count from the given {@link TopicPartition}.
 *
 * @param topicPartitionInfo carries the topic name and its number of partitions
 */
public synchronized void expandTopicInMirrorMaker(TopicPartition topicPartitionInfo) {
  final String topic = topicPartitionInfo.getTopic();
  final int numPartitions = topicPartitionInfo.getPartition();
  this.expandTopicInMirrorMaker(topic, numPartitions);
}
/**
 * Whitelists or expands each candidate topic: topics already managed by the
 * mirror maker are expanded to the partition count observed in the source
 * cluster; new topics are whitelisted. Topics the source-cluster observer no
 * longer knows about are counted as errors and skipped.
 *
 * @param candidateTopicsToWhitelist topic names to whitelist or expand
 */
private void whitelistCandiateTopics(Set<String> candidateTopicsToWhitelist) {
  for (String topic : candidateTopicsToWhitelist) {
    TopicPartition tp = _srcKafkaTopicObserver.getTopicPartition(topic);
    if (tp == null) {
      // Candidate came from the observer, so a miss here is unexpected.
      LOGGER.error("Shouldn't hit here, don't know why topic {} is not in src Kafka cluster",
          topic);
      _numErrorTopics.inc();
      continue;
    }
    if (_helixMirrorMakerManager.isTopicExisted(topic)) {
      LOGGER.info("Trying to expand topic: {} with {} partitions", tp.getTopic(),
          tp.getPartition());
      _helixMirrorMakerManager.expandTopicInMirrorMaker(tp);
      _numAutoExpandedTopics.inc();
    } else {
      LOGGER.info("Trying to whitelist topic: {} with {} partitions", tp.getTopic(),
          tp.getPartition());
      _helixMirrorMakerManager.addTopicToMirrorMaker(tp);
      _numWhitelistedTopics.inc();
    }
  }
}
/**
 * Builds one Helix {@link IdealState} per topic from an instance-to-partition
 * assignment. Every served partition is pinned ONLINE on its assigned instance,
 * and each ideal state's partition count / max-partitions-per-instance are set
 * to the number of partitions actually assigned for that topic.
 *
 * <p>Fixes vs. previous version: diamond operator (consistent with the sibling
 * copy of this method elsewhere in the codebase), single map lookup instead of
 * containsKey/get/put, and entrySet iteration instead of keySet + get.
 *
 * @param newAssignment instances with the topic-partitions each one serves
 * @return map from topic name to its built {@link IdealState}
 */
public static Map<String, IdealState> getIdealStatesFromAssignment(
    Set<InstanceTopicPartitionHolder> newAssignment) {
  Map<String, CustomModeISBuilder> idealStatesBuilderMap = new HashMap<>();
  for (InstanceTopicPartitionHolder instance : newAssignment) {
    for (TopicPartition tpi : instance.getServingTopicPartitionSet()) {
      String topicName = tpi.getTopic();
      String partition = Integer.toString(tpi.getPartition());
      // Single get() instead of containsKey + put + get (three lookups).
      CustomModeISBuilder builder = idealStatesBuilderMap.get(topicName);
      if (builder == null) {
        builder = new CustomModeISBuilder(topicName);
        builder.setStateModel(OnlineOfflineStateModel.name).setNumReplica(1);
        idealStatesBuilderMap.put(topicName, builder);
      }
      builder.assignInstanceAndState(partition, instance.getInstanceName(), "ONLINE");
    }
  }
  Map<String, IdealState> idealStatesMap = new HashMap<>();
  // entrySet iteration avoids a redundant get() per topic.
  for (Map.Entry<String, CustomModeISBuilder> entry : idealStatesBuilderMap.entrySet()) {
    IdealState idealState = entry.getValue().build();
    int numPartitions = idealState.getPartitionSet().size();
    idealState.setMaxPartitionsPerInstance(numPartitions);
    idealState.setNumPartitions(numPartitions);
    idealStatesMap.put(entry.getKey(), idealState);
  }
  return idealStatesMap;
}
/**
 * REST handler: expands an existing topic in the mirror maker to the partition
 * count supplied in the JSON request body.
 *
 * <p>Fix: the catch is broadened from {@code IOException} to {@code Exception}
 * so that an unchecked failure (e.g. malformed JSON inside
 * {@code TopicPartition.init}) produces a 500 response instead of escaping the
 * handler — consistent with the sibling handler that already catches
 * {@code Exception}.
 *
 * @param entity request body containing the JSON-encoded topic/partition info
 * @return a representation describing success or the failure reason
 */
@Override
@Put("json")
public Representation put(Representation entity) {
  try {
    String jsonRequest = entity.getText();
    TopicPartition topicPartitionInfo = TopicPartition.init(jsonRequest);
    if (_autoTopicWhitelistingManager != null) {
      // Make sure a previously blacklisted topic can be managed again.
      _autoTopicWhitelistingManager.removeFromBlacklist(topicPartitionInfo.getTopic());
    }
    if (_helixMirrorMakerManager.isTopicExisted(topicPartitionInfo.getTopic())) {
      _helixMirrorMakerManager.expandTopicInMirrorMaker(topicPartitionInfo);
      return new StringRepresentation(
          String.format("Successfully expand topic: %s", topicPartitionInfo));
    } else {
      // Can only expand a topic that is already mirrored.
      getResponse().setStatus(Status.CLIENT_ERROR_NOT_FOUND);
      return new StringRepresentation(String.format(
          "Failed to expand topic, topic: %s is not existed!", topicPartitionInfo.getTopic()));
    }
  } catch (Exception e) {
    LOGGER.error("Got error during processing Put request", e);
    getResponse().setStatus(Status.SERVER_ERROR_INTERNAL);
    return new StringRepresentation(
        String.format("Failed to expand topic, with exception: %s", e));
  }
}
// NOTE(review): fragment — the enclosing method signature and closing braces are
// not visible in this chunk; only comments added, code unchanged.
// Clear the topic from the auto-whitelisting blacklist so it can be managed again.
_autoTopicWhitelistingManager.removeFromBlacklist(topicPartitionInfo.getTopic());
if (_helixMirrorMakerManager.isTopicExisted(topicPartitionInfo.getTopic())) {
  // Adding an already-mirrored topic is rejected (404 per this handler's convention).
  getResponse().setStatus(Status.CLIENT_ERROR_NOT_FOUND);
  return new StringRepresentation(String.format(
      "Failed to add new topic: %s, it is already existed!", topicPartitionInfo.getTopic()));
} else {
  // New topic: register it with the mirror maker.
  _helixMirrorMakerManager.addTopicToMirrorMaker(topicPartitionInfo);
// Orders topic-partitions primarily by per-topic workload, breaking ties by
// topic name and then by partition number so the order is total and stable.
// NOTE(review): the anonymous-class opener for this comparator is outside this
// chunk; the trailing "};" below closes it.
@Override
public int compare(TopicPartition o1, TopicPartition o2) {
  TopicWorkload workload1 = infoRetriever.topicWorkload(o1.getTopic());
  TopicWorkload workload2 = infoRetriever.topicWorkload(o2.getTopic());
  // Reference-equality fast path: the retriever may hand back the same
  // TopicWorkload instance for both topics (e.g. a shared default).
  if (workload1 == workload2) {
    return 0;
  }
  int cmp = workload1.compareTo(workload2);
  if (cmp != 0) {
    return cmp;
  }
  // if workload is the same, compare them based on the name and partition
  cmp = o1.getTopic().compareTo(o2.getTopic());
  if (cmp != 0) {
    return cmp;
  }
  // Integer.compare avoids the subtraction-overflow pitfall.
  return Integer.compare(o1.getPartition(), o2.getPartition());
}
};
/**
 * Convenience overload: whitelists a topic using the name and partition count
 * held by the given {@link TopicPartition}.
 *
 * @param topicPartitionInfo carries the topic name and its number of partitions
 */
public synchronized void addTopicToMirrorMaker(TopicPartition topicPartitionInfo) {
  this.addTopicToMirrorMaker(
      topicPartitionInfo.getTopic(), topicPartitionInfo.getPartition());
}
/**
 * Adapts an observer-side {@link TopicPartition} into a Kafka
 * {@link TopicAndPartition}.
 *
 * @param topicPartition the topic/partition pair to convert
 * @return the equivalent Kafka {@code TopicAndPartition}
 */
private TopicAndPartition toTopicAndPartition(TopicPartition topicPartition) {
  final String topic = topicPartition.getTopic();
  final int partition = topicPartition.getPartition();
  return new TopicAndPartition(topic, partition);
}
/**
 * Convenience overload: expands an already-mirrored topic using the name and
 * partition count held by the given {@link TopicPartition}.
 *
 * @param topicPartitionInfo carries the topic name and its number of partitions
 */
public synchronized void expandTopicInMirrorMaker(TopicPartition topicPartitionInfo) {
  this.expandTopicInMirrorMaker(
      topicPartitionInfo.getTopic(), topicPartitionInfo.getPartition());
}
/**
 * For each candidate topic: expand it if the mirror maker already manages it,
 * whitelist it otherwise. Candidates unknown to the source-cluster observer
 * are counted as errors and skipped.
 *
 * @param candidateTopicsToWhitelist topic names to whitelist or expand
 */
private void whitelistCandiateTopics(Set<String> candidateTopicsToWhitelist) {
  for (String topic : candidateTopicsToWhitelist) {
    TopicPartition topicPartition = _srcKafkaTopicObserver.getTopicPartition(topic);
    if (topicPartition != null) {
      if (_helixMirrorMakerManager.isTopicExisted(topic)) {
        LOGGER.info("Trying to expand topic: {} with {} partitions",
            topicPartition.getTopic(), topicPartition.getPartition());
        _helixMirrorMakerManager.expandTopicInMirrorMaker(topicPartition);
        _numAutoExpandedTopics.inc();
      } else {
        LOGGER.info("Trying to whitelist topic: {} with {} partitions",
            topicPartition.getTopic(), topicPartition.getPartition());
        _helixMirrorMakerManager.addTopicToMirrorMaker(topicPartition);
        _numWhitelistedTopics.inc();
      }
    } else {
      // Candidates come from the observer, so this lookup miss is unexpected.
      LOGGER.error("Shouldn't hit here, don't know why topic {} is not in src Kafka cluster",
          topic);
      _numErrorTopics.inc();
    }
  }
}
/**
 * Builds one Helix {@link IdealState} per topic from an instance-to-partition
 * assignment. Every served partition is pinned ONLINE on its assigned instance,
 * and each ideal state's partition count / max-partitions-per-instance are set
 * to the number of partitions actually assigned for that topic.
 *
 * <p>Fixes vs. previous version: single map lookup instead of
 * containsKey/get/put, and entrySet iteration instead of keySet + get.
 *
 * @param newAssignment instances with the topic-partitions each one serves
 * @return map from topic name to its built {@link IdealState}
 */
public static Map<String, IdealState> getIdealStatesFromAssignment(
    Set<InstanceTopicPartitionHolder> newAssignment) {
  Map<String, CustomModeISBuilder> idealStatesBuilderMap = new HashMap<>();
  for (InstanceTopicPartitionHolder instance : newAssignment) {
    for (TopicPartition tpi : instance.getServingTopicPartitionSet()) {
      String topicName = tpi.getTopic();
      String partition = Integer.toString(tpi.getPartition());
      // Single get() instead of containsKey + put + get (three lookups).
      CustomModeISBuilder builder = idealStatesBuilderMap.get(topicName);
      if (builder == null) {
        builder = new CustomModeISBuilder(topicName);
        builder.setStateModel(OnlineOfflineStateModel.name).setNumReplica(1);
        idealStatesBuilderMap.put(topicName, builder);
      }
      builder.assignInstanceAndState(partition, instance.getInstanceName(), "ONLINE");
    }
  }
  Map<String, IdealState> idealStatesMap = new HashMap<>();
  // entrySet iteration avoids a redundant get() per topic.
  for (Map.Entry<String, CustomModeISBuilder> entry : idealStatesBuilderMap.entrySet()) {
    IdealState idealState = entry.getValue().build();
    int numPartitions = idealState.getPartitionSet().size();
    idealState.setMaxPartitionsPerInstance(numPartitions);
    idealState.setNumPartitions(numPartitions);
    idealStatesMap.put(entry.getKey(), idealState);
  }
  return idealStatesMap;
}
/**
 * REST handler: expands an existing topic in the mirror maker to the partition
 * count supplied in the JSON request body. Responds 404 if the topic is not
 * mirrored, 500 on any processing failure.
 *
 * @param entity request body containing the JSON-encoded topic/partition info
 * @return a representation describing success or the failure reason
 */
@Override
@Put("json")
public Representation put(Representation entity) {
  try {
    String jsonRequest = entity.getText();
    TopicPartition topicPartitionInfo = TopicPartition.init(jsonRequest);
    String topic = topicPartitionInfo.getTopic();
    if (_autoTopicWhitelistingManager != null) {
      // Make sure a previously blacklisted topic can be managed again.
      _autoTopicWhitelistingManager.removeFromBlacklist(topic);
    }
    // Guard clause: can only expand a topic that is already mirrored.
    if (!_helixMirrorMakerManager.isTopicExisted(topic)) {
      getResponse().setStatus(Status.CLIENT_ERROR_NOT_FOUND);
      return new StringRepresentation(String.format(
          "Failed to expand topic, topic: %s is not existed!", topic));
    }
    _helixMirrorMakerManager.expandTopicInMirrorMaker(topicPartitionInfo);
    return new StringRepresentation(
        String.format("Successfully expand topic: %s", topicPartitionInfo));
  } catch (Exception e) {
    LOGGER.error("Got error during processing Put request", e);
    getResponse().setStatus(Status.SERVER_ERROR_INTERNAL);
    return new StringRepresentation(
        String.format("Failed to expand topic, with exception: %s", e));
  }
}
/**
 * Sums the workload of every topic-partition this holder serves, scaling each
 * partition's per-partition workload by the weighter (weight 1.0 when the
 * weighter is null).
 *
 * @param infoRetriever source of per-topic workload figures
 * @param weighter optional per-partition weight; may be null
 * @return the accumulated total workload
 */
public TopicWorkload totalWorkload(WorkloadInfoRetriever infoRetriever,
    ITopicWorkloadWeighter weighter) {
  TopicWorkload total = new TopicWorkload(0, 0, 0);
  for (TopicPartition partition : _topicPartitionSet) {
    TopicWorkload perPartition = infoRetriever.topicWorkload(partition.getTopic());
    double scale = 1.0;
    if (weighter != null) {
      scale = weighter.partitionWeight(partition);
    }
    total.add(perPartition.getBytesPerSecondPerPartition() * scale,
        perPartition.getMsgsPerSecondPerPartition() * scale);
  }
  return total;
}
// NOTE(review): fragment — the enclosing method signature and closing braces are
// not visible in this chunk; only comments added, code unchanged.
// NOTE(review): `topicName` is declared outside this span — presumably
// topicPartitionInfo.getTopic(); verify against the enclosing method.
// Clear the topic from the auto-whitelisting blacklist so it can be managed again.
_autoTopicWhitelistingManager.removeFromBlacklist(topicPartitionInfo.getTopic());
if (_helixMirrorMakerManager.isTopicExisted(topicPartitionInfo.getTopic())) {
  LOGGER.info("topic {} already on mm", topicName);
  // Adding an already-mirrored topic is rejected (404 per this handler's convention).
  getResponse().setStatus(Status.CLIENT_ERROR_NOT_FOUND);
  return new StringRepresentation(String.format(
      "Failed to add new topic: %s, it is already existed!", topicPartitionInfo.getTopic()));
} else {
  // New topic: register it with the mirror maker.
  _helixMirrorMakerManager.addTopicToMirrorMaker(topicPartitionInfo);
/**
 * Return the lagging time if the given partition has lag.
 *
 * <p>Lag is only reported when the offset sample is fresh, the offset gap
 * exceeds the configured minimum, and the estimated catch-up time exceeds the
 * configured minimum lag time.
 *
 * @param tp topic partition
 * @return the lagging time in seconds if the given partition has lag; otherwise return 0.
 */
private long getLagTime(TopicPartition tp) {
  TopicPartitionLag tpl =
      _helixMirrorMakerManager.getOffsetMonitor().getTopicPartitionOffset(tp);
  // No usable sample: missing, non-positive offsets, or stale beyond the validity window.
  if (tpl == null || tpl.getLatestOffset() <= 0 || tpl.getCommitOffset() <= 0) {
    return 0;
  }
  if (System.currentTimeMillis() - tpl.getTimeStamp() > _offsetMaxValidTimeMillis) {
    return 0;
  }
  long lag = tpl.getLatestOffset() - tpl.getCommitOffset();
  if (lag <= _minLagOffset) {
    return 0;
  }
  // Estimate catch-up time from the per-partition message rate; floor the rate
  // at 1 msg/s to avoid division blow-up on idle topics.
  double msgRate = _helixMirrorMakerManager.getWorkloadInfoRetriever()
      .topicWorkload(tp.getTopic())
      .getMsgsPerSecondPerPartition();
  msgRate = Math.max(msgRate, 1);
  double lagTime = lag / msgRate;
  return (lagTime > _minLagTimeSec) ? Math.round(lagTime) : 0;
}
// NOTE(review): this span appears to be a mangled paste/merge — the same
// workload-accumulation statements repeat with conflicting local declarations
// (`tw` is declared three times, `weight` is used before one of its
// declarations, and braces do not balance). It cannot compile as-is; the
// original should be reconstructed from version control. Code left byte-for-byte
// unchanged; only these comments added.
TopicWorkload tw = retriever.topicWorkload(tp.getTopic());
totalWorkload.add(weight * tw.getBytesPerSecondPerPartition(),
    weight * tw.getMsgsPerSecondPerPartition());
noPartitions = false;
if (weight > 0) {
  partitionCount++;
  TopicWorkload tw = retriever.topicWorkload(tp.getTopic());
  totalWorkload.add(weight * tw.getBytesPerSecondPerPartition(),
      weight * tw.getMsgsPerSecondPerPartition());
  noPartitions = false;
  if (weighter.partitionWeight(tp) > 0) {
    double weight = weighter.partitionWeight(tp);
    TopicWorkload tw = retriever.topicWorkload(tp.getTopic());
    if (tw.compareTotal(averageWorkload) > 0) {
      excludeInstances++;
      continue;
      TopicWorkload tpWorkload = retriever.topicWorkload(tp.getTopic());
      workloadToRemove.add(weight * tpWorkload.getBytesPerSecondPerPartition(),
          weight * tpWorkload.getMsgsPerSecondPerPartition());