/**
 * Entry point for uReplicator-Manager. Parses command-line options, prints usage
 * when asked (or when no options are given), then initializes and starts the
 * manager, registering a shutdown hook for clean teardown.
 */
public static void main(String[] args) throws Exception {
  CommandLineParser parser = new DefaultParser();
  CommandLine cmd = parser.parse(ManagerConf.constructManagerOptions(), args);

  // No options at all, or explicit --help: show usage and quit.
  if (cmd.getOptions().length == 0 || cmd.hasOption("help")) {
    HelpFormatter formatter = new HelpFormatter();
    formatter.printHelp("OptionsTip", ManagerConf.constructManagerOptions());
    System.exit(0);
  }

  final ManagerStarter starter = ManagerStarter.init(cmd);

  // Stop the manager cleanly when the JVM exits; never let the hook throw.
  Runtime.getRuntime().addShutdownHook(new Thread(() -> {
    try {
      starter.stop();
    } catch (Exception e) {
      LOGGER.error("Caught error during shutdown! ", e);
    }
  }));

  try {
    starter.start();
  } catch (Exception e) {
    LOGGER.error("Cannot start uReplicator-Manager: ", e);
  }
}
// Constructor for the Helix manager coordinating uReplicator controller instances.
// NOTE(review): this fragment is truncated — the closing brace (and possibly further
// initialization) lies outside the visible chunk.
public ControllerHelixManager(SourceKafkaClusterValidationManager srcKafkaValidationManager, ManagerConf managerConf) {
  _conf = managerConf;
  // Whether automatic rebalancing of routes is enabled for this deployment.
  _enableRebalance = managerConf.getEnableRebalance();
  _srcKafkaValidationManager = srcKafkaValidationManager;
  // Initial and hard-cap partition counts allowed per route.
  _initMaxNumPartitionsPerRoute = managerConf.getInitMaxNumPartitionsPerRoute();
  _maxNumPartitionsPerRoute = managerConf.getMaxNumPartitionsPerRoute();
  // Initial per-worker workload caps in bytes; Dc/Xdc presumably distinguish
  // same-datacenter vs cross-datacenter routes — TODO confirm against ManagerConf.
  _initMaxWorkloadPerWorkerByteDc = managerConf.getInitMaxWorkloadPerWorkerByteDc();
  _initMaxWorkloadPerWorkerByteXdc = managerConf.getInitMaxWorkloadPerWorkerByteXdc();
  // Initial and hard-cap worker counts allowed per route.
  _initMaxNumWorkersPerRoute = managerConf.getInitMaxNumWorkersPerRoute();
  _maxNumWorkersPerRoute = managerConf.getMaxNumWorkersPerRoute();
  _workloadRefreshPeriodInSeconds = managerConf.getWorkloadRefreshPeriodInSeconds();
  // Delegate that manages worker instances in their own Helix cluster.
  _workerHelixManager = new WorkerHelixManager(managerConf);
  _workloadInfoRetrieverMap = new ConcurrentHashMap<>();
  _helixZkURL = HelixUtils.getAbsoluteZkPathForHelix(managerConf.getManagerZkStr());
  // Controller Helix cluster name is prefixed and scoped per deployment.
  _helixClusterName = MANAGER_CONTROLLER_HELIX_PREFIX + "-" + managerConf.getManagerDeployment();
  _instanceId = managerConf.getManagerInstanceId();
  _topicToPipelineInstanceMap = new ConcurrentHashMap<>();
  _pipelineToInstanceMap = new ConcurrentHashMap<>();
  // Cap total outbound HTTP connections used to talk to controllers.
  limitedConnMgr.setMaxTotal(100);
  _httpClient = HttpClients.createMinimal(limitedConnMgr);
  _controllerPort = managerConf.getControllerPort();
/**
 * Returns true when the (source, destination) pair forms a pipeline allowed by
 * the manager configuration, i.e. both cluster names are configured.
 */
private boolean isValidPipeline(String src, String dst) {
  // Short-circuit: skip the destination lookup when the source is unknown.
  if (!_conf.getSourceClusters().contains(src)) {
    return false;
  }
  return _conf.getDestinationClusters().contains(dst);
}
/**
 * Creates the Helix manager responsible for uReplicator worker instances.
 * The worker Helix cluster gets its own prefixed name, scoped per deployment.
 */
public WorkerHelixManager(ManagerConf managerConf) {
  _conf = managerConf;
  // Bookkeeping structures: route assignments and the pool of idle workers.
  _routeToInstanceMap = new ConcurrentHashMap<>();
  _availableWorkerList = new ArrayList<>();
  // Helix coordinates: ZK endpoint, cluster name, and this manager's instance id.
  _helixZkURL = HelixUtils.getAbsoluteZkPathForHelix(managerConf.getManagerZkStr());
  _helixClusterName = MANAGER_WORKER_HELIX_PREFIX + "-" + managerConf.getManagerDeployment();
  _instanceId = managerConf.getManagerInstanceId();
}
};
// NOTE(review): fragment — the statement closed above and the enclosing test
// method's declaration live outside this chunk.
CommandLineParser parser = new DefaultParser();
CommandLine cmd = parser.parse(ManagerConf.constructManagerOptions(), args);
ManagerConf conf = ManagerConf.getManagerConf(cmd);
// Expected source clusters parsed from the supplied args.
Set<String> ret1 = new HashSet<>();
ret1.add("cluster1");
ret1.add("cluster2");
Assert.assertEquals(conf.getSourceClusters(), ret1);
// Expected destination clusters parsed from the supplied args.
Set<String> ret2 = new HashSet<>();
ret2.add("cluster3");
ret2.add("cluster4");
Assert.assertEquals(conf.getDestinationClusters(), ret2);
// Scalar manager settings round-tripped through the parser.
Assert.assertEquals(conf.getManagerZkStr(), "localhost:2181/test");
Assert.assertEquals(conf.getManagerPort().toString(), "9090");
Assert.assertEquals(conf.getManagerDeployment(), "testing");
Assert.assertEquals(conf.getManagerInstanceId(), "instance0");
Assert.assertEquals(conf.getControllerPort().toString(), "8080");
Assert.assertEquals(conf.getC3Host(), "testhost");
Assert.assertEquals(conf.getC3Port().toString(), "8081");
Assert.assertEquals(conf.getWorkloadRefreshPeriodInSeconds().toString(), "10");
Assert.assertEquals(conf.getInitMaxNumPartitionsPerRoute().toString(), "20");
Assert.assertEquals(conf.getMaxNumPartitionsPerRoute().toString(), "30");
Assert.assertEquals(conf.getInitMaxNumWorkersPerRoute().toString(), "20");
Assert.assertEquals(conf.getMaxNumWorkersPerRoute().toString(), "30");
// Per-cluster Kafka properties keyed by cluster name.
Assert.assertEquals(conf.getProperty("kafka.cluster.zkStr.cluster1"), "localhost:12026/cluster1");
Assert.assertEquals(conf.getProperty("kafka.cluster.servers.cluster1"), "localhost:9091");
Assert.assertEquals(conf.getProperty("kafka.cluster.zkStr.cluster3"), "localhost:12026/cluster3");
Assert.assertEquals(conf.getProperty("kafka.cluster.servers.cluster3"), "localhost:9092");
};
// NOTE(review): fragment — the enclosing test method's declaration lives outside
// this chunk.
CommandLineParser parser = new DefaultParser();
CommandLine cmd = parser.parse(ManagerConf.constructManagerOptions(), args);
ManagerConf conf = ManagerConf.getManagerConf(cmd);
// Point cluster1's ZK at the embedded test ZooKeeper instance.
conf.addProperty("kafka.cluster.zkStr.cluster1", ZkStarter.DEFAULT_ZK_STR + "/cluster1");
// Assign up to InitMaxNumWorkersPerRoute workers (bounded by availability) to this route.
// NOTE(review): fragment — the loop's closing brace is outside the visible chunk, and
// the ideal-state expansion below appears to run on every iteration; verify whether it
// belongs after the loop in the full source.
for (int i = 0; i < _conf.getInitMaxNumWorkersPerRoute() && i < _availableWorkerList.size(); i++) {
  instances.add(_availableWorkerList.get(i));
  // Expand the pipeline's custom ideal state so the chosen instances serve this routeId,
  // capped at MaxNumWorkersPerRoute.
  _helixAdmin.setResourceIdealState(_helixClusterName, pipeline,
      IdealStateBuilder.expandCustomIdealStateFor(
          _helixAdmin.getResourceIdealState(_helixClusterName, pipeline), pipeline,
          String.valueOf(routeId), instances, _conf.getMaxNumWorkersPerRoute()));
// Builds the metrics reporter prefix from the environment ("dc.env") and client id.
// NOTE(review): this fragment is garbled by extraction — the statement assigning
// _reporterMetricPrefix (presumably a String.format/join over dcNenv[0], the metrics
// prefix, dcNenv[1], and clientId) is missing its head, and the `dcNenv == null`
// branch as written would dereference a null array. Recover the original body before
// relying on this code.
HelixKafkaMirrorMakerMetricsReporter(ManagerConf config) {
  final String environment = config.getEnvironment();
  final String clientId = config.getManagerInstanceId();
  // parse() presumably splits "dc.env" into { dc, env } — TODO confirm.
  String[] dcNenv = parse(environment);
  if (dcNenv == null) {
    dcNenv[0], config.getMetricsPrefix(), dcNenv[1], clientId);
  LOGGER.info("Reporter Metric Prefix is : " + _reporterMetricPrefix);
  _registry = new MetricRegistry();
/**
 * Builds a {@link ManagerStarter} from parsed command-line options.
 *
 * @param cmd parsed command-line options for the manager
 * @return a ManagerStarter wired with the resulting configuration
 * @throws RuntimeException if the manager configuration cannot be built
 */
public static ManagerStarter init(CommandLine cmd) {
  ManagerConf conf;
  try {
    conf = ManagerConf.getManagerConf(cmd);
  } catch (Exception e) {
    // Fix: the message previously said "controller configurations" — a copy/paste
    // from the controller module; this is the Manager. Cause is preserved.
    throw new RuntimeException("Not valid manager configurations!", e);
  }
  return new ManagerStarter(conf);
}
/**
 * Starts the manager: binds the REST server, exposes shared objects to the API
 * context, then brings up the Helix managers and finally the API component.
 * Startup order matters — Helix must be running before the API serves requests.
 */
public void start() throws Exception {
  // REST server plus the client protocols Restlet needs for static resources.
  _component.getServers().add(Protocol.HTTP, _config.getManagerPort());
  _component.getClients().add(Protocol.FILE);
  _component.getClients().add(Protocol.JAR);

  // Hand configuration and helix managers to REST resources via context attributes.
  Context apiContext = _component.getContext().createChildContext();
  LOGGER.info("Injecting conf and helix to the api context");
  apiContext.getAttributes().put(ManagerConf.class.toString(), _config);
  apiContext.getAttributes().put(ControllerHelixManager.class.toString(), _controllerHelixManager);
  apiContext.getAttributes().put(SourceKafkaClusterValidationManager.class.toString(), _srcKafkaValidationManager);

  Application restApplication = new ManagerRestApplication(null);
  restApplication.setContext(apiContext);
  _component.getDefaultHost().attach(restApplication);

  try {
    LOGGER.info("Starting helix manager");
    _controllerHelixManager.start();
    LOGGER.info("Starting source kafka cluster validation manager");
    _srcKafkaValidationManager.start();
    LOGGER.info("Starting API component");
    _component.start();
  } catch (final Exception e) {
    LOGGER.error("Caught exception while starting uReplicator-Manager", e);
    throw e;
  }
}
// NOTE(review): fragment — each `try {` opener and the catch-block bodies have been
// elided by extraction; only the parse-then-fail pattern repeated per invalid input
// remains. Each case expects ManagerConf.getManagerConf to reject bad args.
CommandLine cmd = parser.parse(ManagerConf.constructManagerOptions(), args);
ManagerConf conf = ManagerConf.getManagerConf(cmd);
Assert.fail("Expected exception to be thrown");
} catch (RuntimeException e) {
CommandLine cmd = parser.parse(ManagerConf.constructManagerOptions(), args);
ManagerConf conf = ManagerConf.getManagerConf(cmd);
Assert.fail("Expected exception to be thrown");
} catch (RuntimeException e) {
CommandLine cmd = parser.parse(ManagerConf.constructManagerOptions(), args);
ManagerConf conf = ManagerConf.getManagerConf(cmd);
Assert.fail("Expected exception to be thrown");
} catch (RuntimeException e) {
CommandLine cmd = parser.parse(ManagerConf.constructManagerOptions(), args);
ManagerConf conf = ManagerConf.getManagerConf(cmd);
Assert.fail("Expected exception to be thrown");
} catch (RuntimeException e) {
CommandLine cmd = parser.parse(ManagerConf.constructManagerOptions(), args);
ManagerConf conf = ManagerConf.getManagerConf(cmd);
Assert.fail("Expected exception to be thrown");
} catch (RuntimeException e) {
// Final case: empty cluster lists are accepted rather than rejected.
CommandLine cmd = parser.parse(ManagerConf.constructManagerOptions(), args);
ManagerConf conf = ManagerConf.getManagerConf(cmd);
Assert.assertTrue(conf.getSourceClusters().isEmpty());
Assert.assertTrue(conf.getDestinationClusters().isEmpty());