/**
 * Rebalances a storage resource across the cluster, restricted to instances
 * carrying the given group tag. The resource name itself is reused as the
 * partition key prefix, matching Helix's ClusterSetup convention.
 *
 * @param clusterName  name of the Helix cluster
 * @param resourceName resource to rebalance; also used as the key prefix
 * @param group        instance group tag to restrict the rebalance to
 * @param replica      number of replicas per partition
 */
public void rebalanceStorageCluster(String clusterName, String resourceName, String group, int replica) {
  _admin.rebalance(clusterName, resourceName, replica, resourceName, group);
}
/**
 * Applies a Helix rebalance for the given ideal state over an explicit set
 * of candidate instances.
 *
 * @param clusterName   name of the Helix cluster
 * @param idealState    ideal state describing the resource to rebalance
 * @param instanceNames instances eligible to host the resource's partitions
 */
private void rebalanceResource(String clusterName, IdealState idealState, List<String> instanceNames) {
  _admin.rebalance(clusterName, idealState, instanceNames);
}
/**
 * Delegates to the Helix admin to rebalance {@code resourceName} at the
 * requested replica count, scoped to the {@code group} instance tag. Note
 * that the key prefix passed to Helix is the resource name itself.
 */
public void rebalanceStorageCluster(String clusterName, String resourceName, String group, int replica) {
  // keyPrefix == resourceName: partitions are named "<resourceName>_<i>".
  _admin.rebalance(clusterName, resourceName, replica, resourceName, group);
}
/**
 * Rebalances the resource described by {@code idealState} across the
 * supplied instances via the Helix admin.
 *
 * @param clusterName   target Helix cluster
 * @param idealState    ideal state of the resource being rebalanced
 * @param instanceNames candidate instances for partition placement
 */
private void rebalanceResource(String clusterName, IdealState idealState, List<String> instanceNames) {
  _admin.rebalance(clusterName, idealState, instanceNames);
}
/**
 * Rebalances a storage resource using an explicit partition key prefix and
 * no instance group restriction (empty group tag).
 *
 * @param clusterName  name of the Helix cluster
 * @param resourceName resource to rebalance
 * @param replica      number of replicas per partition
 * @param keyPrefix    prefix used when naming partition keys
 */
public void rebalanceStorageCluster(String clusterName, String resourceName, int replica, String keyPrefix) {
  _admin.rebalance(clusterName, resourceName, replica, keyPrefix, "");
}
/**
 * Convenience overload: rebalance {@code resourceName} with the given
 * partition key prefix, applying no group-tag filter ("" means all
 * instances are eligible).
 */
public void rebalanceStorageCluster(String clusterName, String resourceName, int replica, String keyPrefix) {
  // Empty group tag: do not restrict the rebalance to any instance group.
  _admin.rebalance(clusterName, resourceName, replica, keyPrefix, "");
}
/**
 * Fully parameterized rebalance: explicit replica count, partition key
 * prefix, and instance group tag are all forwarded to the Helix admin.
 *
 * @param clusterName  name of the Helix cluster
 * @param resourceName resource to rebalance
 * @param replica      number of replicas per partition
 * @param keyPrefix    prefix used when naming partition keys
 * @param group        instance group tag to restrict the rebalance to
 */
public void rebalanceCluster(String clusterName, String resourceName, int replica, String keyPrefix, String group) {
  _admin.rebalance(clusterName, resourceName, replica, keyPrefix, group);
}
/**
 * Thin pass-through to {@code HelixAdmin.rebalance} with every rebalance
 * knob (replica count, key prefix, group tag) supplied by the caller.
 */
public void rebalanceCluster(String clusterName, String resourceName, int replica, String keyPrefix, String group) {
  // Direct delegation; no defaults are applied here.
  _admin.rebalance(clusterName, resourceName, replica, keyPrefix, group);
}
// Delegate the rebalance of this resource to the Helix admin, then leave the
// switch. NOTE(review): this is a fragment of a larger switch statement — the
// enclosing case label and the body of the trailing "default:" branch are
// outside this view.
admin.rebalance(clusterId, resourceName, replicas, keyPrefix, group); break; default:
// Create the lock-group resource using the OnlineOffline state model in
// FULL_AUTO rebalance mode, trigger an initial rebalance with a single
// replica (one lock holder per partition), then iterate over the desired
// participants, naming each "localhost_<port>" starting at port 12000.
// NOTE(review): fragment — the for-loop body continues beyond this view.
admin.addResource(clusterName, lockGroupName, numPartitions, "OnlineOffline", RebalanceMode.FULL_AUTO.toString()); admin.rebalance(clusterName, lockGroupName, 1); for (int i = 0; i < numInstances; i++) { final String instanceName = "localhost_" + (12000 + i);
/**
 * Grows the cluster by one participant: registers a new localhost instance
 * with Helix, records it in the shared bookkeeping lists, triggers a
 * rebalance so partitions migrate onto the new node, and finally starts the
 * participant process.
 *
 * @throws Exception if the participant process fails to start
 */
private static void addNode() throws Exception {
  NUM_NODES = NUM_NODES + 1;
  // Ports are assigned sequentially from 12000; the newest node gets the
  // highest port seen so far.
  int nodePort = 12000 + NUM_NODES - 1;
  InstanceConfig config = new InstanceConfig("localhost_" + nodePort);
  config.setHostName("localhost");
  config.setPort("" + nodePort);
  config.setInstanceEnabled(true);
  echo("ADDING NEW NODE :" + config.getInstanceName() + ". Partitions will move from old nodes to the new node.");
  admin.addInstance(CLUSTER_NAME, config);
  INSTANCE_CONFIG_LIST.add(config);
  MyProcess participant = new MyProcess(config.getInstanceName());
  PROCESS_LIST.add(participant);
  // Recompute the assignment at 3 replicas so the new node receives load.
  admin.rebalance(CLUSTER_NAME, RESOURCE_NAME, 3);
  participant.start();
}
/**
 * Adds one more node to the running cluster.
 *
 * <p>Steps: bump the node counter, build an {@code InstanceConfig} for the
 * next localhost port, announce it, register the instance with Helix, track
 * the config and its process, rebalance (3 replicas) so partitions shift to
 * the newcomer, and start the process.
 *
 * @throws Exception if starting the participant process fails
 */
private static void addNode() throws Exception {
  NUM_NODES = NUM_NODES + 1;
  int port = 12000 + NUM_NODES - 1;

  InstanceConfig instanceConfig = new InstanceConfig("localhost_" + port);
  instanceConfig.setHostName("localhost");
  instanceConfig.setPort("" + port);
  instanceConfig.setInstanceEnabled(true);

  echo("ADDING NEW NODE :" + instanceConfig.getInstanceName() + ". Partitions will move from old nodes to the new node.");
  admin.addInstance(CLUSTER_NAME, instanceConfig);
  INSTANCE_CONFIG_LIST.add(instanceConfig);

  MyProcess process = new MyProcess(instanceConfig.getInstanceName());
  PROCESS_LIST.add(process);

  admin.rebalance(CLUSTER_NAME, RESOURCE_NAME, 3);
  process.start();
}
// Persist the computed ideal state for the resource, then ask Helix to
// (re)compute the partition-to-instance assignment at the configured replica
// count. NOTE(review): fragment — the enclosing method is outside this view.
helixAdmin.setResourceIdealState(cfg.clusterName, resource.name, idealState); helixAdmin.rebalance(cfg.clusterName, resource.name, replicas);
// Write the ideal state back to the cluster, then trigger a rebalance so the
// preference lists reflect the desired replica count. NOTE(review): fragment
// of a larger method not fully visible here.
helixAdmin.setResourceIdealState(cfg.clusterName, resource.name, idealState); helixAdmin.rebalance(cfg.clusterName, resource.name, replicas);
/**
 * Verifies that a resource added while the cluster is in maintenance mode
 * gets no external view: the controller must not compute an assignment for
 * the new resource until maintenance mode is exited.
 */
@Test(dependsOnMethods = "testMaintenanceModeAddNewInstance")
public void testMaintenanceModeAddNewResource() throws InterruptedException {
  // Add a brand-new FULL_AUTO MasterSlave resource with 7 partitions.
  _gSetupTool.getClusterManagementTool()
      .addResource(CLUSTER_NAME, newResourceAddedDuringMaintenanceMode, 7, "MasterSlave",
          IdealState.RebalanceMode.FULL_AUTO.name());
  _gSetupTool.getClusterManagementTool()
      .rebalance(CLUSTER_NAME, newResourceAddedDuringMaintenanceMode, 3);
  Assert.assertTrue(_clusterVerifier.verifyByPolling());

  // While in maintenance mode, the new resource must have no external view.
  ExternalView view = _gSetupTool.getClusterManagementTool()
      .getResourceExternalView(CLUSTER_NAME, newResourceAddedDuringMaintenanceMode);
  Assert.assertNull(view);
}
public static void setup() { admin = new ZKHelixAdmin(ZK_ADDRESS); // create cluster echo("Creating cluster: " + CLUSTER_NAME); admin.addCluster(CLUSTER_NAME, true); // Add nodes to the cluster echo("Adding " + NUM_NODES + " participants to the cluster"); for (int i = 0; i < NUM_NODES; i++) { admin.addInstance(CLUSTER_NAME, INSTANCE_CONFIG_LIST.get(i)); echo("\t Added participant: " + INSTANCE_CONFIG_LIST.get(i).getInstanceName()); } // Add a state model StateModelDefinition myStateModel = defineStateModel(); echo("Configuring StateModel: " + "MyStateModel with 1 Master and 1 Slave"); admin.addStateModelDef(CLUSTER_NAME, STATE_MODEL_NAME, myStateModel); // Add a resource with 6 partitions and 2 replicas echo("Adding a resource MyResource: " + "with 6 partitions and 2 replicas"); admin.addResource(CLUSTER_NAME, RESOURCE_NAME, NUM_PARTITIONS, STATE_MODEL_NAME, "AUTO"); // this will set up the ideal state, it calculates the preference list for // each partition similar to consistent hashing admin.rebalance(CLUSTER_NAME, RESOURCE_NAME, NUM_REPLICAS); }
public static void setup() { admin = new ZKHelixAdmin(ZK_ADDRESS); // create cluster echo("Creating cluster: " + CLUSTER_NAME); admin.addCluster(CLUSTER_NAME, true); // Add nodes to the cluster echo("Adding " + NUM_NODES + " participants to the cluster"); for (int i = 0; i < NUM_NODES; i++) { admin.addInstance(CLUSTER_NAME, INSTANCE_CONFIG_LIST.get(i)); echo("\t Added participant: " + INSTANCE_CONFIG_LIST.get(i).getInstanceName()); } // Add a state model StateModelDefinition myStateModel = defineStateModel(); echo("Configuring StateModel: " + "MyStateModel with 1 Master and 1 Slave"); admin.addStateModelDef(CLUSTER_NAME, STATE_MODEL_NAME, myStateModel); // Add a resource with 6 partitions and 2 replicas echo("Adding a resource MyResource: " + "with 6 partitions and 2 replicas"); admin.addResource(CLUSTER_NAME, RESOURCE_NAME, NUM_PARTITIONS, STATE_MODEL_NAME, "AUTO"); // this will set up the ideal state, it calculates the preference list for // each partition similar to consistent hashing admin.rebalance(CLUSTER_NAME, RESOURCE_NAME, NUM_REPLICAS); }
/**
 * Verifies that while the cluster is in maintenance mode, adding a new live
 * instance and requesting a rebalance does NOT move any partitions: the
 * external view must be identical before and after.
 */
@Test
public void testMaintenanceModeAddNewInstance() throws InterruptedException {
  _gSetupTool.getClusterManagementTool().enableMaintenanceMode(CLUSTER_NAME, true, "Test");

  // Snapshot the assignment before the new instance joins.
  ExternalView prevExternalView = _gSetupTool.getClusterManagementTool()
      .getResourceExternalView(CLUSTER_NAME, WorkflowGenerator.DEFAULT_TGT_DB);

  // Bring up a brand-new participant.
  String instanceName = PARTICIPANT_PREFIX + "_" + (_startPort + 10);
  _gSetupTool.addInstanceToCluster(CLUSTER_NAME, instanceName);
  _newInstance = new MockParticipantManager(ZK_ADDR, CLUSTER_NAME, instanceName);
  _newInstance.syncStart();

  _gSetupTool.getClusterManagementTool()
      .rebalance(CLUSTER_NAME, WorkflowGenerator.DEFAULT_TGT_DB, 3);
  Assert.assertTrue(_clusterVerifier.verifyByPolling());

  // Maintenance mode must freeze the assignment: map fields are unchanged.
  ExternalView newExternalView = _gSetupTool.getClusterManagementTool()
      .getResourceExternalView(CLUSTER_NAME, WorkflowGenerator.DEFAULT_TGT_DB);
  Assert.assertEquals(prevExternalView.getRecord().getMapFields(),
      newExternalView.getRecord().getMapFields());
}
// Turn on persistence of the intermediate assignment, push the updated
// cluster config, then rebalance the first test DB at 3 replicas.
// NOTE(review): fragment — the surrounding test method is outside this view.
clusterConfig.setPersistIntermediateAssignment(true); _configAccessor.setClusterConfig(CLUSTER_NAME, clusterConfig); _gSetupTool.getClusterManagementTool().rebalance(CLUSTER_NAME, _testDbs.get(0), 3);