String sessionId = instance.getSessionId();
/**
 * Reads the Helix current state of {@code resourceName} for this instance.
 *
 * <p>The current-state ZNode lives under the instance's live session, so the live-instance
 * record is fetched first to discover the session id.
 *
 * @param resourceName name of the resource whose current state is requested
 * @return the {@link CurrentState} record, or {@code null} if the instance is not live
 *         (no session, hence no current state) or the ZNode does not exist
 */
@Override
protected CurrentState getState(String resourceName) {
  PropertyKey.Builder keyBuilder = _helixDataAccessor.keyBuilder();
  LiveInstance liveInstance =
      _helixDataAccessor.getProperty(keyBuilder.liveInstance(_instanceName));
  if (liveInstance == null) {
    // Fix: the original dereferenced liveInstance unconditionally and threw NPE
    // whenever the instance was down / not yet live.
    return null;
  }
  String sessionId = liveInstance.getSessionId();
  return _helixDataAccessor.getProperty(
      keyBuilder.currentState(_instanceName, sessionId, resourceName));
}
sessionIdMap.put(liveInstance.getInstanceName(), liveInstance.getSessionId());
/**
 * Logs, per table, how many segments have been loaded so far versus how many the Helix
 * current state expects, as a bootstrap progress indicator.
 *
 * <p>Silently returns when the instance data manager is unavailable or when this instance
 * has no live-instance record (no session id means no current state can be read).
 */
private void logSegmentsLoadingInfo() {
  InstanceDataManager instanceDataManager = _serverInstance.getInstanceDataManager();
  if (instanceDataManager == null) {
    return;
  }
  HelixDataAccessor helixDataAccessor = _helixManager.getHelixDataAccessor();
  Builder keyBuilder = helixDataAccessor.keyBuilder();
  LiveInstance liveInstance =
      helixDataAccessor.getProperty(keyBuilder.liveInstance(_instanceId));
  if (liveInstance == null) {
    // Fix: the original threw NPE on getSessionId() when the live-instance ZNode
    // was missing (e.g. session expired during bootstrap).
    return;
  }
  String sessionId = liveInstance.getSessionId();
  List<String> tableNames = _helixAdmin.getResourcesInCluster(_helixClusterName);
  for (String tableName : tableNames) {
    PropertyKey currentStateKey = keyBuilder.currentState(_instanceId, sessionId, tableName);
    CurrentState currentState = helixDataAccessor.getProperty(currentStateKey);
    int numSegmentsLoaded = instanceDataManager.getAllSegmentsMetadata(tableName).size();
    if (currentState != null && currentState.isValid()) {
      // The partition-state map holds one entry per segment the controller expects us to host.
      int numSegmentsToLoad = currentState.getPartitionStateMap().size();
      LOGGER.info(
          "Segments are not fully loaded during server bootstrap, current progress: table: {}, segments loading progress [ {} / {} ]",
          tableName, numSegmentsLoaded, numSegmentsToLoad);
    }
  }
}
return toggle ? PinotResourceManagerResponse.FAILURE : PinotResourceManagerResponse.SUCCESS; PropertyKey instanceCurrentStatesKey = _keyBuilder.currentStates(instanceName, liveInstance.getSessionId()); List<CurrentState> instanceCurrentStates = _helixDataAccessor.getChildValues(instanceCurrentStatesKey); if (instanceCurrentStates == null) {
/**
 * Counts the total number of segments this instance is expected to load, by summing the
 * partition-state map sizes of the Helix current state across all tables in the cluster.
 *
 * @return total expected segment count, or {@code -1} if the instance data manager is not
 *         available yet or the instance has no live-instance record
 */
private int getNumSegmentsToLoad() {
  InstanceDataManager instanceDataManager = _serverInstance.getInstanceDataManager();
  if (instanceDataManager == null) {
    return -1;
  }
  HelixDataAccessor helixDataAccessor = _helixManager.getHelixDataAccessor();
  Builder keyBuilder = helixDataAccessor.keyBuilder();
  // Fix: the live-instance lookup is loop-invariant; the original re-read it from ZK
  // once per table. Hoisted out of the loop, with a null guard (the original NPE'd when
  // the live-instance ZNode was absent).
  LiveInstance liveInstance =
      helixDataAccessor.getProperty(keyBuilder.liveInstance(_instanceId));
  if (liveInstance == null) {
    return -1;
  }
  String sessionId = liveInstance.getSessionId();
  int numSegmentsToLoad = 0;
  List<String> tableNames = _helixAdmin.getResourcesInCluster(_helixClusterName);
  for (String tableName : tableNames) {
    PropertyKey currentStateKey = keyBuilder.currentState(_instanceId, sessionId, tableName);
    CurrentState currentState = helixDataAccessor.getProperty(currentStateKey);
    if (currentState != null && currentState.isValid()) {
      numSegmentsToLoad += currentState.getPartitionStateMap().size();
    }
  }
  return numSegmentsToLoad;
}
/**
 * Checks that this live-instance record carries its mandatory fields.
 *
 * @return {@code true} if both the session id and the Helix version are present,
 *         {@code false} otherwise (with an error logged identifying the record)
 */
@Override
public boolean isValid() {
  if (getSessionId() == null) {
    _logger.error("liveInstance does not have session id. id:" + _record.getId());
    return false;
  }
  if (getHelixVersion() == null) {
    // Fix: corrected "verion" -> "version" in the error message.
    _logger.error("liveInstance does not have CLM version. id:" + _record.getId());
    return false;
  }
  return true;
}
}
/**
 * Checks that this live-instance record carries its mandatory fields.
 *
 * @return {@code true} if both the session id and the Helix version are present,
 *         {@code false} otherwise (with an error logged identifying the record)
 */
@Override
public boolean isValid() {
  if (getSessionId() == null) {
    _logger.error("liveInstance does not have session id. id:" + _record.getId());
    return false;
  }
  if (getHelixVersion() == null) {
    // Fix: corrected "verion" -> "version" in the error message.
    _logger.error("liveInstance does not have CLM version. id:" + _record.getId());
    return false;
  }
  return true;
}
}
String sessionId = liveInstance.getSessionId(); InstanceConfig instanceConfig = instanceConfigMap.get(instanceName); if (instanceConfig == null) {
@Override public boolean isLeader() { if (_instanceType != InstanceType.CONTROLLER && _instanceType != InstanceType.CONTROLLER_PARTICIPANT) { return false; } if (!isConnected()) { return false; } try { LiveInstance leader = _dataAccessor.getProperty(_keyBuilder.controllerLeader()); if (leader != null) { String leaderName = leader.getInstanceName(); String sessionId = leader.getSessionId(); if (leaderName != null && leaderName.equals(_instanceName) && sessionId != null && sessionId.equals(_sessionId)) { return true; } } } catch (Exception e) { // log } return false; }
@Override public boolean verify() { LiveInstance liveInstance = accessor.getProperty(keyBuilder.controllerLeader()); return liveInstance != null && controllerName.equals(liveInstance.getInstanceName()) && manager.getSessionId().equals(liveInstance.getSessionId()); } }, 1000));
/**
 * Attempts to claim controller leadership by creating the leader ZNode for this manager.
 *
 * <p>If creation fails (typically because another controller won the race), the existing
 * leader record is re-read: leadership is still considered acquired when that record
 * carries this manager's own session id (i.e. we already are the leader).
 *
 * @param manager the Helix manager attempting to become leader
 * @return {@code true} if this manager is (or became) the leader, {@code false} otherwise
 */
private boolean tryUpdateController(HelixManager manager) {
  HelixDataAccessor accessor = manager.getHelixDataAccessor();
  Builder keyBuilder = accessor.keyBuilder();
  LiveInstance leader = new LiveInstance(manager.getInstanceName());
  try {
    leader.setLiveInstance(ManagementFactory.getRuntimeMXBean().getName());
    leader.setSessionId(manager.getSessionId());
    leader.setHelixVersion(manager.getVersion());
    boolean success = accessor.createControllerLeader(leader);
    if (success) {
      return true;
    } else {
      // Fix: corrected grammar in the log message ("becames" -> "became").
      LOG.info("Unable to become leader probably because some other controller became the leader");
    }
  } catch (Exception e) {
    // Fix: corrected grammar in the log message ("trying to updating" -> "trying to update").
    LOG.error(
        "Exception when trying to update leader record in cluster:" + manager.getClusterName()
            + ". Need to check again whether leader node has been created or not", e);
  }
  // Creation failed or threw: check whether the existing leader record is actually ours
  // (same session), which still counts as holding leadership.
  leader = accessor.getProperty(keyBuilder.controllerLeader());
  if (leader != null) {
    String leaderSessionId = leader.getSessionId();
    LOG.info("Leader exists for cluster: " + manager.getClusterName() + ", currentLeader: "
        + leader.getInstanceName() + ", leaderSessionId: " + leaderSessionId);
    if (leaderSessionId != null && leaderSessionId.equals(manager.getSessionId())) {
      return true;
    }
  }
  return false;
}
StringRepresentation getSchedulerTasksRepresentation() throws JsonGenerationException, JsonMappingException, IOException { String clusterName = (String) getRequest().getAttributes().get("clusterName"); String instanceName = (String) getRequest().getAttributes().get("instanceName"); ZkClient zkClient = (ZkClient) getContext().getAttributes().get(RestAdminApplication.ZKCLIENT); ClusterSetup setupTool = new ClusterSetup(zkClient); List<String> instances = setupTool.getClusterManagementTool().getInstancesInCluster(clusterName); HelixDataAccessor accessor = ClusterRepresentationUtil.getClusterDataAccessor(zkClient, clusterName); LiveInstance liveInstance = accessor.getProperty(accessor.keyBuilder().liveInstance(instanceName)); String sessionId = liveInstance.getSessionId(); StringRepresentation representation = new StringRepresentation("");// (ClusterRepresentationUtil.ObjectToJson(instanceConfigs), // MediaType.APPLICATION_JSON); return representation; }
@Override public void process(ClusterEvent event) throws Exception { _eventId = event.getEventId(); ClusterDataCache cache = event.getAttribute(AttributeName.ClusterDataCache.name()); final Map<String, Resource> resourceMap = event.getAttribute(AttributeName.RESOURCES.name()); if (cache == null || resourceMap == null) { throw new StageException("Missing attributes in event:" + event + ". Requires DataCache|RESOURCE"); } Map<String, LiveInstance> liveInstances = cache.getLiveInstances(); final CurrentStateOutput currentStateOutput = new CurrentStateOutput(); for (LiveInstance instance : liveInstances.values()) { String instanceName = instance.getInstanceName(); String instanceSessionId = instance.getSessionId(); // update pending messages Map<String, Message> messages = cache.getMessages(instanceName); Map<String, Message> relayMessages = cache.getRelayMessages(instanceName); updatePendingMessages(instance, messages.values(), currentStateOutput, relayMessages.values(), resourceMap); // update current states. Map<String, CurrentState> currentStateMap = cache.getCurrentState(instanceName, instanceSessionId); updateCurrentStates(instance, currentStateMap.values(), currentStateOutput, resourceMap); } event.addAttribute(AttributeName.CURRENT_STATE.name(), currentStateOutput); }
@Override public boolean verify() { // Newly created node should have a new creating time but with old session. LiveInstance invalidLeaderNode = accessor.getProperty(keyBuilder.controllerLeader()); // node exist if (invalidLeaderNode == null) return false; // node is newly created if (invalidLeaderNode.getStat().getCreationTime() == originalCreationTime) return false; // node has the same session as the old one, so it's invalid if (!invalidLeaderNode.getSessionId().equals(originalSessionId)) return false; return true; } }, 2000));
/**
 * Asserts that peer-to-peer state-transition triggering is effectively disabled: every
 * MASTER partition on every live instance must have had its transition triggered by the
 * controller, never by a peer participant.
 */
private void verifyP2PDisabled() {
  ClusterDataCache cache = new ClusterDataCache(CLUSTER_NAME);
  cache.refresh(_accessor);
  for (LiveInstance liveInstance : cache.getLiveInstances().values()) {
    Map<String, CurrentState> states =
        cache.getCurrentState(liveInstance.getInstanceName(), liveInstance.getSessionId());
    Assert.assertNotNull(states);
    for (CurrentState cs : states.values()) {
      for (String partition : cs.getPartitionStateMap().keySet()) {
        String state = cs.getState(partition);
        if (!state.equalsIgnoreCase("MASTER")) {
          continue; // only MASTER transitions are subject to p2p triggering
        }
        String triggerHost = cs.getTriggerHost(partition);
        Assert.assertEquals(triggerHost, _controllerName,
            state + " of " + partition + " on " + liveInstance.getInstanceName()
                + " was triggered by " + triggerHost);
      }
    }
  }
}
/**
 * Tallies how many MASTER transitions that started after {@code startTime} were triggered
 * by a peer participant rather than the controller. Increments the {@code p2pTrigged} and
 * {@code total} counters as a side effect; performs no assertions beyond the cache being
 * readable.
 *
 * @param startTime only transitions whose start time is strictly later are counted
 */
private void verifyP2PEnabled(long startTime) {
  ClusterDataCache cache = new ClusterDataCache(CLUSTER_NAME);
  cache.refresh(_accessor);
  for (LiveInstance liveInstance : cache.getLiveInstances().values()) {
    Map<String, CurrentState> states =
        cache.getCurrentState(liveInstance.getInstanceName(), liveInstance.getSessionId());
    Assert.assertNotNull(states);
    for (CurrentState cs : states.values()) {
      for (String partition : cs.getPartitionStateMap().keySet()) {
        boolean recentMaster = cs.getState(partition).equalsIgnoreCase("MASTER")
            && cs.getStartTime(partition) > startTime;
        if (recentMaster) {
          // A trigger host other than the controller means the transition was p2p-triggered.
          if (!cs.getTriggerHost(partition).equals(_controllerName)) {
            p2pTrigged++;
          }
          total++;
        }
      }
    }
  }
}
/**
 * Pipeline stage: builds the {@link CurrentStateOutput} for this cluster event.
 *
 * <p>For every live instance, folds its pending messages and reported current states into a
 * single output view; for non-task pipelines it also refreshes the top-state status on the
 * cluster status monitor. The result is attached to the event under
 * {@code AttributeName.CURRENT_STATE}.
 *
 * @param event the cluster event carrying the data cache and resource map
 * @throws StageException when the event is missing the data cache or the resource map
 */
@Override public void process(ClusterEvent event) throws Exception { _eventId = event.getEventId(); ClusterDataCache cache = event.getAttribute(AttributeName.ClusterDataCache.name()); final Map<String, Resource> resourceMap = event.getAttribute(AttributeName.RESOURCES.name()); if (cache == null || resourceMap == null) { throw new StageException("Missing attributes in event:" + event + ". Requires DataCache|RESOURCE"); } Map<String, LiveInstance> liveInstances = cache.getLiveInstances(); final CurrentStateOutput currentStateOutput = new CurrentStateOutput(); for (LiveInstance instance : liveInstances.values()) { String instanceName = instance.getInstanceName(); String instanceSessionId = instance.getSessionId(); // update pending messages Map<String, Message> messages = cache.getMessages(instanceName); updatePendingMessages(instance, messages.values(), currentStateOutput, resourceMap); // update current states. Map<String, CurrentState> currentStateMap = cache.getCurrentState(instanceName, instanceSessionId); updateCurrentStates(instance, currentStateMap.values(), currentStateOutput, resourceMap); } if (!cache.isTaskCache()) { ClusterStatusMonitor clusterStatusMonitor = event.getAttribute(AttributeName.clusterStatusMonitor.name()); // TODO Update the status async -- jjwang updateTopStateStatus(cache, clusterStatusMonitor, resourceMap, currentStateOutput); } event.addAttribute(AttributeName.CURRENT_STATE.name(), currentStateOutput); }
@Test public void testAddedFieldsInCurrentState() { String instanceName = PARTICIPANT_PREFIX + "_" + _startPort; HelixDataAccessor accessor = _manager.getHelixDataAccessor(); LiveInstance liveInstance = accessor.getProperty(accessor.keyBuilder().liveInstance(instanceName)); CurrentState currentState = accessor.getProperty(accessor.keyBuilder() .currentState(instanceName, liveInstance.getSessionId(), WorkflowGenerator.DEFAULT_TGT_DB)); // Test start time should happen after test start time Assert.assertTrue( currentState.getStartTime(WorkflowGenerator.DEFAULT_TGT_DB + "_0") >= _testStartTime); // Test end time is always larger than start time Assert.assertTrue( currentState.getEndTime(WorkflowGenerator.DEFAULT_TGT_DB + "_0") >= currentState .getStartTime(WorkflowGenerator.DEFAULT_TGT_DB + "_0")); // Final state is MASTER, so SLAVE will be the previous state Assert.assertEquals(currentState.getPreviousState(WorkflowGenerator.DEFAULT_TGT_DB + "_0"), "SLAVE"); } }
private void clearStatusUpdate(String clusterName, String instance, String resource, String partition) { // clear status update for error partition so verify() will not fail on old // errors ZKHelixDataAccessor accessor = new ZKHelixDataAccessor(clusterName, new ZkBaseDataAccessor<ZNRecord>(_gZkClient)); Builder keyBuilder = accessor.keyBuilder(); LiveInstance liveInstance = accessor.getProperty(keyBuilder.liveInstance(instance)); accessor.removeProperty(keyBuilder.stateTransitionStatus(instance, liveInstance.getSessionId(), resource, partition)); } // TODO: throw exception in reset()