/** Reports whether this member is still usable, i.e. its node has not been shut down. */
@Override
public boolean isActive() {
    return NodeState.SHUT_DOWN != node.getState();
}
/** A member counts as active while its state is anything other than {@code SHUT_DOWN}. */
@Override
public boolean isActive() {
    NodeState current = node.getState();
    return current != NodeState.SHUT_DOWN;
}
/** Checks whether the local node is still running (i.e. has not reached {@code SHUT_DOWN}). */
private boolean nodeActive() {
    return NodeState.SHUT_DOWN != nodeEngine.getNode().getState();
}
/** True while the owning node has any state other than {@code SHUT_DOWN}. */
private boolean nodeActive() {
    NodeState currentState = nodeEngine.getNode().getState();
    return currentState != NodeState.SHUT_DOWN;
}
/**
 * Decides whether the engine may execute {@code op} given the node's current state.
 * ACTIVE always permits execution; PASSIVE permits it only for operations marked
 * {@code AllowedDuringPassiveState}. Any other case notifies the caller with a
 * {@code HazelcastInstanceNotActiveException} and clears the {@code remote} flag.
 */
private boolean engineActive() {
    NodeState currentState = context.node.getState();
    if (currentState == NodeState.ACTIVE) {
        return true;
    }
    if (currentState == NodeState.PASSIVE && op instanceof AllowedDuringPassiveState) {
        return true;
    }
    // Not runnable in this state: report the failure and stop treating the invocation as remote.
    notifyError(new HazelcastInstanceNotActiveException("State: " + currentState + " Operation: " + op.getClass()));
    remote = false;
    return false;
}
/**
 * Returns whether {@code op} may run under the node's current state: always when ACTIVE,
 * and when PASSIVE only for {@code AllowedDuringPassiveState} operations. On rejection the
 * error is delivered via {@code notifyError} and {@code remote} is reset.
 */
private boolean engineActive() {
    NodeState state = context.node.getState();
    boolean active = state == NodeState.ACTIVE
            || (state == NodeState.PASSIVE && op instanceof AllowedDuringPassiveState);
    if (!active) {
        notifyError(new HazelcastInstanceNotActiveException("State: " + state + " Operation: " + op.getClass()));
        remote = false;
    }
    return active;
}
/**
 * Answers an HTTP HEAD health check by collecting node/cluster status values into an
 * insertion-ordered map that the command renders as response headers.
 */
private void handleHealthcheck(HttpHeadCommand command) {
    Node node = textCommandService.getNode();
    ClusterServiceImpl clusterService = node.getClusterService();
    InternalPartitionService partitionService = node.getPartitionService();

    // LinkedHashMap keeps the header order stable: NodeState, ClusterState,
    // MigrationQueueSize, ClusterSize.
    Map<String, Object> headers = new LinkedHashMap<String, Object>();
    headers.put("NodeState", node.getState());
    headers.put("ClusterState", clusterService.getClusterState());
    headers.put("MigrationQueueSize", partitionService.getMigrationQueueSize());
    headers.put("ClusterSize", clusterService.getMembers().size());
    command.setResponse(headers);
}
/**
 * Health-check handler for HTTP HEAD requests: snapshots node state, cluster state,
 * migration backlog and member count, and hands them to the command as response values.
 */
private void handleHealthcheck(HttpHeadCommand command) {
    final Node localNode = textCommandService.getNode();
    final NodeState memberState = localNode.getState();
    final ClusterServiceImpl cluster = localNode.getClusterService();
    final ClusterState clusterState = cluster.getClusterState();
    final int memberCount = cluster.getMembers().size();
    final InternalPartitionService partitions = localNode.getPartitionService();
    final long pendingMigrations = partitions.getMigrationQueueSize();

    // Insertion order of this map is the order the values are emitted.
    Map<String, Object> headers = new LinkedHashMap<String, Object>();
    headers.put("NodeState", memberState);
    headers.put("ClusterState", clusterState);
    headers.put("MigrationQueueSize", pendingMigrations);
    headers.put("ClusterSize", memberCount);
    command.setResponse(headers);
}
/**
 * Delivers {@code e} as the operation's response. If the node is already shut down, only
 * locally-executed operations get a response (a fresh {@code HazelcastInstanceNotActiveException});
 * any failure while responding is logged rather than propagated.
 */
private void sendResponseAfterOperationError(Operation operation, Throwable e) {
    try {
        if (node.getState() == NodeState.SHUT_DOWN) {
            // Remote callers get nothing once we are shut down; local callers are unblocked
            // with a not-active exception.
            if (operation.executedLocally()) {
                operation.sendResponse(new HazelcastInstanceNotActiveException());
            }
        } else {
            operation.sendResponse(e);
        }
    } catch (Throwable t) {
        logger.warning("While sending op error... op: " + operation + ", error: " + e, t);
    }
}
/**
 * Populates {@code memberState} with a snapshot of this member's node state, cluster state
 * and the respective versions.
 */
protected void createNodeState(MemberStateImpl memberState) {
    Node localNode = instance.node;
    ClusterService clusterService = localNode.clusterService;
    memberState.setNodeState(new NodeStateImpl(
            clusterService.getClusterState(),
            localNode.getState(),
            clusterService.getClusterVersion(),
            localNode.getVersion()));
}
/** Fills in the node-state section of the given member state snapshot. */
protected void createNodeState(MemberStateImpl memberState) {
    Node member = instance.node;
    ClusterService clusterService = member.clusterService;
    NodeStateImpl snapshot = new NodeStateImpl(clusterService.getClusterState(), member.getState(),
            clusterService.getClusterVersion(), member.getVersion());
    memberState.setNodeState(snapshot);
}
/**
 * Responds to {@code operation} with the error {@code e}. After shutdown, only operations
 * that executed locally are answered (with {@code HazelcastInstanceNotActiveException});
 * failures during the response itself are swallowed and logged as warnings.
 */
private void sendResponseAfterOperationError(Operation operation, Throwable e) {
    try {
        boolean shutDown = node.getState() == NodeState.SHUT_DOWN;
        if (!shutDown) {
            operation.sendResponse(e);
        } else if (operation.executedLocally()) {
            operation.sendResponse(new HazelcastInstanceNotActiveException());
        }
    } catch (Throwable t) {
        logger.warning("While sending op error... op: " + operation + ", error: " + e, t);
    }
}
/**
 * Delivers a {@link ShutdownResponseOperation} to {@code address}; when the target is this
 * member itself, the response is applied directly instead of going over the wire.
 */
private void sendShutdownOperation(Address address) {
    boolean local = node.getThisAddress().equals(address);
    if (!local) {
        nodeEngine.getOperationService().send(new ShutdownResponseOperation(), address);
        return;
    }
    // Local shortcut is only valid while the node is no longer running.
    assert !node.isRunning() : "Node state: " + node.getState();
    partitionService.onShutdownResponse();
}
/**
 * Verifies that this member's state allows {@code op} to execute; returns normally when it
 * does, otherwise throws an exception describing why and whether a retry makes sense.
 *
 * Outcomes (in order of evaluation):
 * - node ACTIVE: ok.
 * - node SHUT_DOWN: {@code HazelcastInstanceNotActiveException} (non-retryable).
 * - op implements {@code AllowedDuringPassiveState}: ok.
 * - cluster PASSIVE: {@code IllegalStateException} (no retry will help).
 * - op has no partition ID: {@code HazelcastInstanceNotActiveException} — it targeted this
 *   member specifically and this member cannot serve it.
 * - otherwise: {@code RetryableHazelcastException} so the partition operation is retried
 *   on another member.
 */
private void checkNodeState(Operation op) {
    NodeState state = node.getState();
    if (state == NodeState.ACTIVE) {
        return;
    }
    Address localAddress = node.getThisAddress();
    if (state == NodeState.SHUT_DOWN) {
        throw new HazelcastInstanceNotActiveException("Member " + localAddress + " is shut down! Operation: " + op);
    }
    if (op instanceof AllowedDuringPassiveState) {
        return;
    }
    // Cluster is in passive state. There is no need to retry.
    if (nodeEngine.getClusterService().getClusterState() == ClusterState.PASSIVE) {
        throw new IllegalStateException("Cluster is in " + ClusterState.PASSIVE + " state! Operation: " + op);
    }
    // Operation has no partition ID, so it's sent to this node on purpose.
    // Operation will fail since node is shutting down or cluster is passive.
    if (op.getPartitionId() < 0) {
        throw new HazelcastInstanceNotActiveException("Member " + localAddress + " is currently passive! Operation: " + op);
    }
    // Cluster is not passive but this node is shutting down.
    // Since operation has a partition ID, it must be retried on another node.
    throw new RetryableHazelcastException("Member " + localAddress + " is currently shutting down! Operation: " + op);
}
/**
 * Routes a {@link ShutdownResponseOperation} to the given member, short-circuiting the
 * network hop when the destination is this node.
 */
private void sendShutdownOperation(Address address) {
    if (!node.getThisAddress().equals(address)) {
        // Remote member: deliver the response through the operation service.
        nodeEngine.getOperationService().send(new ShutdownResponseOperation(), address);
    } else {
        assert !node.isRunning() : "Node state: " + node.getState();
        partitionService.onShutdownResponse();
    }
}
/**
 * Executes the cluster-wide shutdown request on this member. The request is honored only
 * while the cluster is in PASSIVE state; otherwise it is refused with a severe log entry.
 */
@Override
public void run() {
    final ClusterServiceImpl clusterService = getService();
    final ILogger logger = getLogger();
    final ClusterState clusterState = clusterService.getClusterState();
    if (clusterState == ClusterState.PASSIVE) {
        final NodeEngineImpl nodeEngine = (NodeEngineImpl) getNodeEngine();
        if (nodeEngine.isRunning()) {
            logger.info("Shutting down node in cluster passive state. Requested by: " + getCallerAddress());
            // Shutdown is kicked off on a dedicated thread — presumably so this operation
            // thread is not blocked for the whole lifecycle shutdown; TODO confirm.
            new Thread(new Runnable() {
                @Override
                public void run() {
                    final Node node = nodeEngine.getNode();
                    node.hazelcastInstance.getLifecycleService().shutdown();
                }
            }, createThreadName(nodeEngine.getHazelcastInstance().getName(), ".clusterShutdown")).start();
        } else {
            // Engine already stopping: nothing to do beyond logging the current state.
            logger.info("Node is already shutting down. NodeState: " + nodeEngine.getNode().getState());
        }
    } else {
        logger.severe("Can not shut down node because cluster is in " + clusterState + " state. Requested by: "
                + getCallerAddress());
    }
}
/**
 * Periodic master-only task: publishes the partition runtime state when migrations are
 * allowed and the node is ACTIVE, logging the remaining migration backlog if one exists.
 */
@Override
public void run() {
    if (!node.isMaster()) {
        return;
    }
    MigrationManager migrationManager = partitionService.getMigrationManager();
    boolean canPublish = migrationManager.isMigrationAllowed()
            && !partitionService.isFetchMostRecentPartitionTableTaskRequired();
    if (!canPublish) {
        logger.fine("Not publishing partition runtime state since migration is not allowed.");
        return;
    }
    if (migrationManager.hasOnGoingMigration()) {
        logger.info("Remaining migration tasks in queue => " + partitionService.getMigrationQueueSize());
    }
    if (node.getState() == NodeState.ACTIVE) {
        partitionService.publishPartitionRuntimeState();
    }
}
}
@Override public void run() { if (node.isMaster()) { MigrationManager migrationManager = partitionService.getMigrationManager(); boolean migrationAllowed = migrationManager.areMigrationTasksAllowed() && !partitionService.isFetchMostRecentPartitionTableTaskRequired(); if (!migrationAllowed) { logger.fine("Not publishing partition runtime state since migration is not allowed."); return; } if (migrationManager.hasOnGoingMigration()) { logger.info("Remaining migration tasks in queue => " + partitionService.getMigrationQueueSize() + ". (" + migrationManager.getStats().formatToString(logger.isFineEnabled()) + ")"); } else if (node.getState() == NodeState.ACTIVE) { if (node.getClusterService().getClusterVersion().isGreaterOrEqual(Versions.V3_12)) { partitionService.checkClusterPartitionRuntimeStates(); } else { // RU_COMPAT_3_11 partitionService.publishPartitionRuntimeState(); } } } } }
/**
 * Writes a "HazelcastInstance" diagnostics section: local address and flags, node state,
 * cluster identity/size/master, and the current member list.
 */
@Override
public void run(DiagnosticsLogWriter writer) {
    writer.startSection("HazelcastInstance");
    writer.writeKeyValueEntry("thisAddress", nodeEngine.getNode().getThisAddress().toString());
    writer.writeKeyValueEntry("isRunning", nodeEngine.getNode().isRunning());
    writer.writeKeyValueEntry("isLite", nodeEngine.getNode().isLiteMember());
    writer.writeKeyValueEntry("joined", nodeEngine.getNode().getClusterService().isJoined());
    // String.valueOf renders "null" for a missing state, matching the explicit null check.
    NodeState nodeState = nodeEngine.getNode().getState();
    writer.writeKeyValueEntry("nodeState", String.valueOf(nodeState));
    writer.writeKeyValueEntry("clusterId", nodeEngine.getClusterService().getClusterId());
    writer.writeKeyValueEntry("clusterSize", nodeEngine.getClusterService().getSize());
    writer.writeKeyValueEntry("isMaster", nodeEngine.getClusterService().isMaster());
    writer.writeKeyValueEntry("masterAddress", String.valueOf(nodeEngine.getClusterService().getMasterAddress()));
    writer.startSection("Members");
    for (Member member : nodeEngine.getClusterService().getMemberImpls()) {
        writer.writeEntry(member.getAddress().toString());
    }
    writer.endSection();
    writer.endSection();
}
}
/**
 * Emits the "HazelcastInstance" diagnostics section: this member's address and role flags,
 * its node state, cluster id/size/master address, and all known members.
 */
@Override
public void run(DiagnosticsLogWriter writer) {
    writer.startSection("HazelcastInstance");
    writer.writeKeyValueEntry("thisAddress", nodeEngine.getNode().getThisAddress().toString());
    writer.writeKeyValueEntry("isRunning", nodeEngine.getNode().isRunning());
    writer.writeKeyValueEntry("isLite", nodeEngine.getNode().isLiteMember());
    writer.writeKeyValueEntry("joined", nodeEngine.getNode().getClusterService().isJoined());
    NodeState currentState = nodeEngine.getNode().getState();
    writer.writeKeyValueEntry("nodeState", currentState != null ? currentState.toString() : "null");
    writer.writeKeyValueEntry("clusterId", nodeEngine.getClusterService().getClusterId());
    writer.writeKeyValueEntry("clusterSize", nodeEngine.getClusterService().getSize());
    writer.writeKeyValueEntry("isMaster", nodeEngine.getClusterService().isMaster());
    Address master = nodeEngine.getClusterService().getMasterAddress();
    writer.writeKeyValueEntry("masterAddress", master != null ? master.toString() : "null");
    writer.startSection("Members");
    for (Member member : nodeEngine.getClusterService().getMemberImpls()) {
        writer.writeEntry(member.getAddress().toString());
    }
    writer.endSection();
    writer.endSection();
}
}