/**
 * Deletes the node at the given path from the backing state storage.
 *
 * @param path storage path of the node to remove
 */
@Override
public void delete_node(String path) {
    stateStorage.delete_node(path);
}
/**
 * Removes the blobstore state entry for the given blob key.
 *
 * @param blobKey key of the blob whose state node should be deleted
 */
@Override
public void removeBlobstoreKey(String blobKey) {
    LOG.debug("remove key {}", blobKey);
    String blobPath = ClusterUtils.blobstorePath(blobKey);
    stateStorage.delete_node(blobPath);
}
/**
 * Deletes the base state node for a topology.
 *
 * @param stormId id of the topology whose base node should be removed
 */
@Override
public void removeStormBase(String stormId) {
    String stormPath = ClusterUtils.stormPath(stormId);
    stateStorage.delete_node(stormPath);
}
/**
 * Removes the max-key-sequence-number node tracked for a blob key.
 *
 * @param blobKey key of the blob whose version node should be deleted
 */
@Override
public void removeKeyVersion(String blobKey) {
    String versionPath = ClusterUtils.blobstoreMaxKeySequenceNumberPath(blobKey);
    stateStorage.delete_node(versionPath);
}
/**
 * Removes all state stored for a topology: its assignment (plus any cached
 * assignment state in the backend), credentials, log config, profiler
 * config, and finally its base node.
 *
 * @param stormId id of the topology to purge
 */
@Override
public void removeStorm(String stormId) {
    // Drop the assignment node first, then clear the backend's cached copy.
    stateStorage.delete_node(ClusterUtils.assignmentPath(stormId));
    this.assignmentsBackend.clearStateForStorm(stormId);
    // Remove the remaining per-topology nodes.
    stateStorage.delete_node(ClusterUtils.credentialsPath(stormId));
    stateStorage.delete_node(ClusterUtils.logConfigPath(stormId));
    stateStorage.delete_node(ClusterUtils.profilerConfigPath(stormId));
    // The base node goes last.
    removeStormBase(stormId);
}
@Override public void teardownTopologyErrors(String stormId) { try { stateStorage.delete_node(ClusterUtils.errorStormRoot(stormId)); } catch (Exception e) { if (Utils.exceptionCauseIsInstanceOf(KeeperException.class, e)) { // do nothing LOG.warn("Could not teardown errors for {}.", stormId); } else { throw e; } } }
@Override public void removeBackpressure(String stormId) { try { stateStorage.delete_node(ClusterUtils.backpressureStormRoot(stormId)); } catch (Exception e) { if (Utils.exceptionCauseIsInstanceOf(KeeperException.class, e)) { // do nothing LOG.warn("Could not teardown backpressure node for {}.", stormId); } else { throw e; } } }
/**
 * Deletes the per-worker backpressure node for the given topology/node/port,
 * if it exists.
 *
 * @param stormId id of the topology
 * @param node    supervisor node hosting the worker
 * @param port    worker port
 */
@Override
public void removeWorkerBackpressure(String stormId, String node, Long port) {
    String path = ClusterUtils.backpressurePath(stormId, node, port);
    if (stateStorage.node_exists(path, false)) {
        stateStorage.delete_node(path);
    }
}
@Override public void removeAllPrivateWorkerKeys(String topologyId) { for (WorkerTokenServiceType type : WorkerTokenServiceType.values()) { String path = ClusterUtils.secretKeysPath(type, topologyId); try { LOG.info("Removing worker keys under {}", path); stateStorage.delete_node(path); } catch (RuntimeException e) { //This should never happen because only the primary nimbus is active, but just in case // declare the race safe, even if we lose it. if (!Utils.exceptionCauseIsInstanceOf(KeeperException.NoNodeException.class, e)) { throw e; } } } }
/**
 * Registers this nimbus host under an ephemeral node in state storage, and
 * installs a connection listener that re-creates the entry whenever the
 * ZooKeeper connection transitions to RECONNECTED (a reconnect may mean a
 * new session, in which ephemeral nodes no longer exist).
 *
 * @param nimbusId      identifier of this nimbus host
 * @param nimbusSummary summary describing this nimbus, serialized into the node
 */
@Override
public void addNimbusHost(final String nimbusId, final NimbusSummary nimbusSummary) {
    // explicit delete for ephemeral node to ensure this session creates the entry.
    stateStorage.delete_node(ClusterUtils.nimbusPath(nimbusId));
    stateStorage.add_listener((curatorFramework, connectionState) -> {
        LOG.info("Connection state listener invoked, zookeeper connection state has changed to {}", connectionState);
        if (connectionState.equals(ConnectionState.RECONNECTED)) {
            LOG.info("Connection state has changed to reconnected so setting nimbuses entry one more time");
            // explicit delete for ephemeral node to ensure this session creates the entry.
            stateStorage.delete_node(ClusterUtils.nimbusPath(nimbusId));
            stateStorage.set_ephemeral_node(ClusterUtils.nimbusPath(nimbusId), Utils.serialize(nimbusSummary), defaultAcls);
        }
    });
    stateStorage.set_ephemeral_node(ClusterUtils.nimbusPath(nimbusId), Utils.serialize(nimbusSummary), defaultAcls);
}
/**
 * Deletes the profiler-config node corresponding to one profile request
 * (keyed by topology, host, port, and profile action).
 *
 * @param stormId        id of the topology
 * @param profileRequest request identifying the node/port/action to delete
 */
@Override
public void deleteTopologyProfileRequests(String stormId, ProfileRequest profileRequest) {
    ProfileAction action = profileRequest.get_action();
    String requestHost = profileRequest.get_nodeInfo().get_node();
    Long requestPort = profileRequest.get_nodeInfo().get_port_iterator().next();
    stateStorage.delete_node(ClusterUtils.profilerConfigPath(stormId, requestHost, requestPort, action));
}
// NOTE(review): fragment of a larger cleanup loop whose start/end are not
// visible here — pops the next child name and deletes that znode, with the
// catch below presumably tolerating a concurrent delete (NoNodeException);
// confirm against the enclosing method.
String znodePath = path + ClusterUtils.ZK_SEPERATOR + childrens.remove(0);
try {
    stateStorage.delete_node(znodePath);
} catch (Exception e) {
    if (Utils.exceptionCauseIsInstanceOf(KeeperException.NoNodeException.class, e)) {
@Override public void removeExpiredPrivateWorkerKeys(String topologyId) { for (WorkerTokenServiceType type : WorkerTokenServiceType.values()) { String basePath = ClusterUtils.secretKeysPath(type, topologyId); try { for (String version : stateStorage.get_children(basePath, false)) { String fullPath = basePath + ClusterUtils.ZK_SEPERATOR + version; try { PrivateWorkerKey key = ClusterUtils.maybeDeserialize(stateStorage.get_data(fullPath, false), PrivateWorkerKey.class); if (Time.currentTimeMillis() > key.get_expirationTimeMillis()) { LOG.info("Removing expired worker key {}", fullPath); stateStorage.delete_node(fullPath); } } catch (RuntimeException e) { //This should never happen because only the primary nimbus is active, but just in case // declare the race safe, even if we lose it. if (!Utils.exceptionCauseIsInstanceOf(KeeperException.NoNodeException.class, e)) { throw e; } } } } catch (RuntimeException e) { //No node for basePath is OK, nothing to remove if (!Utils.exceptionCauseIsInstanceOf(KeeperException.NoNodeException.class, e)) { throw e; } } } }
/**
 * Removes the blobstore state node associated with the given key.
 *
 * @param blobKey key of the blob to remove from state storage
 */
@Override
public void removeBlobstoreKey(String blobKey) {
    LOG.debug("remove key {}", blobKey);
    stateStorage.delete_node(ClusterUtils.blobstorePath(blobKey));
}
/**
 * Deletes the node holding the max key sequence number for a blob key.
 *
 * @param blobKey key whose version-tracking node should be removed
 */
@Override
public void removeKeyVersion(String blobKey) {
    stateStorage.delete_node(ClusterUtils.blobstoreMaxKeySequenceNumberPath(blobKey));
}
/**
 * Removes all state stored for a topology: assignment, credentials, log
 * config, profiler config, and finally its base node.
 *
 * @param stormId id of the topology to purge
 */
@Override
public void removeStorm(String stormId) {
    // Delete each per-topology node in turn.
    stateStorage.delete_node(ClusterUtils.assignmentPath(stormId));
    stateStorage.delete_node(ClusterUtils.credentialsPath(stormId));
    stateStorage.delete_node(ClusterUtils.logConfigPath(stormId));
    stateStorage.delete_node(ClusterUtils.profilerConfigPath(stormId));
    // The base node goes last.
    removeStormBase(stormId);
}
@Override public void teardownTopologyErrors(String stormId) { try { stateStorage.delete_node(ClusterUtils.errorStormRoot(stormId)); } catch (Exception e) { if (Utils.exceptionCauseIsInstanceOf(KeeperException.class, e)) { // do nothing LOG.warn("Could not teardown errors for {}.", stormId); } else { throw e; } } }
/**
 * Deletes the backpressure node for a specific worker, if present.
 *
 * @param stormId id of the topology
 * @param node    supervisor node hosting the worker
 * @param port    worker port
 */
@Override
public void removeWorkerBackpressure(String stormId, String node, Long port) {
    String backpressurePath = ClusterUtils.backpressurePath(stormId, node, port);
    boolean present = stateStorage.node_exists(backpressurePath, false);
    if (present) {
        stateStorage.delete_node(backpressurePath);
    }
}
// NOTE(review): tail of an anonymous listener registration whose opening is
// not visible here — the trailing "} });" closes scopes declared above this
// fragment. On reconnect the ZooKeeper session may be new, so the ephemeral
// nimbus entry is deleted and re-created.
@Override
public void stateChanged(CuratorFramework curatorFramework, ConnectionState connectionState) {
    LOG.info("Connection state listener invoked, zookeeper connection state has changed to {}", connectionState);
    if (connectionState.equals(ConnectionState.RECONNECTED)) {
        LOG.info("Connection state has changed to reconnected so setting nimbuses entry one more time");
        // explicit delete for ephemeral node to ensure this session creates the entry.
        stateStorage.delete_node(ClusterUtils.nimbusPath(nimbusId));
        stateStorage.set_ephemeral_node(ClusterUtils.nimbusPath(nimbusId), Utils.serialize(nimbusSummary), acls);
    }
} });
/**
 * Removes the profiler-config node identified by one profile request
 * (topology id plus the request's host, port, and action).
 *
 * @param stormId        id of the topology
 * @param profileRequest request identifying the profiler node to delete
 */
@Override
public void deleteTopologyProfileRequests(String stormId, ProfileRequest profileRequest) {
    ProfileAction profileAction = profileRequest.get_action();
    String targetHost = profileRequest.get_nodeInfo().get_node();
    Long targetPort = profileRequest.get_nodeInfo().get_port_iterator().next();
    String profilerPath = ClusterUtils.profilerConfigPath(stormId, targetHost, targetPort, profileAction);
    stateStorage.delete_node(profilerPath);
}