/**
 * Drops the current nimbus connection and attempts to establish a new one.
 * Failure to reconnect is logged (with stack trace) but not propagated,
 * so callers may retry later.
 */
public void reconnect() {
    // Release any stale connection state before re-initializing.
    cleanup();
    try {
        init(this.conf);
    } catch (Exception ex) {
        LOG.error("reconnect error, maybe nimbus is not alive.", ex);
    }
}
/**
 * Registers the given metric names with nimbus and returns the assigned
 * metric ids. Returns an empty map when metrics are disabled or when
 * registration fails (the failure is logged and the cached client is
 * discarded so a later call re-creates it).
 *
 * Fix: the original read {@code client} and tore it down OUTSIDE the lock,
 * racing with the synchronized lazy-init — a concurrent failure could null
 * the field between the synchronized block and {@code client.getClient()}.
 * We now take a local reference under the lock and also synchronize the
 * failure-path cleanup.
 *
 * @param names metric names to register
 * @return mapping from metric name to assigned id; empty on failure
 */
public Map<String, Long> registerMetrics(Set<String> names) {
    if (!JStormMetrics.enabled) {
        return new HashMap<>();
    }
    try {
        NimbusClientWrapper localClient;
        synchronized (lock) {
            if (client == null) {
                client = new NimbusClientWrapper();
                client.init(conf);
            }
            // Capture under the lock so a concurrent cleanup cannot
            // null the field out from under us.
            localClient = client;
        }
        return localClient.getClient().registerMetrics(topologyId, names);
    } catch (Exception e) {
        LOG.error("Failed to gen metric ids", e);
        synchronized (lock) {
            if (client != null) {
                client.cleanup();
                client = null;
            }
        }
    }
    return new HashMap<>();
}
/**
 * Pushes the given per-task heartbeats to nimbus. No-op for a null or
 * empty map. On failure, logs the error, discards the cached client so it
 * is re-created on the next call, and reports a WARN-level task error to ZK.
 *
 * Fixes: builds the log summary with StringBuilder instead of repeated
 * String concatenation (was O(n^2) in task count), and reuses the
 * {@code errorInfo} variable in the log call instead of duplicating the
 * literal, keeping the log and the ZK report in sync.
 *
 * @param tmpTaskHbMap heartbeats keyed by task id; may be null or empty
 * @throws Exception never thrown in practice (the body catches Exception),
 *                   kept for interface compatibility
 */
private void uploadTaskHeatbeat(Map<Integer, TaskHeartbeat> tmpTaskHbMap) throws Exception {
    try {
        if (tmpTaskHbMap == null || tmpTaskHbMap.size() == 0) {
            return;
        }
        TopologyTaskHbInfo topologyTaskHbInfo = new TopologyTaskHbInfo(topologyId, taskId);
        topologyTaskHbInfo.set_taskHbs(tmpTaskHbMap);
        client.getClient().updateTaskHeartbeat(topologyTaskHbInfo);
        // Summarize as " taskId-time" pairs for the log line.
        StringBuilder info = new StringBuilder();
        for (Entry<Integer, TaskHeartbeat> entry : topologyTaskHbInfo.get_taskHbs().entrySet()) {
            info.append(" ").append(entry.getKey()).append("-").append(entry.getValue().get_time());
        }
        LOG.info("Update task heartbeat:" + info);
    } catch (Exception e) {
        String errorInfo = "Failed to update task heartbeat info";
        LOG.error(errorInfo, e);
        // Drop the client so the next upload re-initializes the connection.
        if (client != null) {
            client.cleanup();
            client = null;
        }
        zkCluster.report_task_error(context.getTopologyId(), context.getThisTaskId(), errorInfo,
                ErrorConstants.WARN, ErrorConstants.CODE_USER);
    }
}
/**
 * Fails the current test if any task of the named topology has reported an
 * ERROR- or FATAL-level error.
 *
 * Fix: the original had two byte-identical branches for ERROR and FATAL;
 * they are collapsed into a single condition.
 *
 * @param conf         user config, overlaid on the cluster storm config
 * @param topologyName topology whose task errors are checked
 * @throws Exception on nimbus communication failure
 */
public static void checkError(Map conf, String topologyName) throws Exception {
    NimbusClientWrapper client = new NimbusClientWrapper();
    try {
        Map clusterConf = Utils.readStormConfig();
        clusterConf.putAll(conf);
        client.init(clusterConf);

        String topologyId = client.getClient().getTopologyId(topologyName);
        Map<Integer, List<TaskError>> errors = getTaskErrors(topologyId, conf);
        for (Entry<Integer, List<TaskError>> entry : errors.entrySet()) {
            Integer taskId = entry.getKey();
            for (TaskError error : entry.getValue()) {
                String level = error.getLevel();
                // ERROR and FATAL are treated identically: both abort the test.
                if (ErrorConstants.ERROR.equals(level) || ErrorConstants.FATAL.equals(level)) {
                    Assert.fail(taskId + " occur error:" + error.getError());
                }
            }
        }
    } finally {
        // Always release the nimbus client, even when an assertion fails.
        client.cleanup();
    }
}
/**
 * Kills the named topology via nimbus with a 1-second grace period.
 *
 * @param conf         user config, overlaid on the cluster storm config
 * @param topologyName topology to kill
 * @throws Exception on nimbus communication failure
 */
public static void killTopology(Map conf, String topologyName) throws Exception {
    NimbusClientWrapper client = new NimbusClientWrapper();
    try {
        Map clusterConf = Utils.readStormConfig();
        clusterConf.putAll(conf);
        client.init(clusterConf);

        // Give workers one second to shut down cleanly.
        KillOptions opts = new KillOptions();
        opts.set_wait_secs(1);
        client.getClient().killTopologyWithOpts(topologyName, opts);
    } finally {
        client.cleanup();
    }
}
} finally { if (client != null) { nimbusClient.cleanup();
client.cleanup(); client = null;
LOG.error("upload metrics error:", ex); if (client != null) { client.cleanup(); client = null;
Assert.fail("Fail to get workerSlots"); }finally { client.cleanup();
LOG.error(errorInfo, e); if (client != null) { client.cleanup();
/**
 * Drops the current nimbus connection and attempts to establish a new one.
 * Failure to reconnect is logged but not propagated, so callers may retry.
 *
 * Fix: the original called {@code LOG.error(msg)} without passing
 * {@code ex}, silently discarding the stack trace; the exception is now
 * logged (matching the other reconnect implementation in this codebase).
 */
public void reconnect() {
    cleanup();
    try {
        init(this.conf);
    } catch (Exception ex) {
        LOG.error("reconnect error, maybe nimbus is not alive.", ex);
    }
}
/**
 * Registers the given metric names with nimbus and returns the assigned
 * metric ids. Returns an empty map when metrics are disabled or when
 * registration fails (the failure is logged and the cached client is
 * discarded so a later call re-creates it).
 *
 * Fix: the original read {@code client} and tore it down OUTSIDE the lock,
 * racing with the synchronized lazy-init — a concurrent failure could null
 * the field between the synchronized block and {@code client.getClient()}.
 * We now take a local reference under the lock and also synchronize the
 * failure-path cleanup.
 *
 * @param names metric names to register
 * @return mapping from metric name to assigned id; empty on failure
 */
public Map<String, Long> registerMetrics(Set<String> names) {
    if (!JStormMetrics.enabled) {
        return new HashMap<>();
    }
    try {
        NimbusClientWrapper localClient;
        synchronized (lock) {
            if (client == null) {
                client = new NimbusClientWrapper();
                client.init(conf);
            }
            // Capture under the lock so a concurrent cleanup cannot
            // null the field out from under us.
            localClient = client;
        }
        return localClient.getClient().registerMetrics(topologyId, names);
    } catch (Exception e) {
        LOG.error("Failed to gen metric ids", e);
        synchronized (lock) {
            if (client != null) {
                client.cleanup();
                client = null;
            }
        }
    }
    return new HashMap<>();
}
/**
 * Pushes the given per-task heartbeats to nimbus. No-op for a null or
 * empty map. On failure, logs the error, discards the cached client so it
 * is re-created on the next call, and reports a WARN-level task error to ZK.
 *
 * Fixes: builds the log summary with StringBuilder instead of repeated
 * String concatenation (was O(n^2) in task count), and reuses the
 * {@code errorInfo} variable in the log call instead of duplicating the
 * literal, keeping the log and the ZK report in sync.
 *
 * @param tmpTaskHbMap heartbeats keyed by task id; may be null or empty
 * @throws Exception never thrown in practice (the body catches Exception),
 *                   kept for interface compatibility
 */
private void uploadTaskHeatbeat(Map<Integer, TaskHeartbeat> tmpTaskHbMap) throws Exception {
    try {
        if (tmpTaskHbMap == null || tmpTaskHbMap.size() == 0) {
            return;
        }
        TopologyTaskHbInfo topologyTaskHbInfo = new TopologyTaskHbInfo(topologyId, taskId);
        topologyTaskHbInfo.set_taskHbs(tmpTaskHbMap);
        client.getClient().updateTaskHeartbeat(topologyTaskHbInfo);
        // Summarize as " taskId-time" pairs for the log line.
        StringBuilder info = new StringBuilder();
        for (Entry<Integer, TaskHeartbeat> entry : topologyTaskHbInfo.get_taskHbs().entrySet()) {
            info.append(" ").append(entry.getKey()).append("-").append(entry.getValue().get_time());
        }
        LOG.info("Update task heartbeat:" + info);
    } catch (Exception e) {
        String errorInfo = "Failed to update task heartbeat info";
        LOG.error(errorInfo, e);
        // Drop the client so the next upload re-initializes the connection.
        if (client != null) {
            client.cleanup();
            client = null;
        }
        zkCluster.report_task_error(context.getTopologyId(), context.getThisTaskId(), errorInfo,
                ErrorConstants.WARN, ErrorConstants.CODE_USER);
    }
}
} finally { if (client != null) { nimbusClient.cleanup();
client.cleanup(); client = null;
LOG.error("upload metrics error:", ex); if (client != null) { client.cleanup(); client = null;
LOG.error("Failed to upload worker metrics ", e); if (client != null) { client.cleanup();