/**
 * Releases the logviewer cleanup timer if one was ever started.
 * Any failure while closing is rethrown unchecked.
 */
@Override
public void close() {
    if (logviewerCleanupTimer == null) {
        return;
    }
    try {
        logviewerCleanupTimer.close();
    } catch (Exception e) {
        throw Utils.wrapInRuntime(e);
    }
}
/**
 * Builds the ZooKeeper path under which errors for the given component of the
 * given topology are stored. The component id is URL-encoded so arbitrary
 * component names remain path-safe.
 */
public static String errorPath(String stormId, String componentId) {
    String root = errorStormRoot(stormId);
    final String encodedComponent;
    try {
        encodedComponent = URLEncoder.encode(componentId, "UTF-8");
    } catch (UnsupportedEncodingException e) {
        // UTF-8 support is mandated by the JVM spec; this branch is effectively dead.
        throw Utils.wrapInRuntime(e);
    }
    return root + ZK_SEPERATOR + encodedComponent;
}
/**
 * Factory hook: creates a ZooKeeper-backed state storage from the given
 * cluster and auth configuration.
 */
@Override
public IStateStorage mkStore(Map<String, Object> config, Map<String, Object> auth_conf,
                             ClusterStateContext context) {
    IStateStorage storage;
    try {
        storage = new ZKStateStorage(config, auth_conf, context);
    } catch (Exception wrapped) {
        throw Utils.wrapInRuntime(wrapped);
    }
    return storage;
}
}
/**
 * Returns a runnable that pauses the calling thread for 10 ms.
 *
 * <p>If the pause is interrupted, the thread's interrupt status is restored
 * before the exception is rethrown unchecked, so code further up the stack can
 * still observe the interruption.
 */
private Runnable sleep() {
    return () -> {
        try {
            Thread.sleep(10);
        } catch (InterruptedException e) {
            // Fix: wrapping alone swallows the interrupt flag; restore it so
            // cooperative cancellation keeps working for the caller.
            Thread.currentThread().interrupt();
            throw Utils.wrapInRuntime(e);
        }
    };
}
/**
 * Queries the leader latch for the current leader and converts the
 * participant record into a {@code NimbusInfo}.
 */
@Override
public NimbusInfo getLeader() {
    try {
        return Zookeeper.toNimbusInfo(leaderLatch.get().getLeader());
    } catch (Exception cause) {
        throw Utils.wrapInRuntime(cause);
    }
}
/**
 * Refreshes this worker's outbound connections immediately, passing a callback
 * that re-registers this method on the refresh timer — so each refresh
 * schedules the next one, forming a self-perpetuating refresh loop.
 */
public void refreshConnections() {
    try {
        refreshConnections(() -> refreshConnectionsTimer.schedule(0, this::refreshConnections));
    } catch (Exception e) {
        throw Utils.wrapInRuntime(e);
    }
}
/**
 * Reports a component error: always logs it, and additionally persists it to
 * the cluster state, rate limited to at most {@code maxPerInterval} reports
 * per {@code errorIntervalSecs}-second window.
 */
@Override
public void report(Throwable error) {
    LOG.error("Error", error);
    // Start a fresh rate-limit window once the previous one has expired.
    if (Time.deltaSecs(intervalStartTime.get()) > errorIntervalSecs) {
        intervalErrors.set(0);
        intervalStartTime.set(Time.currentTimeSecs());
    }
    // Only persist while under the per-window cap; past the cap the error is
    // still logged above but not written to the cluster state.
    // NOTE(review): the window reset and the increment are separate atomic ops,
    // so concurrent reporters may slightly over/under-count near a window edge
    // — presumably acceptable for rate limiting; confirm if strictness matters.
    if (intervalErrors.incrementAndGet() <= maxPerInterval) {
        try {
            stormClusterState.reportError(stormId, componentId, Utils.hostname(),
                workerTopologyContext.getThisWorkerPort().longValue(), error);
        } catch (UnknownHostException e) {
            throw Utils.wrapInRuntime(e);
        }
    }
}
}
/**
 * Writes {@code data} to the node at {@code path} (normalized first) and
 * returns the resulting node {@code Stat}.
 */
public static Stat setData(CuratorFramework zk, String path, byte[] data) {
    try {
        String normalized = normalizePath(path);
        return zk.setData().forPath(normalized, data);
    } catch (Exception wrapped) {
        throw Utils.wrapInRuntime(wrapped);
    }
}
/**
 * Forces the connected server to sync the node at {@code path} (normalized)
 * with the ZooKeeper leader.
 */
public static void syncPath(CuratorFramework zk, String path) {
    try {
        String normalized = normalizePath(path);
        zk.sync().forPath(normalized);
    } catch (Exception wrapped) {
        throw Utils.wrapInRuntime(wrapped);
    }
}
/**
 * Assembles a {@code WorkerTopologyContext} for this worker from the
 * supervisor's on-disk layout (code/pid directories) and the worker's cached
 * task and resource state.
 */
public WorkerTopologyContext getWorkerTopologyContext() {
    try {
        String distRoot = ConfigUtils.supervisorStormDistRoot(conf, topologyId);
        String resourcesDir = ConfigUtils.supervisorStormResourcesPath(distRoot);
        String pidsDir = ConfigUtils.workerPidsRoot(conf, topologyId);
        return new WorkerTopologyContext(systemTopology, topologyConf, taskToComponent,
            componentToSortedTasks, componentToStreamToFields, topologyId, resourcesDir,
            pidsDir, port, localTaskIds, defaultSharedResources, userSharedResources,
            cachedTaskToNodePort, assignmentId);
    } catch (IOException e) {
        throw Utils.wrapInRuntime(e);
    }
}
/**
 * Creates the executor (spout or bolt, chosen by the component's declared
 * type) for the given executor id and wires up one {@code Task} per task id
 * the executor covers.
 */
public static Executor mkExecutor(WorkerState workerState, List<Long> executorId,
                                  Map<String, String> credentials) {
    WorkerTopologyContext context = workerState.getWorkerTopologyContext();
    List<Integer> taskIds = StormCommon.executorIdToTasks(executorId);
    String componentId = context.getComponentId(taskIds.get(0));
    String type = getExecutorType(context, componentId);

    final Executor executor;
    if (ClientStatsUtil.SPOUT.equals(type)) {
        executor = new SpoutExecutor(workerState, executorId, credentials);
    } else {
        executor = new BoltExecutor(workerState, executorId, credentials);
    }

    // Build one Task per covered task id, tracking the smallest id so the map
    // can be flattened into a dense array indexed from that base.
    Map<Integer, Task> tasksById = new HashMap<>();
    int lowestId = Integer.MAX_VALUE;
    for (Integer id : taskIds) {
        if (id < lowestId) {
            lowestId = id;
        }
        try {
            tasksById.put(id, new Task(executor, id));
        } catch (IOException e) {
            throw Utils.wrapInRuntime(e);
        }
    }
    executor.idToTaskBase = lowestId;
    executor.idToTask = Utils.convertToArray(tasksById, lowestId);
    return executor;
}
/**
 * Factory hook: creates a Pacemaker-backed state storage that layers a
 * {@code PacemakerClientPool} on top of a plain ZooKeeper state storage.
 */
@Override
public IStateStorage mkStore(Map<String, Object> config, Map<String, Object> auth_conf,
                             ClusterStateContext context) {
    try {
        IStateStorage zkStorage = new ZKStateStorageFactory().mkStore(config, auth_conf, context);
        return new PaceMakerStateStorage(new PacemakerClientPool(config), zkStorage);
    } catch (Exception e) {
        throw Utils.wrapInRuntime(e);
    }
}
}
public static void deleteNode(CuratorFramework zk, String path) { try { String npath = normalizePath(path); if (existsNode(zk, npath, false)) { zk.delete().deletingChildrenIfNeeded().forPath(normalizePath(path)); } } catch (Exception e) { if (Utils.exceptionCauseIsInstanceOf(KeeperException.NodeExistsException.class, e)) { // do nothing LOG.info("delete {} failed.", path, e); } else { throw Utils.wrapInRuntime(e); } } }
/**
 * Creates a node at {@code path} with the given payload, create mode and ACLs,
 * creating any missing parent nodes along the way.
 *
 * @return the path actually created (may differ from {@code path} for
 *         sequential modes)
 */
public static String createNode(CuratorFramework zk, String path, byte[] data,
                                CreateMode mode, List<ACL> acls) {
    try {
        return zk.create()
                 .creatingParentsIfNeeded()
                 .withMode(mode)
                 .withACL(acls)
                 .forPath(normalizePath(path), data);
    } catch (Exception e) {
        throw Utils.wrapInRuntime(e);
    }
}
/**
 * Lists the children of the node at {@code path}, optionally leaving a watch
 * on the node.
 */
public static List<String> getChildren(CuratorFramework zk, String path, boolean watch) {
    try {
        String npath = normalizePath(path);
        return watch
            ? zk.getChildren().watched().forPath(npath)
            : zk.getChildren().forPath(npath);
    } catch (Exception e) {
        throw Utils.wrapInRuntime(e);
    }
}
/**
 * Returns whether a node exists at {@code path}, optionally leaving a watch
 * on it.
 */
public static boolean existsNode(CuratorFramework zk, String path, boolean watch) {
    try {
        String npath = normalizePath(path);
        Stat stat = watch
            ? zk.checkExists().watched().forPath(npath)
            : zk.checkExists().forPath(npath);
        return stat != null;
    } catch (Exception e) {
        throw Utils.wrapInRuntime(e);
    }
}
public static byte[] getData(CuratorFramework zk, String path, boolean watch) { try { String npath = normalizePath(path); if (existsNode(zk, npath, watch)) { if (watch) { return zk.getData().watched().forPath(npath); } else { return zk.getData().forPath(npath); } } } catch (Exception e) { if (Utils.exceptionCauseIsInstanceOf(KeeperException.NoNodeException.class, e)) { // this is fine b/c we still have a watch from the successful exists call } else { throw Utils.wrapInRuntime(e); } } return null; }
/**
 * Shuts down every worker owned by this supervisor. When the supervisor's
 * read state is available, delegation handles the timeouts; otherwise a
 * one-off container launcher is built and the workers recorded on disk are
 * killed directly.
 */
public void shutdownAllWorkers(UniFunc<Slot> onWarnTimeout, UniFunc<Slot> onErrorTimeout) {
    if (readState != null) {
        readState.shutdownAllWorkers(onWarnTimeout, onErrorTimeout);
        return;
    }
    try {
        ContainerLauncher launcher = ContainerLauncher.make(getConf(), getId(),
            getThriftServerPort(), getSharedContext(), getMetricsRegistry(),
            getContainerMemoryTracker());
        killWorkers(SupervisorUtils.supervisorWorkerIds(conf), launcher);
    } catch (Exception e) {
        throw Utils.wrapInRuntime(e);
    }
}
/**
 * Runs the ack path for a spout-emitted tuple: notifies the spout, fires any
 * registered task hooks, and records ack latency stats.
 */
public void ackSpoutMsg(SpoutExecutor executor, Task taskData, Long timeDelta, TupleInfo tupleInfo) {
    try {
        ISpout spout = (ISpout) taskData.getTaskObject();
        int taskId = taskData.getTaskId();
        if (executor.getIsDebug()) {
            LOG.info("SPOUT Acking message {} {}", tupleInfo.getId(), tupleInfo.getMessageId());
        }
        spout.ack(tupleInfo.getMessageId());
        // avoid allocating SpoutAckInfo obj if not necessary
        if (!taskData.getUserContext().getHooks().isEmpty()) {
            new SpoutAckInfo(tupleInfo.getMessageId(), taskId, timeDelta).applyOn(taskData.getUserContext());
        }
        // NOTE(review): timeDelta appears to be null when no latency was
        // measured for this tuple — confirm against the caller; stats are only
        // recorded when ackers are enabled AND a latency is present.
        if (hasAckers && timeDelta != null) {
            executor.getStats().spoutAckedTuple(tupleInfo.getStream(), timeDelta,
                taskData.getTaskMetrics().getAcked(tupleInfo.getStream()));
        }
    } catch (Exception e) {
        throw Utils.wrapInRuntime(e);
    }
}
/**
 * Runs the fail path for a spout-emitted tuple: notifies the spout of the
 * failure, fires the {@code SpoutFailInfo} hook, and records failure stats
 * when a latency measurement is available.
 */
public void failSpoutMsg(SpoutExecutor executor, Task taskData, Long timeDelta,
                         TupleInfo tupleInfo, String reason) {
    try {
        ISpout taskSpout = (ISpout) taskData.getTaskObject();
        int id = taskData.getTaskId();
        if (executor.getIsDebug()) {
            LOG.info("SPOUT Failing {} : {} REASON: {}", tupleInfo.getId(), tupleInfo, reason);
        }
        taskSpout.fail(tupleInfo.getMessageId());
        SpoutFailInfo failInfo = new SpoutFailInfo(tupleInfo.getMessageId(), id, timeDelta);
        failInfo.applyOn(taskData.getUserContext());
        if (timeDelta != null) {
            executor.getStats().spoutFailedTuple(tupleInfo.getStream(), timeDelta,
                taskData.getTaskMetrics().getFailed(tupleInfo.getStream()));
        }
    } catch (Exception e) {
        throw Utils.wrapInRuntime(e);
    }
}