/**
 * Returns the set of groups for a user, serving from a rotating cache when
 * possible.
 *
 * @param user get groups for this user
 * @return set of groups for the given user (an empty result is not cached)
 * @throws IOException if the underlying Unix group lookup fails
 */
@Override
public Set<String> getGroups(String user) throws IOException {
    synchronized (this) {
        // Expire stale cache buckets before consulting the cache.
        rotateIfNeeded();
        if (cachedGroups.containsKey(user)) {
            return cachedGroups.get(user);
        }
    }
    // The lookup deliberately happens outside the lock so a slow shell call
    // does not block other readers; two threads may race and both compute,
    // which is benign — the later put overwrites with an equivalent value.
    Set<String> groups = getUnixGroups(user);
    // Empty results are not cached, so a transient lookup failure does not
    // stick around until the next cache rotation.
    if (!groups.isEmpty()) {
        synchronized (this) {
            cachedGroups.put(user, groups);
        }
    }
    return groups;
}
@Override public void emitBatch(TransactionAttempt tx, Object coordinatorMeta, TridentCollector collector) { long txid = tx.getTransactionId(); long now = System.currentTimeMillis(); if (now - lastRotate > rotateTime) { Map<Long, List<Object>> failed = idsMap.rotate(); for (Long id : failed.keySet()) { //TODO: this isn't right... it's not in the map anymore fail(id); } lastRotate = now; } if (idsMap.containsKey(txid)) { fail(txid); } _collector.reset(collector); if (!prepared) { _spout.open(_conf, _context, new SpoutOutputCollector(_collector)); prepared = true; } for (int i = 0; i < _maxBatchSize; i++) { _spout.nextTuple(); if (_collector.numEmitted < i) { break; } } idsMap.put(txid, _collector.ids); _collector.pendingCount = idsMap.size(); }
/**
 * Prepares this bolt: sets up the timeout-bucketed pending map and retains
 * the output collector for later use.
 *
 * @param topoConf  topology configuration (unused)
 * @param context   topology context (unused)
 * @param collector collector used to emit and ack tuples
 */
@Override
public void prepare(Map<String, Object> topoConf, TopologyContext context, OutputCollector collector) {
    this.pending = new RotatingMap<>(TIMEOUT_BUCKET_NUM);
    this.collector = collector;
}
/**
 * Returns the time at which cleanup was first detected for the given
 * topology, recording the current time the first time the topology is seen.
 *
 * @param topologyId id of the topology being cleaned up
 * @return millisecond timestamp of the first cleanup detection
 */
private static long getTopologyCleanupDetectedTime(String topologyId) {
    // computeIfAbsent records "now" exactly once per topology id, and is
    // atomic when the backing map is concurrent — unlike the previous
    // get / null-check / put sequence, which could race and record two
    // different first-detection times.
    return topologyCleanupDetected.computeIfAbsent(topologyId, id -> Time.currentTimeMillis());
}
// NOTE(review): fragment — the braces in this span do not balance; it appears
// to splice several branches of an executor's tuple-dispatch together (flush
// on system flush, rotate the pending map on system tick, metrics-tick
// handling, then ack/fail bookkeeping). The pending.put(id, pendingForId)
// immediately followed by pending.remove(id) looks suspicious — presumably
// the put belongs to a reset-timeout branch and the remove to an ack/fail
// branch; confirm against the complete method before editing.
spoutOutputCollector.flush(); } else if (streamId.equals(Constants.SYSTEM_TICK_STREAM_ID)) { pending.rotate(); } else if (streamId.equals(Constants.METRICS_TICK_STREAM_ID)) { metricsTick(idToTask.get(taskId - idToTaskBase), tuple); TupleInfo pendingForId = pending.get(id); if (pendingForId != null) { pending.put(id, pendingForId); TupleInfo tupleInfo = pending.remove(id); if (tupleInfo != null && tupleInfo.getMessageId() != null) { if (taskId != tupleInfo.getTaskId()) {
// NOTE(review): fragment — unbalanced braces and a dangling "name);" token
// indicate lines were lost in extraction. Visible intent: rotate the batch
// map, warn about each timed-out batch id, fail a duplicate transaction
// attempt if the txid is already pending, then store the new batch messages
// under the txid. Compare with the near-identical copy of this logic
// elsewhere in this file before trusting this text.
Map<Long, List<Message>> failed = batchMessageMap.rotate(); for (Long id : failed.keySet()) { LOG.warn("TIMED OUT batch with transaction id " + id + " for " + name); if (batchMessageMap.containsKey(tx.getTransactionId())) { LOG.warn("FAILED duplicate batch with transaction id " + tx.getTransactionId() + "/" + tx.getAttemptId() + " for " + name); fail(tx.getTransactionId(), batchMessageMap.get(tx.getTransactionId())); name); batchMessageMap.put(tx.getTransactionId(), batchMessages);
/**
 * Removes and returns the entry for {@code key}, holding the shared lock for
 * the duration of the underlying map operation.
 */
public Object remove(K key) {
    synchronized (_lock) {
        Object removed = _rotatingMap.remove(key);
        return removed;
    }
}
/**
 * Associates {@code value} with {@code key}, holding the shared lock for the
 * duration of the underlying map operation.
 */
public void put(K key, V value) {
    synchronized (_lock) {
        _rotatingMap.put(key, value);
    }
}
private void rotateIfNeeded() { long nowMs = Time.currentTimeMillis(); if (nowMs >= lastRotationMs + timeoutMs) { //Rotate once per timeout period that has passed since last time this was called. //This is necessary since this method may be called at arbitrary intervals. int rotationsToDo = (int) ((nowMs - lastRotationMs) / timeoutMs); for (int i = 0; i < rotationsToDo; i++) { cachedGroups.rotate(); } lastRotationMs = nowMs; } }
@Override protected void removeQuery(String id) { // Only update count if query was in queries or postFinishBuffer. if (queries.containsKey(id) || postFinishBuffer.containsKey(id)) { updateCount(activeQueriesCount, -1L); } queries.remove(id); postFinishBuffer.remove(id); bufferedMetadata.remove(id); // It should not be in the preStartBuffer under normal operations but could be if it was killed. preStartBuffer.remove(id); }
/**
 * Returns the value mapped to {@code key} (or {@code null} if absent),
 * holding the shared lock for the duration of the lookup.
 */
public V get(K key) {
    synchronized (_lock) {
        V value = _rotatingMap.get(key);
        return value;
    }
}
/**
 * Reports whether {@code key} is currently mapped, holding the shared lock
 * for the duration of the check.
 */
public boolean containsKey(K key) {
    synchronized (_lock) {
        boolean present = _rotatingMap.containsKey(key);
        return present;
    }
}
/**
 * Returns the current number of entries across all buckets, holding the
 * shared lock for the duration of the count.
 */
public int size() {
    synchronized (_lock) {
        int count = _rotatingMap.size();
        return count;
    }
}
// NOTE(review): fragment — braces do not balance, the tick branch has a bare
// "return" not closed by a brace, and the "curr == null -> new AckObject /
// pending.put" sequence appears duplicated (one copy even puts the same entry
// twice). This looks like a mangled collapse of an acker bolt's execute()
// dispatch over the INIT / ACK / RESET_TIMEOUT / SYSTEM_FLUSH streams plus
// the completion path (emit time delta, remove when XOR value hits 0).
// TODO reconcile against the original acker source before editing this code.
@Override public void execute(Tuple input) { if (TupleUtils.isTick(input)) { Map<Object, AckObject> tmp = pending.rotate(); LOG.debug("Number of timeout tuples:{}", tmp.size()); return; String streamId = input.getSourceStreamId(); Object id = input.getValue(0); AckObject curr = pending.get(id); if (ACKER_INIT_STREAM_ID.equals(streamId)) { if (curr == null) { curr = new AckObject(); pending.put(id, curr); if (curr == null) { curr = new AckObject(); pending.put(id, curr); pending.put(id, curr); } else if (ACKER_RESET_TIMEOUT_STREAM_ID.equals(streamId)) { resetTimeout = true; pending.put(id, curr); } else if (Constants.SYSTEM_FLUSH_STREAM_ID.equals(streamId)) { collector.flush(); Values tuple = new Values(id, getTimeDeltaMillis(curr.startTime)); if (curr.val == 0) { pending.remove(id);
// NOTE(review): fragment — unbalanced braces indicate missing lines; this is
// a near-duplicate (older formatting, no spaces around operators) of the
// batch-rotation logic seen elsewhere in this file: rotate the batch map,
// warn about timed-out batch ids, fail a duplicate transaction attempt, then
// store the new batch messages under the txid. Presumably the trace line sits
// inside an else branch of the duplicate check — confirm in the full source.
Map<Long, List<Message>> failed = batchMessageMap.rotate(); for(Long id: failed.keySet()) { LOG.warn("TIMED OUT batch with transaction id "+id+" for "+name); if(batchMessageMap.containsKey(tx.getTransactionId())) { LOG.warn("FAILED duplicate batch with transaction id "+tx.getTransactionId()+"/"+tx.getAttemptId()+" for "+name); fail(tx.getTransactionId(), batchMessageMap.get(tx.getTransactionId())); LOG.trace("No items to acknowledge for batch with transaction id "+tx.getTransactionId()+"/"+tx.getAttemptId()+" for "+name); batchMessageMap.put(tx.getTransactionId(), batchMessages);
/**
 * Fails every spout message id recorded for the given batch and drops the
 * batch from the pending map. A no-op when the batch id is unknown.
 *
 * @param batchId transaction id of the batch to fail
 */
private void fail(long batchId) {
    List<Object> ids = (List<Object>) idsMap.remove(batchId);
    if (ids == null) {
        return;
    }
    for (Object id : ids) {
        _spout.fail(id);
    }
}
// NOTE(review): fragment — statements from the middle of a spout-executor
// method (no enclosing signature visible in this span). Records the tuple
// info under its root id, then sends the acker the INIT message — root id,
// XOR of the anchor sequence, and this task's id — unanchored on the
// ACKER_INIT stream.
pending.put(rootId, info); List<Object> ackInitTuple = new Values(rootId, Utils.bitXorVals(ackSeq), this.taskId); taskData.sendUnanchored(Acker.ACKER_INIT_STREAM_ID, ackInitTuple, executor.getExecutorTransfer(), executor.getPendingEmits());
// NOTE(review): expiration-thread loop for the rotating map; the trailing
// "});" closes an anonymous Runnable/Thread declared outside this span, so
// the span is left byte-identical. Behavior: sleep for sleepTime, rotate the
// map under the shared lock (collecting the expired bucket), then invoke the
// expiration callback on each dead entry outside the lock. The empty catch
// of InterruptedException is the shutdown path — interruption ends the loop —
// though the thread does not re-interrupt itself; confirm that is intended.
public void run() { try { while (true) { Map<K, V> dead = null; Time.sleep(sleepTime); synchronized (_lock) { dead = _rotatingMap.rotate(); } if (_callback != null) { for (Entry<K, V> entry : dead.entrySet()) { _callback.expire(entry.getKey(), entry.getValue()); } } } } catch (InterruptedException ex) { } } });
@Override protected void removeQuery(String id) { // Only update count if query was in queries or postFinishBuffer. if (queries.containsKey(id) || postFinishBuffer.containsKey(id)) { updateCount(activeQueriesCount, -1L); } queries.remove(id); postFinishBuffer.remove(id); bufferedMetadata.remove(id); // It should not be in the preStartBuffer under normal operations but could be if it was killed. preStartBuffer.remove(id); }
/**
 * Looks up {@code key} in the underlying rotating map while holding the
 * shared lock; returns {@code null} when the key is absent.
 */
public V get(K key) {
    synchronized (_lock) {
        return _rotatingMap.get(key);
    }
}