/**
 * Associates {@code value} with {@code key} in the underlying rotating map.
 * The entire operation runs under {@code _lock}, so concurrent callers are serialized.
 *
 * @param key   key to store under
 * @param value value to associate with the key
 */
public void put(K key, V value) {
    synchronized (_lock) {
        _rotatingMap.put(key, value);
    }
}
/**
 * Returns the time at which cleanup was first detected for the given topology,
 * recording the current time on the first call for that topology id.
 *
 * <p>Uses {@link java.util.Map#computeIfAbsent} instead of the original
 * get/null-check/put sequence: it is one lookup instead of two, and if the
 * backing map is a {@code ConcurrentHashMap} the first-detection timestamp is
 * recorded atomically (the original check-then-act could race and overwrite it).
 *
 * @param topologyId id of the topology being cleaned up
 * @return the first-detected-for-deletion timestamp in milliseconds
 */
private static long getTopologyCleanupDetectedTime(String topologyId) {
    return topologyCleanupDetected.computeIfAbsent(topologyId, id -> Time.currentTimeMillis());
}
/**
 * Returns the set of groups for a user, consulting the cache first.
 *
 * <p>The cache is only read and written while holding the instance lock; the
 * (potentially slow) unix lookup deliberately runs outside the lock so it does
 * not block other callers. Empty results are not cached.
 *
 * @param user get groups for this user
 * @return the user's groups (possibly empty)
 * @throws IOException if the underlying group lookup fails
 */
@Override
public Set<String> getGroups(String user) throws IOException {
    synchronized (this) {
        rotateIfNeeded();
        if (cachedGroups.containsKey(user)) {
            return cachedGroups.get(user);
        }
    }
    // Cache miss: resolve outside the lock, then publish the result if non-empty.
    Set<String> resolved = getUnixGroups(user);
    if (!resolved.isEmpty()) {
        synchronized (this) {
            cachedGroups.put(user, resolved);
        }
    }
    return resolved;
}
name); batchMessageMap.put(tx.getTransactionId(), batchMessages);
TupleInfo pendingForId = pending.get(id); if (pendingForId != null) { pending.put(id, pendingForId);
if (curr == null) { curr = new AckObject(); pending.put(id, curr); if (curr == null) { curr = new AckObject(); pending.put(id, curr); pending.put(id, curr); } else if (ACKER_RESET_TIMEOUT_STREAM_ID.equals(streamId)) { resetTimeout = true; curr = new AckObject(); pending.put(id, curr); } else if (Constants.SYSTEM_FLUSH_STREAM_ID.equals(streamId)) { collector.flush();
// Emits one batch of up to _maxBatchSize tuples from the wrapped spout for this
// transaction attempt, tracking the emitted tuple ids under the transaction id.
@Override public void emitBatch(TransactionAttempt tx, Object coordinatorMeta, TridentCollector collector) {
    long txid = tx.getTransactionId();
    long now = System.currentTimeMillis();
    // Rotate the ids map on a timer; entries rotated out are treated as failed.
    if (now - lastRotate > rotateTime) {
        Map<Long, List<Object>> failed = idsMap.rotate();
        for (Long id : failed.keySet()) {
            //TODO: this isn't right... it's not in the map anymore
            fail(id);
        }
        lastRotate = now;
    }
    // txid already present — presumably a replayed attempt; fail the prior batch's ids.
    // NOTE(review): confirm replay semantics against the caller's retry behavior.
    if (idsMap.containsKey(txid)) {
        fail(txid);
    }
    _collector.reset(collector);
    // Lazily open the wrapped spout on the first batch.
    if (!prepared) {
        _spout.open(_conf, _context, new SpoutOutputCollector(_collector));
        prepared = true;
    }
    for (int i = 0; i < _maxBatchSize; i++) {
        _spout.nextTuple();
        // Stop early if the spout has stopped emitting (fewer emits than nextTuple calls).
        if (_collector.numEmitted < i) {
            break;
        }
    }
    // Record this batch's tuple ids and expose the number of in-flight batches.
    idsMap.put(txid, _collector.ids);
    _collector.pendingCount = idsMap.size();
}
pending.put(rootId, info); List<Object> ackInitTuple = new Values(rootId, Utils.bitXorVals(ackSeq), this.taskId); taskData.sendUnanchored(Acker.ACKER_INIT_STREAM_ID, ackInitTuple, executor.getExecutorTransfer(), executor.getPendingEmits());
new TrackedBatch(new BatchInfo(batchGroup, id, _bolt.initBatchState(batchGroup, id)), _coordConditions.get(batchGroup), id.getAttemptId()); _batches.put(id.getId(), tracked);
/**
 * Thread-safe insert: delegates to the rotating map while holding the shared lock.
 *
 * @param key   map key
 * @param value value to store for the key
 */
public void put(K key, V value) {
    synchronized (_lock) {
        _rotatingMap.put(key, value);
    }
}
private void setupQuery(String id, String query, Metadata metadata, Querier querier) { updateCount(createdQueriesCount, 1L); bufferedMetadata.put(id, metadata); // If the query should be post-finish buffered, it should not be pre-start delayed. if (querier.shouldBuffer()) { queries.put(id, querier); updateCount(activeQueriesCount, 1L); log.info("Received and started query {}", querier.toString()); } else { preStartBuffer.put(id, querier); log.info("Received but delaying starting query {}", id); } }
private void setupQuery(String id, String query, Metadata metadata, Querier querier) { updateCount(createdQueriesCount, 1L); bufferedMetadata.put(id, metadata); // If the query should be post-finish buffered, it should not be pre-start delayed. if (querier.shouldBuffer()) { queries.put(id, querier); updateCount(activeQueriesCount, 1L); log.info("Received and started query {}", querier.toString()); } else { preStartBuffer.put(id, querier); log.info("Received but delaying starting query {}", id); } }
/**
 * Emits a finished query immediately, or moves it to the post-finish buffer.
 *
 * <p>Three cases, checked in order:
 * <ol>
 *   <li>Not buffered: emit now (a delayed query that somehow finished is cleaned up too —
 *       no query should need both delaying and buffering).</li>
 *   <li>Closed after finishing: this was its last window, so emit now.</li>
 *   <li>Still active and bufferable: move it from {@code queries} into the
 *       post-finish buffer until it closes or a tick emits it.</li>
 * </ol>
 *
 * @param id      unique id of the finished query
 * @param querier the querier that just finished
 */
private void emitOrBufferFinished(String id, Querier querier) {
    if (!querier.shouldBuffer()) {
        log.debug("Emitting query since it shouldn't be buffered {}", id);
        emitFinished(id, querier);
        return;
    }
    if (querier.isClosed()) {
        log.debug("Emitting query since it finished but this is the last window for it {}", id);
        emitFinished(id, querier);
        return;
    }
    if (queries.containsKey(id)) {
        log.debug("Starting to buffer while waiting for more final results for query {}", id);
        queries.remove(id);
        postFinishBuffer.put(id, querier);
    }
}
// Emits a finished query immediately, or moves it into the post-finish buffer.
// Branch order matters: the log message and action depend on which case fires first.
private void emitOrBufferFinished(String id, Querier querier) {
    /*
     * Three cases:
     * 1) If we shouldn't buffer, then emit it and return. If it was being delayed and somehow finished, it is
     *    cleaned up and removed. There should be no query that needs delaying AND buffering.
     * 2) If the query became closed after it finished (wherever it is), we emit it. We should still honor isClosed.
     * 3) If it should buffer and it isn't closed, postFinishBuffer it till it becomes closed or ticks emit it.
     */
    if (!querier.shouldBuffer()) {
        log.debug("Emitting query since it shouldn't be buffered {}", id);
        emitFinished(id, querier);
    } else if (querier.isClosed()) {
        log.debug("Emitting query since it finished but this is the last window for it {}", id);
        emitFinished(id, querier);
    } else if (queries.containsKey(id)) {
        // Move the query from the active map to the post-finish buffer.
        log.debug("Starting to buffer while waiting for more final results for query {}", id);
        queries.remove(id);
        postFinishBuffer.put(id, querier);
    }
}
LOG.trace("No items to acknowledge for batch with transaction id "+tx.getTransactionId()+"/"+tx.getAttemptId()+" for "+name); batchMessageMap.put(tx.getTransactionId(), batchMessages);
LOG.trace("No items to acknowledge for batch with transaction id "+tx.getTransactionId()+"/"+tx.getAttemptId()+" for "+name); batchMessageMap.put(tx.getTransactionId(), batchMessages);
idsMap.put(txid, _collector.ids); _collector.pendingCount = idsMap.size();
_batches.put(id.getId(), tracked);