@Override
public void nextTuple() {
    // Throttle emission so the topology isn't flooded.
    Utils.sleep(100);
    final String[] candidates = {
        "the cow jumped over the moon",
        "an apple a day keeps the doctor away",
        "four score and seven years ago",
        "snow white and the seven dwarfs",
        "i am at two with nature"
    };
    // Pick one sentence uniformly at random and emit it unanchored
    // (no message id, so Storm does not track it).
    int pick = _rand.nextInt(candidates.length);
    _collector.emit(new Values(candidates[pick]));
}
@Override
public List<Integer> emit(String ignore, List<Object> values, Object msgId) {
    // Assign a fresh random id to this batch; it is prepended to the tuple below.
    long batchIdVal = _rand.nextLong();
    Object batchId = new RichSpoutBatchId(batchIdVal);
    // Records which coordination-tuple anchors must be acked before the
    // spout's original msgId can be completed.
    FinishCondition finish = new FinishCondition();
    finish.msgId = msgId;
    List<Integer> tasks = new ArrayList<>();
    // Emit the batch tuple (batch id + user values); CollectorCb captures the
    // ids of the tasks that actually received it.
    _collector.emit(_stream, new ConsList(batchId, values), new CollectorCb(tasks));
    _collector.flush();
    Set<Integer> outTasksSet = new HashSet<>(tasks);
    // Tell every coordination-stream consumer how many tuples (0 or 1)
    // of this batch it should expect.
    for (Integer t : _outputTasks) {
        int count = 0;
        if (outTasksSet.contains(t)) {
            count = 1;
        }
        // Each coordination tuple gets its own random anchor so its ack can
        // be mapped back to the batch via _msgIdToBatchId.
        long r = _rand.nextLong();
        _collector.emitDirect(t, _coordStream, new Values(batchId, count), r);
        finish.vals.add(r);
        _msgIdToBatchId.put(r, batchIdVal);
    }
    _finishConditions.put(batchIdVal, finish);
    return tasks;
}
// Forward error reports to the wrapped collector unchanged.
@Override
public void reportError(Throwable error) {
    _collector.reportError(error);
}
} // closes the enclosing class — its opening brace is outside this chunk
@Override
public void nextTuple() {
    // Emit exactly one batch worth of tuples, cycling through CHOICES
    // in round-robin order across calls.
    for (int remaining = tupleNumPerBatch; remaining > 0; remaining--) {
        collector.emit(new Values(CHOICES[index]));
        index = (index + 1) % CHOICES.length;
    }
    // Mark the end of the batch, then report send throughput.
    collector.emitBarrier();
    printSendTps(tupleNumPerBatch);
}
@Override
public void close() {
    // Only broadcast finish markers when the topology is being killed.
    if (!JStormUtils.isKilledStatus(context)) {
        return;
    }
    JStormUtils.sleepMs(10);
    int sendNum = 0;
    // Send an empty "finish" tuple directly to every downstream task.
    for (List<Integer> taskList : _targetTasks.values()) {
        for (Integer taskId : taskList) {
            _collector.emitDirect(taskId, new Values());
            sendNum++;
        }
    }
    _collector.flush();
    LOG.info("this component already sent {} finish messages", sendNum);
    // Give the messages a moment to drain before the worker goes away.
    JStormUtils.sleepMs(100);
}
@Override
public void open(Map conf, TopologyContext context, SpoutOutputCollector collector) {
    // Wrap the caller's collector so the delegate's emits can be redirected
    // onto the overridden stream.
    _delegate.open(conf, context, new SpoutOutputCollector(new StreamOverrideCollector(collector)));
    // Gather the task ids of every component consuming the coordination stream.
    _outputTasks = new ArrayList<>();
    Map<String, Grouping> coordTargets =
            Utils.get(context.getThisTargets(), _coordStream, new HashMap<String, Grouping>());
    for (String component : coordTargets.keySet()) {
        _outputTasks.addAll(context.getComponentTasks(component));
    }
    _rand = new Random(Utils.secureRandomLong());
}
@Override
public void open(Map conf, TopologyContext context, SpoutOutputCollector collector) {
    // Interpose an ack-aware collector between the user spout and the real one.
    SpoutOutputCollectorCb ackingCollector = new AckSpoutOutputCollector(collector.getDelegate());
    spoutExecutor.open(conf, context, new SpoutOutputCollector(ackingCollector));
    // Per-task bookkeeping: pending-batch tracker, metrics, and an RNG used
    // for generating message ids.
    tracker = new AckPendingBatchTracker<>();
    taskStats = new TaskBaseMetric(context.getTopologyId(), context.getThisComponentId(), context.getThisTaskId());
    random = new Random(Utils.secureRandomLong());
}
/**
 * Drives the transaction pipeline: emits a commit for the current
 * transaction once it is fully processed, then opens new batches up to the
 * max-active window.
 */
private void sync() {
    // note that sometimes the tuples active may be less than max_spout_pending, e.g.
    // max_spout_pending = 3
    // tx 1, 2, 3 active, tx 2 is acked. there won't be a commit for tx 2 (because tx 1 isn't committed yet),
    // and there won't be a batch for tx 4 because there's max_spout_pending tx active
    TransactionStatus maybeCommit = _activeTx.get(_currTransaction);
    if (maybeCommit != null && maybeCommit.status == AttemptStatus.PROCESSED) {
        // Only the oldest active transaction is ever committed; flip the
        // status first so the commit is not re-emitted on the next sync.
        maybeCommit.status = AttemptStatus.COMMITTING;
        LOG.debug("send commit stream {}", maybeCommit);
        _collector.emit(TRANSACTION_COMMIT_STREAM_ID, new Values(maybeCommit.attempt), maybeCommit.attempt);
    }
    try {
        if (_activeTx.size() < _maxTransactionActive) {
            BigInteger curr = _currTransaction;
            // Walk forward from the current id, opening each missing
            // transaction whose state is already cached or whose coordinator
            // reports it is ready to produce a batch.
            for (int i = 0; i < _maxTransactionActive; i++) {
                if ((_coordinatorState.hasCache(curr) || _coordinator.isReady()) && !_activeTx.containsKey(curr)) {
                    TransactionAttempt attempt = new TransactionAttempt(curr, _rand.nextLong());
                    // getState may throw FailedException — caught below so one
                    // failed metadata fetch doesn't kill the sync loop.
                    Object state = _coordinatorState.getState(curr, _initializer);
                    _activeTx.put(curr, new TransactionStatus(attempt));
                    LOG.debug("send batch stream {}", attempt);
                    _collector.emit(TRANSACTION_BATCH_STREAM_ID, new Values(attempt, state, previousTransactionId(_currTransaction)), attempt);
                }
                curr = nextTransactionId(curr);
            }
        }
    } catch (FailedException e) {
        LOG.warn("Failed to get metadata for a transaction", e);
    }
}
// NOTE(review): fragment — the enclosing method and control flow (loop /
// branch boundaries) are outside this chunk; comments below are hedged.
// Remote-DRPC path (presumably inside a loop over DRPC servers, index i):
// record the server's port in the return info and emit the request anchored
// by a message id carrying the request id and the server index.
returnInfo.put("port", client.getPort());
gotRequest = true;
_collector.emit(new Values(req.get_func_args(), JSONValue.toJSONString(returnInfo)), new DRPCMessageId(req.get_request_id(), i));
break;
// Local-DRPC path: port 0 appears to mark the in-process server — confirm.
returnInfo.put("port", 0);
gotRequest = true;
_collector.emit(new Values(req.get_func_args(), JSONValue.toJSONString(returnInfo)), new DRPCMessageId(req.get_request_id(), 0));
// Brief back-off — presumably the idle/no-request path; verify against caller.
Utils.sleep(1);
@Override
public void nextTuple() {
    // Lazily open the source file on the first call.
    if (reader == null) {
        try {
            reader = new BufferedReader(new FileReader(sourceFile));
        } catch (FileNotFoundException e) {
            collector.reportError(e);
            // BUGFIX: bail out here — previously execution fell through with
            // reader still null and reader.readLine() below threw an NPE.
            return;
        }
    }
    try {
        // Emit one line per call; temp is a field shared with the caller's class.
        if ((temp = reader.readLine()) != null) {
            System.err.println("Reading rule update:" + temp);
            collector.emit(new Values(temp));
        } else {
            // End of file for now — back off before polling for appended lines.
            Thread.sleep(2000);
        }
    } catch (IOException e) {
        collector.reportError(e);
    } catch (InterruptedException e) {
        // Restore the interrupt flag so the executor can observe the interruption.
        Thread.currentThread().interrupt();
        collector.reportError(e);
    }
}
/**
 * Drives the transaction pipeline: emits a commit for the current
 * transaction once it is fully processed, then opens new batches up to the
 * max-active window.
 */
private void sync() {
    // note that sometimes the tuples active may be less than max_spout_pending, e.g.
    // max_spout_pending = 3
    // tx 1, 2, 3 active, tx 2 is acked. there won't be a commit for tx 2 (because tx 1 isn't committed yet),
    // and there won't be a batch for tx 4 because there's max_spout_pending tx active
    TransactionStatus maybeCommit = _activeTx.get(_currTransaction);
    if(maybeCommit!=null && maybeCommit.status == AttemptStatus.PROCESSED) {
        // Flip the status first so the commit is not re-emitted on the next sync.
        maybeCommit.status = AttemptStatus.COMMITTING;
        _collector.emit(TRANSACTION_COMMIT_STREAM_ID, new Values(maybeCommit.attempt), maybeCommit.attempt);
    }
    try {
        if(_activeTx.size() < _maxTransactionActive) {
            BigInteger curr = _currTransaction;
            // NOTE(review): unlike the other sync() variant in this file, this
            // one opens batches without a hasCache()/isReady() gate — confirm
            // that unconditional opening is intended here.
            for(int i=0; i<_maxTransactionActive; i++) {
                if(!_activeTx.containsKey(curr)) {
                    TransactionAttempt attempt = new TransactionAttempt(curr, Utils.randomLong());
                    // getState may throw FailedException — caught below so one
                    // failed metadata fetch doesn't kill the sync loop.
                    Object state = _coordinatorState.getState(curr, _initializer);
                    _activeTx.put(curr, new TransactionStatus(attempt));
                    _collector.emit(TRANSACTION_BATCH_STREAM_ID, new Values(attempt, state, previousTransactionId(_currTransaction)), attempt);
                }
                curr = nextTransactionId(curr);
            }
        }
    } catch(FailedException e) {
        LOG.warn("Failed to get metadata for a transaction", e);
    }
}
/**
 * Emits an unanchored tuple to the default output stream. Because the
 * message id is {@code null}, Storm performs no tracking: neither
 * {@code ack} nor {@code fail} will ever be invoked for this tuple. The
 * emitted values must be immutable.
 *
 * @param tuple the values to emit
 * @return the ids of the tasks that received the tuple
 */
public List<Integer> emit(List<Object> tuple) {
    return emit(tuple, null);
}
// NOTE(review): fragment — the enclosing method and the close of this
// if/else are outside this chunk.
Object messageId = shellMsg.getId();
if (task == 0) {
    // Task id 0 appears to mean "no specific target": regular emit with a
    // callback that reports the receiving tasks back to the shell — confirm.
    _collector.emit(stream, tuple, messageId, new ShellEmitCb(shellMsg));
} else {
    // Otherwise deliver the tuple directly to the requested task.
    _collector.emitDirect((int) task.longValue(), stream, tuple, messageId);
/**
 * Emits up to {@code config.batchSendCount} buffered Kafka messages as
 * tuples, anchored with their partition/offset so acks advance the offset.
 *
 * @param collector collector to emit tuples through
 * @return EMIT_END when the buffer is drained, EMIT_MORE when messages remain
 */
public EmitState emit(SpoutOutputCollector collector) {
    // Refill the local buffer from Kafka once it is drained.
    if (emittingMessages.isEmpty()) {
        fillMessages();
    }
    int count = 0;
    while (true) {
        MessageAndOffset toEmitMsg = emittingMessages.pollFirst();
        if (toEmitMsg == null) {
            return EmitState.EMIT_END;
        }
        count++;
        Iterable<List<Object>> tups = generateTuples(toEmitMsg.message());
        if (tups != null) {
            for (List<Object> tuple : tups) {
                // PERF FIX: only copy and decode the payload when debug
                // logging is actually enabled — previously the byte-array
                // copy and String allocation ran for every tuple emitted.
                if (LOG.isDebugEnabled()) {
                    LOG.debug("emit message {}", new String(Utils.toByteArray(toEmitMsg.message().payload())));
                }
                collector.emit(tuple, new KafkaMessageId(partition, toEmitMsg.offset()));
            }
            // Stop once a full batch has been sent; leftovers signal EMIT_MORE.
            if (count >= config.batchSendCount) {
                break;
            }
        } else {
            // Message produced no tuples; ack it so the offset still advances.
            ack(toEmitMsg.offset());
        }
    }
    if (emittingMessages.isEmpty()) {
        return EmitState.EMIT_END;
    } else {
        return EmitState.EMIT_MORE;
    }
}
/**
 * Sends a tuple straight to a single task on the default output stream. The
 * default stream must have been declared as a direct stream, and the target
 * task must consume it via a direct grouping. The emitted values must be
 * immutable.
 *
 * @param taskId    id of the task that should receive the tuple
 * @param tuple     the values to emit
 * @param messageId id used for ack/fail tracking, or {@code null} for none
 */
public void emitDirect(int taskId, List<Object> tuple, Object messageId) {
    emitDirect(taskId, Utils.DEFAULT_STREAM_ID, tuple, messageId);
}
public void open(Map conf, TopologyContext context, SpoutOutputCollector collector) {
    // Route the delegate's emits through a tracking collector so they can be
    // observed before reaching the real output collector.
    SpoutTrackOutputCollector trackingCollector = new SpoutTrackOutputCollector(collector);
    _tracker = trackingCollector;
    _delegate.open(conf, context, new SpoutOutputCollector(trackingCollector));
}