public void ack() {
    for (Tuple t : tupleBatch) {
        collector.ack(t);
    }
    tupleBatch.clear();
    forceFlush = false;
}
private void handleAck(Object id) {
    Tuple acked = _inputs.remove(id);
    if (acked == null) {
        throw new RuntimeException("Acked a non-existent or already acked/failed id: " + id);
    }
    _collector.ack(acked);
}
private void ack(List<Tuple> tuples) {
    if (!tuples.isEmpty()) {
        LOG.debug("Acking {} tuples", tuples.size());
        for (Tuple tuple : tuples) {
            collector.delegate.ack(tuple);
        }
        tuples.clear();
    }
}
@Override
public void execute(Tuple input) {
    // Ack only every other tuple; the flag toggles on each call.
    if (state) {
        collector.ack(input);
    }
    state = !state;
}
private void ackCommittedTuples() {
    List<Tuple> toAckTuples = getQueuedTuples();
    for (Tuple tuple : toAckTuples) {
        collector.ack(tuple);
    }
}
private void tryEmitAndAck(Collection<Values> values, Tuple tuple) {
    // Emit each output anchored to the input tuple, then ack the input.
    for (Values value : values) {
        collector.emit(tuple, value);
    }
    collector.ack(tuple);
}
/**
 * Calls {@link ExecutionResultHandler#onQuerySuccess(org.apache.storm.task.OutputCollector, org.apache.storm.tuple.Tuple)}
 * before acknowledging each input tuple.
 */
@Override
public void handle(OutputCollector collector, ExecutionResultHandler handler) {
    for (Tuple t : inputs) {
        handler.onQuerySuccess(collector, t);
    }
    for (Tuple t : inputs) {
        collector.ack(t);
    }
}
@Override
public void execute(Tuple tuple) {
    long now = System.currentTimeMillis();
    Long then = (Long) tuple.getValues().get(0);
    LOG.warn("Latency {} ", now - then);
    System.err.println(now - then);
    collector.ack(tuple);
}
@Override
public void execute(Tuple tuple) {
    log.info("Received tuple : {}", tuple.getValue(0));
    count++;
    if (count == 3) {
        // Fail the third tuple received; ack everything else.
        collector.fail(tuple);
    } else {
        collector.ack(tuple);
    }
}
public void execute(Tuple input) {
    String component = input.getSourceComponent();
    Map<String, List<FixedTuple>> captured = emitted_tuples.get(_name);
    if (!captured.containsKey(component)) {
        captured.put(component, new ArrayList<FixedTuple>());
    }
    captured.get(component).add(new FixedTuple(input.getSourceStreamId(), input.getValues()));
    _collector.ack(input);
}
/**
 * Forwards the checkpoint tuple downstream.
 *
 * @param checkpointTuple the checkpoint tuple
 * @param action          the action (prepare, commit, rollback or initstate)
 * @param txid            the transaction id
 */
protected void handleCheckpoint(Tuple checkpointTuple, Action action, long txid) {
    collector.emit(CHECKPOINT_STREAM_ID, checkpointTuple, new Values(txid, action));
    collector.ack(checkpointTuple);
}
private void countObjAndAck(Tuple tuple) {
    Object obj = tuple.getValue(0);
    counter.incrementCount(obj);
    collector.ack(tuple);
}
private void ack(RefCountedTuple tuple) {
    if (tuple.shouldAck()) {
        LOG.debug("ACKing tuple {}", tuple);
        outputCollector.ack(tuple.tuple());
        tuple.setAcked();
    }
}
public void execute(Tuple input) {
    Integer sourceId = input.getInteger(0);
    Long eventId = input.getLong(1);
    Long recentEvent = recentEventId.get(sourceId);
    if (null != recentEvent && eventId <= recentEvent) {
        String error = "Error: event id is not in strict order! event source Id: " + sourceId
                + ", last event Id: " + recentEvent + ", current event Id: " + eventId;
        _collector.emit(input, new Values(error));
    }
    recentEventId.put(sourceId, eventId);
    _collector.ack(input);
}
@Override
public void execute(Tuple input) {
    IMetricsConsumer.TaskInfo taskInfo = (IMetricsConsumer.TaskInfo) input.getValue(0);
    Collection<IMetricsConsumer.DataPoint> dataPoints = (Collection) input.getValue(1);
    Collection<IMetricsConsumer.DataPoint> expandedDataPoints = _expander.expandDataPoints(dataPoints);
    List<IMetricsConsumer.DataPoint> filteredDataPoints = getFilteredDataPoints(expandedDataPoints);
    MetricsTask metricsTask = new MetricsTask(taskInfo, filteredDataPoints);
    // If the queue is full, drop the oldest task to make room for the new one.
    while (!_taskQueue.offer(metricsTask)) {
        _taskQueue.poll();
    }
    _collector.ack(input);
}
@Override
public void execute(Tuple input) {
    // Anchor the emitted tuple to the same input tuple twice.
    ArrayList<Tuple> anchors = new ArrayList<>();
    anchors.add(input);
    anchors.add(input);
    collector.emit(anchors, new Values(1));
    collector.ack(input);
}
@Test
public void testCommit() throws Exception {
    Mockito.when(mockTuple.getSourceStreamId()).thenReturn("default");
    executor.execute(mockTuple);
    Mockito.when(mockCheckpointTuple.getSourceStreamId()).thenReturn(CheckpointSpout.CHECKPOINT_STREAM_ID);
    Mockito.when(mockCheckpointTuple.getValueByField(CHECKPOINT_FIELD_ACTION)).thenReturn(COMMIT);
    Mockito.when(mockCheckpointTuple.getLongByField(CHECKPOINT_FIELD_TXID)).thenReturn(new Long(0));
    Mockito.doNothing().when(mockOutputCollector).ack(mockCheckpointTuple);
    executor.execute(mockCheckpointTuple);
    Mockito.verify(mockBolt, Mockito.times(1)).preCommit(new Long(0));
    Mockito.verify(mockState, Mockito.times(1)).commit(new Long(0));
}
@Test
public void testHandleTuple() throws Exception {
    Mockito.when(mockTuple.getSourceStreamId()).thenReturn("default");
    executor.execute(mockTuple);
    Mockito.when(mockCheckpointTuple.getSourceStreamId()).thenReturn(CheckpointSpout.CHECKPOINT_STREAM_ID);
    Mockito.when(mockCheckpointTuple.getValueByField(CHECKPOINT_FIELD_ACTION)).thenReturn(INITSTATE);
    Mockito.when(mockCheckpointTuple.getLongByField(CHECKPOINT_FIELD_TXID)).thenReturn(new Long(0));
    Mockito.doNothing().when(mockOutputCollector).ack(mockCheckpointTuple);
    executor.execute(mockCheckpointTuple);
    Mockito.verify(mockBolt, Mockito.times(1)).execute(mockTuple);
    Mockito.verify(mockBolt, Mockito.times(1)).initState(Mockito.any(KeyValueState.class));
}
@Override
public void execute(Tuple tuple) {
    String orig = tuple.getString(0);
    String ret = getFromCache(orig);
    if (ret == null) {
        ret = orig + "!!!";
        addToCache(orig, ret);
    }
    _collector.emit(tuple, new Values(ret));
    _collector.ack(tuple);
}
@Test
public void testTwoTuplesOneFile() throws IOException {
    HdfsBolt bolt = makeHdfsBolt(hdfsURI, 2, 10000f);
    bolt.prepare(new Config(), topologyContext, collector);
    bolt.execute(tuple1);
    // Nothing is acked until the second tuple arrives and triggers the sync.
    verifyZeroInteractions(collector);
    bolt.execute(tuple2);
    verify(collector).ack(tuple1);
    verify(collector).ack(tuple2);
    Assert.assertEquals(1, countNonZeroLengthFiles(testRoot));
}