/**
 * Attaches a stage that co-groups this stage with {@code stage1} and runs
 * the supplied two-input aggregate operation over both inputs. For every
 * distinct grouping key observed across the inputs it emits a single
 * {@code Map.Entry(key, result)}, where the result is what {@code aggrOp}
 * produced from all the items sharing that key.
 * <p>
 * Use this variant only when your aggregation genuinely needs to combine
 * both input streams inside one accumulator (see the
 * {@linkplain AggregateOperation2 two-input aggregate operation} Javadoc
 * for a simple example). If each input can be aggregated independently,
 * prefer
 * {@link #aggregate2(AggregateOperation1, BatchStageWithKey, AggregateOperation1)
 * stage0.aggregate2(aggrOp0, stage1, aggrOp1)}: it has a simpler API and
 * lets you reuse the predefined single-input operations.
 *
 * @see com.hazelcast.jet.aggregate.AggregateOperations AggregateOperations
 * @param stage1 the other stage to co-group with this one
 * @param aggrOp the two-input aggregate operation to perform
 * @param <T1>   type of items in {@code stage1}
 * @param <R>    type of the aggregation result
 */
@Nonnull
default <T1, R> BatchStage<Entry<K, R>> aggregate2(
        @Nonnull BatchStageWithKey<T1, ? extends K> stage1,
        @Nonnull AggregateOperation2<? super T, ? super T1, ?, R> aggrOp
) {
    // Delegate to the mapping variant, wrapping each (key, result) pair
    // into a Map.Entry.
    return aggregate2(stage1, aggrOp, (key, result) -> Util.entry(key, result));
}
/**
 * Attaches a stage that co-groups this stage with {@code stage1} and, for
 * each distinct grouping key, aggregates the two inputs separately:
 * {@code aggrOp0} runs over this stage's items and {@code aggrOp1} over
 * {@code stage1}'s items. After consuming all the input it emits one
 * {@code Map.Entry(key, Tuple2(result0, result1))} per distinct key.
 *
 * @see com.hazelcast.jet.aggregate.AggregateOperations AggregateOperations
 *
 * @param aggrOp0 aggregate operation to perform on this stage
 * @param stage1  the other stage
 * @param aggrOp1 aggregate operation to perform on the other stage
 * @param <R0>    type of the aggregation result for stream-0
 * @param <T1>    type of items in {@code stage1}
 * @param <R1>    type of the aggregation result for stream-1
 */
@Nonnull
default <T1, R0, R1> BatchStage<Entry<K, Tuple2<R0, R1>>> aggregate2(
        @Nonnull AggregateOperation1<? super T, ?, ? extends R0> aggrOp0,
        @Nonnull BatchStageWithKey<? extends T1, ? extends K> stage1,
        @Nonnull AggregateOperation1<? super T1, ?, ? extends R1> aggrOp1
) {
    // Fuse the two single-input operations into one two-input operation
    // whose result pairs up both partial results.
    AggregateOperation2<? super T, ? super T1, ?, Tuple2<R0, R1>> combinedOp =
            aggregateOperation2(aggrOp0, aggrOp1, (r0, r1) -> Tuple2.tuple2(r0, r1));
    return aggregate2(stage1, combinedOp, (key, results) -> Util.entry(key, results));
}
@Nonnull DistributedTriFunction<? super K, ? super R0, ? super R1, OUT> mapToOutputFn ) { return aggregate2(stage1, aggregateOperation2(aggrOp0, aggrOp1, Tuple2::tuple2), (key, tuple) -> mapToOutputFn.apply(key, tuple.f0(), tuple.f1()));