/**
 * Builds the reduce-side processing chain for one Spark partition: creates a
 * {@link SparkReduceRecordHandler}, wraps it in a result list that consumes
 * {@code it} lazily, initializes the handler with the job conf and reporter,
 * and returns the result list as the output iterator.
 *
 * @param it shuffled (key, value) pairs for this partition
 * @return lazily-evaluated iterator over the reducer's serialized output
 * @throws Exception if handler initialization fails
 */
@SuppressWarnings("unchecked")
@Override
public Iterator<Tuple2<HiveKey, BytesWritable>> call(Iterator<Tuple2<HiveKey, V>> it) throws Exception {
  initJobConf();
  final SparkReduceRecordHandler handler = new SparkReduceRecordHandler();
  final HiveReduceFunctionResultList<V> output = new HiveReduceFunctionResultList<V>(it, handler);
  handler.init(jobConf, output, sparkReporter);
  return output;
}
/** Closes the underlying reduce record handler, flushing any buffered reducer output. */
@Override
protected void closeRecordProcessor() {
  reduceRecordHandler.close();
}
// Closing brace of the enclosing class (its declaration is outside this view).
}
/**
 * Forwards one shuffled record to the reduce record handler. A value that is
 * an {@code Iterable} represents a group of {@code BytesWritable}s for the
 * key and is handed over as an iterator; any other value is passed through
 * as a single row.
 *
 * @param inputRecord the (key, value) pair pulled from the shuffle
 * @throws IOException if the record handler fails to process the row
 */
@Override
protected void processNextRecord(Tuple2<HiveKey, V> inputRecord) throws IOException {
  final HiveKey shuffleKey = inputRecord._1();
  final V shuffleValue = inputRecord._2();
  if (!(shuffleValue instanceof Iterable)) {
    reduceRecordHandler.processRow(shuffleKey, shuffleValue);
    return;
  }
  // Safe in practice: the shuffle delivers grouped BytesWritable values here.
  @SuppressWarnings("unchecked")
  Iterable<BytesWritable> grouped = (Iterable<BytesWritable>) shuffleValue;
  reduceRecordHandler.<BytesWritable>processRow(shuffleKey, grouped.iterator());
}
/**
 * Processes one (key, value) row. Marks that at least one row has been seen,
 * then either adds the row to the current vector batch (vectorized path) or
 * wraps the value in the reusable single-element {@code dummyIterator} and
 * delegates to the iterator-based {@code processRow} overload.
 *
 * @param key   the row's key
 * @param value the row's value
 * @throws IOException if row processing fails downstream
 */
@Override
public void processRow(Object key, final Object value) throws IOException {
  if (!anyRow) {
    anyRow = true;
  }
  if (vectorized) {
    processVectorRow(key, value);
    return;
  }
  dummyIterator.setValue(value);
  processRow(key, dummyIterator);
}
// NOTE(review): fragment cut mid-method — the opening `if` and the closing
// braces of this branch are outside this view, so it cannot be restructured
// safely. Dispatches a group of values either down the vectorized path or
// the row-at-a-time path; presumably guarded by a vectorization check just
// above this fragment — confirm against the full method.
processVectors(values, tag); } else { processKeyValues(values, tag);
// NOTE(review): fragment cut mid-method; the enclosing loop/branch and the
// closing braces are outside this view. Deserializes the current value for
// this tag and resets the batch byte counter. `logMemoryInfo()` is invoked
// twice in a row — this looks like an accidental duplication (compare the
// similar fragments elsewhere in this file that call it once); confirm
// against history and drop one call.
Object valueObj = deserializeValue(valueWritable, tag); batchBytes = 0; if (isLogInfoEnabled) { logMemoryInfo(); logMemoryInfo();
// NOTE(review): fragment cut mid-method. As collapsed onto one line,
// `reducer.startGroup()` and `processKeyValues(...)` appear after a
// `return` and would be unreachable if they shared its scope — more likely
// they belong to a different branch (e.g. the non-vectorized path) that was
// flattened during extraction; verify against the original method body.
processVectorRows(key, values); return; reducer.startGroup(); processKeyValues(values, tag);
// NOTE(review): fragment cut mid-method; closing braces are outside this
// view. Appends the deserialized value for this tag to the current row and,
// when INFO logging is enabled, reports memory usage.
row.add(valueObject[tag]); if (isLogInfoEnabled) { logMemoryInfo();
// NOTE(review): fragment cut mid-method (and a duplicate of an earlier
// fragment in this extracted chunk). The opening `if` and closing braces
// are not visible, so no restructuring is safe. Routes grouped values to
// the vectorized or row-at-a-time processing path.
processVectors(values, tag); } else { processKeyValues(values, tag);
// NOTE(review): fragment cut mid-method; enclosing scope not visible.
// Deserializes the current value for this tag and resets the batch row
// index. As in the sibling fragment, `logMemoryInfo()` appears twice in a
// row — likely an accidental duplication; confirm and remove one call.
Object valueObj = deserializeValue(valueWritable, tag); rowIdx = 0; if (isLogInfoEnabled) { logMemoryInfo(); logMemoryInfo();
// NOTE(review): fragment cut mid-method; closing braces outside this view.
// Appends the deserialized value for this tag to the current row, then
// reports memory usage when INFO logging is enabled.
row.add(valueObject[tag]); if (isLogInfoEnabled) { logMemoryInfo();
/**
 * Builds the reduce-side processing chain for one Spark partition: a
 * {@code SparkReduceRecordHandler} running the Hive reducer, wrapped in a
 * {@code HiveReduceFunctionResultList} that pulls (key, value) pairs from
 * {@code it} lazily and is returned as the output iterator.
 *
 * @param it shuffled (key, value) pairs for this partition
 * @return lazily-evaluated iterator over the reducer's serialized output
 * @throws Exception if handler initialization fails
 */
@SuppressWarnings("unchecked")
@Override
public Iterator<Tuple2<HiveKey, BytesWritable>> call(Iterator<Tuple2<HiveKey, V>> it) throws Exception {
  initJobConf();
  SparkReduceRecordHandler reducerRecordhandler = new SparkReduceRecordHandler();
  HiveReduceFunctionResultList<V> result = new HiveReduceFunctionResultList<V>(it, reducerRecordhandler);
  reducerRecordhandler.init(jobConf, result, sparkReporter);
  return result;
}
/**
 * Processes a single (key, value) row by placing the value in the reusable
 * single-element {@code dummyIterator} and delegating to the iterator-based
 * {@code processRow} overload.
 *
 * @param key   the row's key
 * @param value the row's value
 * @throws IOException if row processing fails downstream
 */
@Override
public void processRow(Object key, final Object value) throws IOException {
  dummyIterator.setValue(value);
  processRow(key, dummyIterator);
}
/** Closes the underlying reduce record handler, flushing any buffered reducer output. */
@Override
protected void closeRecordProcessor() {
  reduceRecordHandler.close();
}
// Closing brace of the enclosing class (its declaration is outside this view).
}
/**
 * Builds the reduce-side processing chain for one partition in the variant
 * where values arrive pre-grouped as {@code Iterable<BytesWritable>}: wires
 * a {@code SparkReduceRecordHandler} into a result list backed by
 * {@code it} and returns that list as the output {@code Iterable}.
 *
 * @param it shuffled (key, grouped-values) pairs for this partition
 * @return lazily-evaluated iterable over the reducer's serialized output
 * @throws Exception if handler initialization fails
 */
// NOTE(review): HiveReduceFunctionResultList is used as a raw type here; if
// the class is generic in this revision, parameterize it (e.g.
// HiveReduceFunctionResultList<Iterable<BytesWritable>>) and narrow the
// @SuppressWarnings — confirm against the class declaration before changing.
@SuppressWarnings("unchecked")
@Override
public Iterable<Tuple2<HiveKey, BytesWritable>> call(Iterator<Tuple2<HiveKey, Iterable<BytesWritable>>> it) throws Exception {
  initJobConf();
  SparkReduceRecordHandler reducerRecordhandler = new SparkReduceRecordHandler();
  HiveReduceFunctionResultList result = new HiveReduceFunctionResultList(it, reducerRecordhandler);
  reducerRecordhandler.init(jobConf, result, sparkReporter);
  return result;
}
/**
 * Hands one shuffled record to the reduce record handler, distinguishing a
 * grouped value (an {@code Iterable} of {@code BytesWritable}s, forwarded as
 * an iterator) from a plain single value.
 *
 * @param inputRecord the (key, value) pair pulled from the shuffle
 * @throws IOException if the record handler fails to process the row
 */
@Override
protected void processNextRecord(Tuple2<HiveKey, V> inputRecord) throws IOException {
  final HiveKey recordKey = inputRecord._1();
  final V recordValue = inputRecord._2();
  if (recordValue instanceof Iterable) {
    // Safe in practice: grouped shuffle values are BytesWritable.
    @SuppressWarnings("unchecked")
    Iterable<BytesWritable> groupedValues = (Iterable<BytesWritable>) recordValue;
    reduceRecordHandler.<BytesWritable>processRow(recordKey, groupedValues.iterator());
  } else {
    reduceRecordHandler.processRow(recordKey, recordValue);
  }
}
/** Closes the underlying reduce record handler, flushing any buffered reducer output. */
@Override
protected void closeRecordProcessor() {
  reduceRecordHandler.close();
}
// Closing brace of the enclosing class (its declaration is outside this view).
}
/**
 * Forwards one shuffled (key, grouped-values) pair to the reduce record
 * handler, exposing the grouped values as an iterator.
 *
 * @param inputRecord the (key, Iterable-of-values) pair pulled from the shuffle
 * @throws IOException if the record handler fails to process the row
 */
@Override
protected void processNextRecord(Tuple2<HiveKey, Iterable<BytesWritable>> inputRecord) throws IOException {
  final HiveKey recordKey = inputRecord._1();
  final Iterable<BytesWritable> groupedValues = inputRecord._2();
  reduceRecordHandler.processRow(recordKey, groupedValues.iterator());
}