/**
 * Operator shutdown hook.
 *
 * NOTE: an in-progress group is intentionally NOT finished/flushed here,
 * because correct aggregate values require the final batch of that group,
 * which will never arrive once the operator is closing.
 */
@Override
protected void closeOp(boolean abort) throws HiveException {
  super.closeOp(abort);
  // We do not try to finish and flush an in-progress group because correct
  // values require the last group batch.
}
/**
 * Publishes the final processed-row total to the record counter, then
 * delegates the rest of the shutdown to the parent operator.
 *
 * @param abort whether the task is being aborted
 * @throws HiveException if the parent close fails
 */
@Override
public void closeOp(boolean abort) throws HiveException {
  // Export the row count before closing so it survives operator teardown.
  recordCounter.set(numRows);
  super.closeOp(abort);
}
/**
 * Validates on close that enough rows were seen. Map-side instances skip
 * the check; on the reduce side, ending with fewer than {@code leastRow}
 * rows is an error.
 *
 * @param abort whether the task is being aborted
 * @throws HiveException if the reduce side saw too few rows, or the
 *         parent close fails
 */
@Override
public void closeOp(boolean abort) throws HiveException {
  // Equivalent to: !(!isMap && currCount < leastRow)
  boolean rowCountSatisfied = isMap || currCount >= leastRow;
  if (!rowCountSatisfied) {
    throw new HiveException("No sufficient row found");
  }
  super.closeOp(abort);
}
/**
 * Releases the buffered top-N candidate rows, then completes the standard
 * operator shutdown.
 *
 * @param abort whether the task is being aborted
 * @throws HiveException if the parent close fails
 */
@Override
protected final void closeOp(boolean abort) throws HiveException {
  // Drop any rows still held by the heap so they can be garbage collected.
  priorityQueue.clear();
  super.closeOp(abort);
}
/**
 * Validates on close that enough rows were seen. Map-side instances skip
 * the check; on the reduce side, fewer than {@code leastRow} rows is an
 * error.
 *
 * @param abort whether the task is being aborted
 * @throws HiveException if the reduce side saw too few rows, or the
 *         parent close fails
 */
@Override
public void closeOp(boolean abort) throws HiveException {
  if (!isMap && currCount < leastRow) {
    throw new HiveException("No sufficient row found");
  }
  super.closeOp(abort);
}
/**
 * Publishes the final processed-row total to the record counter, then
 * delegates the rest of the shutdown to the parent operator.
 *
 * @param abort whether the task is being aborted
 * @throws HiveException if the parent close fails
 */
@Override
public void closeOp(boolean abort) throws HiveException {
  // Export the row count before closing so it survives operator teardown.
  recordCounter.set(numRows);
  super.closeOp(abort);
}
/** * All done. * */ @Override public void closeOp(boolean abort) throws HiveException { closeOpCalled = true; for (AbstractRowContainer<List<Object>> alw : storage) { if (alw != null) { alw.clearRows(); // clean up the temp files } } Arrays.fill(storage, null); super.closeOp(abort); }
/**
 * All done: releases every per-alias row container and drops the
 * references so they can be garbage collected, then finishes the
 * standard operator shutdown.
 *
 * @param abort whether the task is being aborted
 * @throws HiveException if the parent close fails
 */
@Override
public void closeOp(boolean abort) throws HiveException {
  closeOpCalled = true;
  for (AbstractRowContainer<List<Object>> alw : storage) {
    if (alw != null) {
      alw.clearRows(); // clean up the temp files
    }
  }
  // Null all slots so the row containers become unreachable.
  Arrays.fill(storage, null);
  super.closeOp(abort);
}
/**
 * Completes the parent shutdown, then finishes the last in-flight PTF
 * partition and releases the invocation chain's resources.
 *
 * NOTE(review): finishPartition/close run AFTER super.closeOp here —
 * the ordering looks deliberate but cannot be confirmed from this view.
 *
 * @param abort whether the task is being aborted
 * @throws HiveException if any stage of the close fails
 */
@Override
protected void closeOp(boolean abort) throws HiveException {
  super.closeOp(abort);
  ptfInvocation.finishPartition();
  ptfInvocation.close();
}
/**
 * Closes the table-generating function so it can emit any buffered rows
 * and release its resources, then completes the standard shutdown.
 *
 * @param abort whether the task is being aborted
 * @throws HiveException if the UDTF or parent close fails
 */
@Override
protected void closeOp(boolean abort) throws HiveException {
  conf.getGenericUDTF().close();
  super.closeOp(abort);
}
}
/**
 * Publishes collected statistics (when stats gathering is enabled and
 * anything was collected), then completes the standard operator shutdown.
 *
 * @param abort whether the task is being aborted
 * @throws HiveException if publishing stats or the parent close fails
 */
@Override
public void closeOp(boolean abort) throws HiveException {
  // Single short-circuit chain; identical to the nested null/flag checks.
  if (conf != null && conf.isGatherStats() && stats.size() != 0) {
    publishStats();
  }
  super.closeOp(abort);
}
/**
 * Closes the table-generating function so it can emit any buffered rows
 * and release its resources, then completes the standard shutdown.
 *
 * @param abort whether the task is being aborted
 * @throws HiveException if the UDTF or parent close fails
 */
@Override
protected void closeOp(boolean abort) throws HiveException {
  conf.getGenericUDTF().close();
  super.closeOp(abort);
}
}
/**
 * Completes the parent shutdown, then finishes the last in-flight PTF
 * partition and releases the invocation chain's resources.
 *
 * NOTE(review): finishPartition/close run AFTER super.closeOp here —
 * the ordering looks deliberate but cannot be confirmed from this view.
 *
 * @param abort whether the task is being aborted
 * @throws HiveException if any stage of the close fails
 */
@Override
protected void closeOp(boolean abort) throws HiveException {
  super.closeOp(abort);
  ptfInvocation.finishPartition();
  ptfInvocation.close();
}
closeOp(abort);
closeOp(abort);
/**
 * Resolves a missing file id from the execution context, publishes
 * collected statistics when stats gathering is enabled and non-empty,
 * then completes the standard operator shutdown.
 *
 * @param abort whether the task is being aborted
 * @throws HiveException if publishing stats or the parent close fails
 */
@Override
public void closeOp(boolean abort) throws HiveException {
  // Late-bind the file id if the context exists but never produced one.
  if (getExecContext() != null && getExecContext().getFileId() == null) {
    updateFileId();
  }
  // Single short-circuit chain; identical to the nested null/flag checks.
  if (conf != null && conf.isGatherStats() && stats.size() != 0) {
    publishStats();
  }
  super.closeOp(abort);
}
/**
 * We need to forward all the aggregations to children.
 *
 * On a non-aborted close this either emits the grouping-set summary row
 * (when no input row ever reached this operator and the query shape calls
 * for an empty-input summary) or flushes whatever is still buffered in
 * the hash aggregation. The hash table reference is dropped afterwards in
 * both the normal and the abort path.
 *
 * @param abort whether the task is being aborted; when true, nothing is
 *        forwarded
 * @throws HiveException wrapping any failure during forwarding/flushing
 */
@Override
public void closeOp(boolean abort) throws HiveException {
  if (!abort) {
    try {
      // If there is no grouping key and no row came to this operator
      if (firstRow && GroupByOperator.shouldEmitSummaryRow(conf)) {
        firstRow = false;
        Object[] keys = new Object[outputKeyLength];
        int pos = conf.getGroupingSetPosition();
        if (pos >= 0 && pos < outputKeyLength) {
          // Grouping-set id for the summary row: all grouping columns
          // below 'pos' marked as aggregated ((1 << pos) - 1).
          keys[pos] = new LongWritable((1L << pos) - 1);
        }
        forward(keys, aggregations);
      } else {
        // Normal path: push any buffered aggregation rows downstream.
        flush();
      }
    } catch (Exception e) {
      throw new HiveException(e);
    }
  }
  // Let the (possibly large) hash table be garbage collected.
  hashAggregations = null;
  super.closeOp(abort);
}
super.closeOp(abort);
/**
 * Publishes the final processed-row total to the record counter, then
 * delegates the rest of the shutdown to the parent operator.
 *
 * @param abort whether the task is being aborted
 * @throws HiveException if the parent close fails
 */
@Override
public void closeOp(boolean abort) throws HiveException {
  // Export the row count before closing so it survives operator teardown.
  recordCounter.set(numRows);
  super.closeOp(abort);
}
/**
 * Completes the parent shutdown, then finishes the last in-flight PTF
 * partition and releases the invocation chain's resources.
 *
 * NOTE(review): finishPartition/close run AFTER super.closeOp here —
 * the ordering looks deliberate but cannot be confirmed from this view.
 *
 * @param abort whether the task is being aborted
 * @throws HiveException if any stage of the close fails
 */
@Override
protected void closeOp(boolean abort) throws HiveException {
  super.closeOp(abort);
  ptfInvocation.finishPartition();
  ptfInvocation.close();
}