// Folds a single tuple into this iterator's shared row-aggregator array and
// returns that array. NOTE(review): the final '}' below closes an enclosing
// scope (likely an anonymous class) that begins outside this chunk.
@Override
public Aggregator[] aggregate(Tuple result) {
    // The aggregator array is shared/reused, so clear the previous row's
    // state before folding in this tuple.
    Aggregator[] rowAggregators = aggregators.getAggregators();
    aggregators.reset(rowAggregators);
    aggregators.aggregate(rowAggregators, result);
    return rowAggregators;
}
}
/**
 * Builds an Aggregators wrapper around the given aggregate functions and
 * their corresponding per-row Aggregator instances.
 *
 * @param functions the single-value aggregate functions being computed
 * @param aggregators one Aggregator per function, used to fold rows in
 * @param minNullableIndex index of the first aggregator whose value may be null
 */
public Aggregators(SingleAggregateFunction[] functions, Aggregator[] aggregators, int minNullableIndex) {
    this.functions = functions;
    this.aggregators = aggregators;
    // Derive the value schema first; the bit set is sized from it.
    this.schema = newValueSchema(aggregators, minNullableIndex);
    this.valueSet = ValueBitSet.newInstance(this.schema);
    // Precomputed serialized-size estimate, used for memory accounting.
    this.estimatedByteSize = calculateSize(aggregators);
}
/**
 * Drains the underlying resultIterator, grouping every row by its grouping
 * key and folding each row into that key's Aggregator array. Memory use is
 * tracked against {@code memoryChunk}, which is grown in
 * CLIENT_HASH_AGG_MEMORY_CHUNK_SIZE increments as the hash map fills.
 *
 * @return the populated key-to-aggregators hash (also stored in the
 *         {@code hash} field)
 * @throws SQLException if the underlying iterator fails or memory cannot be
 *         reserved (resize may throw InsufficientMemoryException)
 */
private HashMap<ImmutableBytesWritable, Aggregator[]> populateHash() throws SQLException {
    hash = new HashMap<ImmutableBytesWritable, Aggregator[]>(HASH_AGG_INIT_SIZE, 0.75f);
    // Per-entry aggregator footprint, constant across the loop.
    final int aggSize = aggregators.getEstimatedByteSize();
    long keySize = 0;
    for (Tuple result = resultIterator.next(); result != null; result = resultIterator.next()) {
        // A fresh key object per row: it may be retained as a map key below.
        ImmutableBytesWritable key = new ImmutableBytesWritable(UNITIALIZED_KEY_BUFFER);
        key = getGroupingKey(result, key);
        Aggregator[] rowAggregators = hash.get(key);
        if (rowAggregators == null) {
            // First time we see this key: account for its memory before
            // inserting, growing the tracked chunk with headroom if needed.
            keySize += key.getSize();
            long hashSize = SizedUtil.sizeOfMap(hash.size() + 1, SizedUtil.IMMUTABLE_BYTES_WRITABLE_SIZE, aggSize) + keySize;
            if (hashSize > memoryChunk.getSize() + CLIENT_HASH_AGG_MEMORY_CHUNK_SIZE) {
                // This will throw InsufficientMemoryException if necessary
                memoryChunk.resize(hashSize + CLIENT_HASH_AGG_MEMORY_CHUNK_SIZE);
            }
            rowAggregators = aggregators.newAggregators();
            hash.put(key, rowAggregators);
        }
        aggregators.aggregate(rowAggregators, result);
    }
    return hash;
}
@Override public Tuple next() throws SQLException { Tuple result = super.next(); // Ensure ungrouped aggregregation always returns a row, even if the underlying iterator doesn't. if (result == null && !hasRows) { // We should reset ClientAggregators here in case they are being reused in a new ResultIterator. aggregators.reset(aggregators.getAggregators()); byte[] value = aggregators.toBytes(aggregators.getAggregators()); result = new SingleKeyValueTuple( PhoenixKeyValueUtil.newKeyValue(UNGROUPED_AGG_ROW_KEY, SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, AGG_TIMESTAMP, value)); } hasRows = true; return result; }
/**
 * Returns one aggregated tuple per distinct grouping key. Assumes the
 * underlying resultIterator delivers rows ordered by grouping key: rows are
 * folded into the shared aggregator array until peek() shows a row with a
 * different key (or the input is exhausted).
 *
 * @return the aggregated tuple for the current group, or null when the
 *         underlying iterator is exhausted
 * @throws SQLException if the underlying iterator fails
 */
@Override
public Tuple next() throws SQLException {
    Tuple result = resultIterator.next();
    if (result == null) {
        return null;
    }
    // Reference (==) comparison is intentional: UNITIALIZED_KEY_BUFFER is a
    // sentinel array marking that currentKey has not been initialized yet.
    if (currentKey.get() == UNITIALIZED_KEY_BUFFER) {
        getGroupingKey(result, currentKey);
    }
    // Reuse the shared aggregator array; clear the previous group's state.
    Aggregator[] rowAggregators = aggregators.getAggregators();
    aggregators.reset(rowAggregators);
    while (true) {
        aggregators.aggregate(rowAggregators, result);
        // Peek ahead so the first row of the next group is not consumed.
        Tuple nextResult = resultIterator.peek();
        if (nextResult == null || !currentKey.equals(getGroupingKey(nextResult, nextKey))) {
            break;
        }
        result = resultIterator.next();
    }
    byte[] value = aggregators.toBytes(rowAggregators);
    Tuple tuple = wrapKeyValueAsResult(PhoenixKeyValueUtil.newKeyValue(currentKey,
            SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, AGG_TIMESTAMP, value, 0, value.length));
    // Advance currentKey to the next group's key for the following call.
    currentKey.set(nextKey.get(), nextKey.getOffset(), nextKey.getLength());
    return tuple;
}
// NOTE(review): fragment — the enclosing method begins and ends outside this
// chunk; 'scan', 'conf', 'em', 'rowAggregators', 'useQualifierAsIndex',
// 'indexMutations', 'hasAny' and 'hadAny' are declared elsewhere.
// Rebuild the server-side aggregators from the serialized scan attribute.
aggregators = ServerAggregators.deserialize(
        scan.getAttribute(BaseScannerRegionObserver.AGGREGATORS), conf, em);
rowAggregators = aggregators.getAggregators();
Pair<Integer, Integer> minMaxQualifiers = EncodedColumnsUtil.getMinMaxQualifiersFromScan(scan);
// Pick the tuple implementation matching the scan's column-encoding scheme.
Tuple result = useQualifierAsIndex ? new PositionBasedMultiKeyValueTuple() : new MultiKeyValueTuple();
indexMutations.clear();
aggregators.aggregate(rowAggregators, result);
hasAny = true;
Cell keyValue = null;
// 'hadAny' presumably records whether any row was aggregated before this
// point — confirm against the enclosing method.
if (hadAny) {
    byte[] value = aggregators.toBytes(rowAggregators);
    keyValue = PhoenixKeyValueUtil.newKeyValue(UNGROUPED_AGG_ROW_KEY, SINGLE_COLUMN_FAMILY,
            SINGLE_COLUMN, AGG_TIMESTAMP, value, 0, value.length);
// NOTE(review): fragment — the '} else {' pairs with an 'if' outside this
// chunk; 'previous', 'current', 'previousKey', 'rowAggregators' and
// 'traversedIterators' are declared in the enclosing scope.
rowAggregators = aggregate(previous);
aggregators.aggregate(rowAggregators, current);
traversedIterators = this.traversedIterator;
current = previous;
} else {
    // Serialize the finished group's aggregate state into a single KeyValue.
    byte[] value = aggregators.toBytes(rowAggregators);
    current = new SingleKeyValueTuple(PhoenixKeyValueUtil.newKeyValue(previousKey,
            SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, AGG_TIMESTAMP, value, 0, value.length));
// NOTE(review): fragment — method tail; 'key' and 'rowAggregators' are
// defined earlier in the enclosing method, outside this chunk.
// Serialize the aggregated values and wrap them as a single-KeyValue tuple.
byte[] value = aggregators.toBytes(rowAggregators);
Tuple tuple = wrapKeyValueAsResult(PhoenixKeyValueUtil.newKeyValue(key, SINGLE_COLUMN_FAMILY,
        SINGLE_COLUMN, AGG_TIMESTAMP, value, 0, value.length));
return tuple;
/**
 * Returns one aggregated tuple per distinct grouping key. Assumes the
 * underlying resultIterator delivers rows ordered by grouping key: rows are
 * folded into the shared aggregator array until peek() shows a row with a
 * different key (or the input is exhausted).
 *
 * @return the aggregated tuple for the current group, or null when the
 *         underlying iterator is exhausted
 * @throws SQLException if the underlying iterator fails
 */
@Override
public Tuple next() throws SQLException {
    Tuple result = resultIterator.next();
    if (result == null) {
        return null;
    }
    // Reference (==) comparison is intentional: UNITIALIZED_KEY_BUFFER is a
    // sentinel array marking that currentKey has not been initialized yet.
    if (currentKey.get() == UNITIALIZED_KEY_BUFFER) {
        getGroupingKey(result, currentKey);
    }
    // Reuse the shared aggregator array; clear the previous group's state.
    Aggregator[] rowAggregators = aggregators.getAggregators();
    aggregators.reset(rowAggregators);
    while (true) {
        aggregators.aggregate(rowAggregators, result);
        // Peek ahead so the first row of the next group is not consumed.
        Tuple nextResult = resultIterator.peek();
        if (nextResult == null || !currentKey.equals(getGroupingKey(nextResult, nextKey))) {
            break;
        }
        result = resultIterator.next();
    }
    byte[] value = aggregators.toBytes(rowAggregators);
    Tuple tuple = wrapKeyValueAsResult(PhoenixKeyValueUtil.newKeyValue(currentKey,
            SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, AGG_TIMESTAMP, value, 0, value.length));
    // Advance currentKey to the next group's key for the following call.
    currentKey.set(nextKey.get(), nextKey.getOffset(), nextKey.getLength());
    return tuple;
}
@Override public Tuple next() throws SQLException { Tuple result = super.next(); // Ensure ungrouped aggregregation always returns a row, even if the underlying iterator doesn't. if (result == null && !hasRows) { // We should reset ClientAggregators here in case they are being reused in a new ResultIterator. aggregators.reset(aggregators.getAggregators()); byte[] value = aggregators.toBytes(aggregators.getAggregators()); result = new SingleKeyValueTuple( PhoenixKeyValueUtil.newKeyValue(UNGROUPED_AGG_ROW_KEY, SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, AGG_TIMESTAMP, value)); } hasRows = true; return result; }
// NOTE(review): fragment — the enclosing method begins and ends outside this
// chunk; 'scan', 'conf', 'em', 'rowAggregators', 'useQualifierAsIndex',
// 'indexMutations', 'hasAny' and 'hadAny' are declared elsewhere.
// Rebuild the server-side aggregators from the serialized scan attribute.
aggregators = ServerAggregators.deserialize(
        scan.getAttribute(BaseScannerRegionObserver.AGGREGATORS), conf, em);
rowAggregators = aggregators.getAggregators();
Pair<Integer, Integer> minMaxQualifiers = EncodedColumnsUtil.getMinMaxQualifiersFromScan(scan);
// Pick the tuple implementation matching the scan's column-encoding scheme.
Tuple result = useQualifierAsIndex ? new PositionBasedMultiKeyValueTuple() : new MultiKeyValueTuple();
indexMutations.clear();
aggregators.aggregate(rowAggregators, result);
hasAny = true;
Cell keyValue = null;
// 'hadAny' presumably records whether any row was aggregated before this
// point — confirm against the enclosing method.
if (hadAny) {
    byte[] value = aggregators.toBytes(rowAggregators);
    keyValue = PhoenixKeyValueUtil.newKeyValue(UNGROUPED_AGG_ROW_KEY, SINGLE_COLUMN_FAMILY,
            SINGLE_COLUMN, AGG_TIMESTAMP, value, 0, value.length);
// NOTE(review): fragment — the '} else {' pairs with an 'if' outside this
// chunk; 'previous', 'current', 'previousKey', 'rowAggregators' and
// 'traversedIterators' are declared in the enclosing scope.
rowAggregators = aggregate(previous);
aggregators.aggregate(rowAggregators, current);
traversedIterators = this.traversedIterator;
current = previous;
} else {
    // Serialize the finished group's aggregate state into a single KeyValue.
    byte[] value = aggregators.toBytes(rowAggregators);
    current = new SingleKeyValueTuple(PhoenixKeyValueUtil.newKeyValue(previousKey,
            SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, AGG_TIMESTAMP, value, 0, value.length));
// NOTE(review): fragment — method tail; 'key' and 'rowAggregators' are
// defined earlier in the enclosing method, outside this chunk.
// Serialize the aggregated values and wrap them as a single-KeyValue tuple.
byte[] value = aggregators.toBytes(rowAggregators);
Tuple tuple = wrapKeyValueAsResult(PhoenixKeyValueUtil.newKeyValue(key, SINGLE_COLUMN_FAMILY,
        SINGLE_COLUMN, AGG_TIMESTAMP, value, 0, value.length));
return tuple;
/**
 * Folds the given tuple into the shared aggregator array and returns it.
 * The array is reset first, so the returned state reflects this tuple alone.
 *
 * @param result the tuple to aggregate
 * @return the shared, freshly-reset aggregator array after aggregation
 */
@Override
public Aggregator[] aggregate(Tuple result) {
    final Aggregator[] rowAggs = aggregators.getAggregators();
    aggregators.reset(rowAggs);
    aggregators.aggregate(rowAggs, result);
    return rowAggs;
}
/**
 * Returns one aggregated tuple per distinct grouping key. Assumes the
 * underlying resultIterator delivers rows ordered by grouping key: rows are
 * folded into the shared aggregator array until peek() shows a row with a
 * different key (or the input is exhausted).
 *
 * @return the aggregated tuple for the current group, or null when the
 *         underlying iterator is exhausted
 * @throws SQLException if the underlying iterator fails
 */
@Override
public Tuple next() throws SQLException {
    Tuple result = resultIterator.next();
    if (result == null) {
        return null;
    }
    // Reference (==) comparison is intentional: UNITIALIZED_KEY_BUFFER is a
    // sentinel array marking that currentKey has not been initialized yet.
    if (currentKey.get() == UNITIALIZED_KEY_BUFFER) {
        getGroupingKey(result, currentKey);
    }
    // Reuse the shared aggregator array; clear the previous group's state.
    Aggregator[] rowAggregators = aggregators.getAggregators();
    aggregators.reset(rowAggregators);
    while (true) {
        aggregators.aggregate(rowAggregators, result);
        // Peek ahead so the first row of the next group is not consumed.
        Tuple nextResult = resultIterator.peek();
        if (nextResult == null || !currentKey.equals(getGroupingKey(nextResult, nextKey))) {
            break;
        }
        result = resultIterator.next();
    }
    byte[] value = aggregators.toBytes(rowAggregators);
    Tuple tuple = wrapKeyValueAsResult(PhoenixKeyValueUtil.newKeyValue(currentKey,
            SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, AGG_TIMESTAMP, value, 0, value.length));
    // Advance currentKey to the next group's key for the following call.
    currentKey.set(nextKey.get(), nextKey.getOffset(), nextKey.getLength());
    return tuple;
}
@Override public Tuple next() throws SQLException { Tuple result = super.next(); // Ensure ungrouped aggregregation always returns a row, even if the underlying iterator doesn't. if (result == null && !hasRows) { // We should reset ClientAggregators here in case they are being reused in a new ResultIterator. aggregators.reset(aggregators.getAggregators()); byte[] value = aggregators.toBytes(aggregators.getAggregators()); result = new SingleKeyValueTuple( PhoenixKeyValueUtil.newKeyValue(UNGROUPED_AGG_ROW_KEY, SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, AGG_TIMESTAMP, value)); } hasRows = true; return result; }
// NOTE(review): fragment — the enclosing method begins and ends outside this
// chunk; 'scan', 'conf', 'em', 'rowAggregators', 'useQualifierAsIndex',
// 'indexMutations', 'hasAny' and 'hadAny' are declared elsewhere.
// Rebuild the server-side aggregators from the serialized scan attribute.
aggregators = ServerAggregators.deserialize(
        scan.getAttribute(BaseScannerRegionObserver.AGGREGATORS), conf, em);
rowAggregators = aggregators.getAggregators();
Pair<Integer, Integer> minMaxQualifiers = EncodedColumnsUtil.getMinMaxQualifiersFromScan(scan);
// Pick the tuple implementation matching the scan's column-encoding scheme.
Tuple result = useQualifierAsIndex ? new PositionBasedMultiKeyValueTuple() : new MultiKeyValueTuple();
indexMutations.clear();
aggregators.aggregate(rowAggregators, result);
hasAny = true;
Cell keyValue = null;
// 'hadAny' presumably records whether any row was aggregated before this
// point — confirm against the enclosing method.
if (hadAny) {
    byte[] value = aggregators.toBytes(rowAggregators);
    keyValue = PhoenixKeyValueUtil.newKeyValue(UNGROUPED_AGG_ROW_KEY, SINGLE_COLUMN_FAMILY,
            SINGLE_COLUMN, AGG_TIMESTAMP, value, 0, value.length);
/**
 * Drains the underlying resultIterator, grouping every row by its grouping
 * key and folding each row into that key's Aggregator array. Memory use is
 * tracked against {@code memoryChunk}, which is grown in
 * CLIENT_HASH_AGG_MEMORY_CHUNK_SIZE increments as the hash map fills.
 *
 * @return the populated key-to-aggregators hash (also stored in the
 *         {@code hash} field)
 * @throws SQLException if the underlying iterator fails or memory cannot be
 *         reserved (resize may throw InsufficientMemoryException)
 */
private HashMap<ImmutableBytesWritable, Aggregator[]> populateHash() throws SQLException {
    hash = new HashMap<ImmutableBytesWritable, Aggregator[]>(HASH_AGG_INIT_SIZE, 0.75f);
    // Per-entry aggregator footprint, constant across the loop.
    final int aggSize = aggregators.getEstimatedByteSize();
    long keySize = 0;
    for (Tuple result = resultIterator.next(); result != null; result = resultIterator.next()) {
        // A fresh key object per row: it may be retained as a map key below.
        ImmutableBytesWritable key = new ImmutableBytesWritable(UNITIALIZED_KEY_BUFFER);
        key = getGroupingKey(result, key);
        Aggregator[] rowAggregators = hash.get(key);
        if (rowAggregators == null) {
            // First time we see this key: account for its memory before
            // inserting, growing the tracked chunk with headroom if needed.
            keySize += key.getSize();
            long hashSize = SizedUtil.sizeOfMap(hash.size() + 1, SizedUtil.IMMUTABLE_BYTES_WRITABLE_SIZE, aggSize) + keySize;
            if (hashSize > memoryChunk.getSize() + CLIENT_HASH_AGG_MEMORY_CHUNK_SIZE) {
                // This will throw InsufficientMemoryException if necessary
                memoryChunk.resize(hashSize + CLIENT_HASH_AGG_MEMORY_CHUNK_SIZE);
            }
            rowAggregators = aggregators.newAggregators();
            hash.put(key, rowAggregators);
        }
        aggregators.aggregate(rowAggregators, result);
    }
    return hash;
}
/**
 * Builds an Aggregators wrapper around the given aggregate functions and
 * their corresponding per-row Aggregator instances.
 *
 * @param functions the single-value aggregate functions being computed
 * @param aggregators one Aggregator per function, used to fold rows in
 * @param minNullableIndex index of the first aggregator whose value may be null
 */
public Aggregators(SingleAggregateFunction[] functions, Aggregator[] aggregators, int minNullableIndex) {
    this.functions = functions;
    this.aggregators = aggregators;
    // Derive the value schema first; the bit set is sized from it.
    this.schema = newValueSchema(aggregators, minNullableIndex);
    this.valueSet = ValueBitSet.newInstance(this.schema);
    // Precomputed serialized-size estimate, used for memory accounting.
    this.estimatedByteSize = calculateSize(aggregators);
}
// NOTE(review): fragment — the '} else {' pairs with an 'if' outside this
// chunk; 'previous', 'current', 'previousKey', 'rowAggregators' and
// 'traversedIterators' are declared in the enclosing scope.
rowAggregators = aggregate(previous);
aggregators.aggregate(rowAggregators, current);
traversedIterators = this.traversedIterator;
current = previous;
} else {
    // Serialize the finished group's aggregate state into a single KeyValue.
    byte[] value = aggregators.toBytes(rowAggregators);
    current = new SingleKeyValueTuple(PhoenixKeyValueUtil.newKeyValue(previousKey,
            SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, AGG_TIMESTAMP, value, 0, value.length));