/**
 * Returns the next tuple without consuming it. A locally buffered tuple,
 * if present, takes precedence over the delegate's head.
 */
@Override
public Tuple peek() throws SQLException {
    // Serve the pushed-back/buffered tuple first; fall through to the delegate otherwise.
    return (tuple != null) ? tuple : delegate.peek();
}
/**
 * Wraps a {@link PeekingResultIterator} so that iterators can be ordered by
 * their current head tuple (e.g. inside a min-heap for a merge sort).
 *
 * @param delegate the underlying iterator to wrap
 * @param c        comparator used to order head tuples
 * @throws SQLException if peeking at the delegate's head fails
 */
public MaterializedComparableResultIterator(PeekingResultIterator delegate, Comparator<? super Tuple> c) throws SQLException {
    this.comparator = c;
    this.delegate = delegate;
    // Materialize the head tuple eagerly so comparisons never have to advance the delegate.
    this.current = delegate.peek();
}
/** Peeks at the next tuple by delegating to the backing spool iterator. */
@Override
public Tuple peek() throws SQLException {
    return spoolFrom.peek();
}
/** Peeks at the next tuple by forwarding to the wrapped delegate. */
@Override
public Tuple peek() throws SQLException {
    return delegate.peek();
}
/**
 * Consumes and returns the delegate's head tuple, then refreshes the cached
 * head so subsequent comparisons see the new front of the stream.
 */
@Override
public Tuple next() throws SQLException {
    final Tuple consumed = delegate.next();
    // Re-peek so compareTo()/peek() reflect the element now at the front.
    this.current = delegate.peek();
    return consumed;
}
/** Peeks at the next tuple by forwarding to the current delegate iterator. */
@Override
public Tuple peek() throws SQLException {
    return getDelegate().peek();
}
}
/** Peeks at the next tuple of the lazily resolved result iterator. */
@Override
public Tuple peek() throws SQLException {
    return getResultIterator().peek();
}
/** Peeks at the head of whichever underlying iterator is currently active. */
@Override
public Tuple peek() throws SQLException {
    return currentIterator().peek();
}
/** Peeks at the head of whichever underlying iterator is currently active. */
@Override
public Tuple peek() throws SQLException {
    return currentIterator().peek();
}
/** Peeks at the next tuple of the lazily resolved result iterator. */
@Override
public Tuple peek() throws SQLException {
    return getResultIterator().peek();
}
/**
 * Returns the first iterator at or after {@code index} that still has a tuple,
 * closing (and stepping past) each exhausted iterator along the way.
 * Falls back to {@code EMPTY_ITERATOR} once every iterator is drained.
 */
private PeekingResultIterator currentIterator() throws SQLException {
    final List<PeekingResultIterator> iterators = getIterators();
    for (; index < iterators.size(); index++) {
        final PeekingResultIterator candidate = iterators.get(index);
        if (candidate.peek() != null) {
            // Found a live iterator; leave index pointing at it.
            return candidate;
        }
        // Exhausted: release its resources before moving on.
        candidate.close();
    }
    return EMPTY_ITERATOR;
}
/**
 * Lazily builds (and thereafter returns) the min-heap of live iterators,
 * ordered by their head tuples. Iterators that are already exhausted are
 * closed rather than added to the heap.
 */
private PriorityQueue<MaterializedComparableResultIterator> getMinHeap() throws SQLException {
    if (minHeap != null) {
        return minHeap;
    }
    final List<PeekingResultIterator> iterators = resultIterators.getIterators();
    // PriorityQueue rejects a zero capacity, hence the max(1, ...) guard.
    minHeap = new PriorityQueue<MaterializedComparableResultIterator>(Math.max(1, iterators.size()));
    for (PeekingResultIterator itr : iterators) {
        if (itr.peek() != null) {
            minHeap.add(new MaterializedComparableResultIterator(itr, itrComparator));
        } else {
            // Nothing to merge from this iterator; release it immediately.
            itr.close();
        }
    }
    return minHeap;
}
@Override public PeekingResultIterator call() throws Exception { long startTime = System.currentTimeMillis(); if (logger.isDebugEnabled()) { logger.debug(LogUtil.addCustomAnnotations("Id: " + scanId + ", Time: " + (System.currentTimeMillis() - startTime) + "ms, Scan: " + scan, ScanUtil.getCustomAnnotations(scan))); } PeekingResultIterator iterator = iteratorFactory.newIterator(context, tableResultItr, scan, physicalTableName, ParallelIterators.this.plan); if (initFirstScanOnly) { if ((!isReverse && scanLocation.isFirstScan()) || (isReverse && scanLocation.isLastScan())) { // Fill the scanner's cache. This helps reduce latency since we are parallelizing the I/O needed. iterator.peek(); } } else { iterator.peek(); } allIterators.add(iterator); return iterator; }
/**
 * Consumes and returns the next tuple from the first non-exhausted iterator,
 * closing exhausted iterators (and recording that we traversed past one) as
 * it advances. Returns {@code null} once every iterator is drained.
 */
private Tuple nextTuple() throws SQLException {
    final List<PeekingResultIterator> iterators = getIterators();
    while (index < iterators.size()) {
        final PeekingResultIterator candidate = iterators.get(index);
        if (candidate.peek() != null) {
            // Live iterator: consume from it without advancing index.
            return candidate.next();
        }
        // Exhausted: note the traversal, release it, and try the next one.
        traversedIterator = true;
        candidate.close();
        index++;
    }
    return null;
}
/**
 * Returns the next row for the cursor, updating the cursor's position with a
 * look-ahead at the following row. Returns {@code null} when the cursor has
 * no more values or the fetch-size limit has been reached.
 */
@Override
public Tuple next() throws SQLException {
    // Guard clauses: cursor exhausted, or this fetch already returned its quota.
    if (!CursorUtil.moreValues(cursorName) || fetchSize == rowsRead) {
        return null;
    }
    final Tuple row = delegate.next();
    // Record the new position along with a peek at the row that follows it.
    CursorUtil.updateCursor(cursorName, row, delegate.peek());
    rowsRead++;
    return row;
}
/**
 * Returns the iterator to read from next: lazily creates the first one, and
 * when the current one is exhausted, closes it and advances to the next.
 */
private PeekingResultIterator currentIterator() throws SQLException {
    if (currentIterator == null) {
        // First use: materialize the initial iterator.
        currentIterator = nextIterator();
        return currentIterator;
    }
    if (currentIterator.peek() == null) {
        // Drained: release it and swap in the next iterator.
        currentIterator.close();
        currentIterator = nextIterator();
    }
    return currentIterator;
}
PeekingResultIterator peekingItr = iteratorFactory.newIterator(context, itr, currentScan, tableName, plan); Tuple tuple; if ((tuple = peekingItr.peek()) == null) { peekingItr.close(); continue;
private ResultIterator peekForPersistentCache(ResultIterator iterator, ParallelScanGrouper scanGrouper, Scan scan) throws SQLException { // The persistent subquery is optimistic and assumes caches are present on region // servers. We verify that this is the case by peeking at one result. If there is // a cache missing exception, we retry the query with the persistent cache disabled // for that specific cache ID. PeekingResultIterator peeking = LookAheadResultIterator.wrap(iterator); try { peeking.peek(); } catch (Exception e) { try { throw ServerUtil.parseServerException(e); } catch (HashJoinCacheNotFoundException e2) { Long cacheId = e2.getCacheId(); if (delegate.getContext().getRetryingPersistentCache(cacheId)) { throw e2; } delegate.getContext().setRetryingPersistentCache(cacheId); return iterator(scanGrouper, scan); } } return peeking; }
/**
 * Returns the iterator for the current chunk, transparently rolling over to a
 * new single-chunk iterator (resuming the scan at {@code lastKey}) when the
 * current chunk is exhausted and there are more rows to fetch.
 */
private PeekingResultIterator getResultIterator() throws SQLException {
    // Keep using the current iterator while it has rows, or when there is no resume key.
    if (resultIterator.peek() != null || lastKey == null) {
        return resultIterator;
    }
    resultIterator.close();
    // Build a fresh scan that resumes just after the last key we returned.
    scan = ScanUtil.newScan(scan);
    if (ScanUtil.isLocalIndex(scan)) {
        scan.setAttribute(SCAN_START_ROW_SUFFIX, ByteUtil.copyKeyBytesIfNecessary(lastKey));
    } else {
        scan.setStartRow(ByteUtil.copyKeyBytesIfNecessary(lastKey));
    }
    if (logger.isDebugEnabled()) {
        logger.debug(LogUtil.addCustomAnnotations("Get next chunked result iterator over " + tableRef.getTable().getPhysicalName().getString() + " with " + scan, ScanUtil.getCustomAnnotations(scan)));
    }
    final String tableName = tableRef.getTable().getPhysicalName().getString();
    final ReadMetricQueue readMetrics = context.getReadMetricsQueue();
    final ScanMetricsHolder scanMetricsHolder = ScanMetricsHolder.getInstance(readMetrics, tableName, scan, context.getConnection().getLogLevel());
    final long renewLeaseThreshold = context.getConnection().getQueryServices().getRenewLeaseThresholdMilliSeconds();
    // Cap the new iterator at one chunk so pagination boundaries are preserved.
    final ResultIterator singleChunkResultIterator = new SingleChunkResultIterator(new TableResultIterator(mutationState, scan, scanMetricsHolder, renewLeaseThreshold, plan, DefaultParallelScanGrouper.getInstance()), chunkSize);
    resultIterator = delegateIteratorFactory.newIterator(context, singleChunkResultIterator, scan, tableName, plan);
    return resultIterator;
}
/**
 * Aggregates all consecutive rows sharing the current grouping key into a
 * single output tuple, then positions {@code currentKey} at the next group's
 * key. Returns {@code null} when the underlying iterator is exhausted.
 */
@Override
public Tuple next() throws SQLException {
    Tuple row = resultIterator.next();
    if (row == null) {
        return null;
    }
    // Lazily capture the first group's key on the very first row.
    if (currentKey.get() == UNITIALIZED_KEY_BUFFER) {
        getGroupingKey(row, currentKey);
    }
    final Aggregator[] rowAggregators = aggregators.getAggregators();
    aggregators.reset(rowAggregators);
    // Fold rows into the aggregators while the look-ahead row stays in the same group.
    boolean sameGroup;
    do {
        aggregators.aggregate(rowAggregators, row);
        Tuple lookahead = resultIterator.peek();
        // getGroupingKey also writes the peeked key into nextKey as a side effect.
        sameGroup = lookahead != null && currentKey.equals(getGroupingKey(lookahead, nextKey));
        if (sameGroup) {
            row = resultIterator.next();
        }
    } while (sameGroup);
    final byte[] value = aggregators.toBytes(rowAggregators);
    final Tuple tuple = wrapKeyValueAsResult(PhoenixKeyValueUtil.newKeyValue(currentKey, SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, AGG_TIMESTAMP, value, 0, value.length));
    // Advance the current key to the next group's key for the following call.
    currentKey.set(nextKey.get(), nextKey.getOffset(), nextKey.getLength());
    return tuple;
}