/**
 * Fetches the next row's cells using this scanner's configured batch size as the limit.
 *
 * @param cells output list that receives the cells of the next row
 * @return {@code true} if more rows remain after this one
 * @throws IOException if the underlying scanner fails
 */
@Override
public boolean next(List<Cell> cells) throws IOException {
  // delegate to the limit-taking overload with the default batch size
  return next(cells, batchSize);
}
/**
 * Raw variant of {@code next}: returns cells without the normal region-level handling,
 * bounded by the given limit.
 *
 * @param cells output list that receives the cells
 * @param limit maximum number of cells to return in this batch
 * @return {@code true} if more rows remain
 * @throws IOException if the underlying scanner fails
 */
@Override
public boolean nextRaw(List<Cell> cells, int limit) throws IOException {
  boolean hasMore = nextInternal(cells, limit);
  return hasMore;
}
@Override public ReturnCode filterKeyValue(Cell cell) throws IOException { if (IncrementHandler.isIncrement(cell)) { // all visible increments should be included until we get to a non-increment return ReturnCode.INCLUDE; } else { // as soon as we find a KV to include we can move to the next column return ReturnCode.INCLUDE_AND_NEXT_COL; } } }
/**
 * Serves a Get by converting it to a Scan over all versions with the increment filter
 * applied, so pending increment cells are summed into the returned results.
 */
@Override
public void preGetOp(ObserverContext<RegionCoprocessorEnvironment> ctx, Get get, List<Cell> results)
  throws IOException {
  Scan getAsScan = new Scan(get);
  // every stored version is needed so that pending increments can be aggregated
  getAsScan.setMaxVersions();
  getAsScan.setFilter(Filters.combine(new IncrementFilter(), getAsScan.getFilter()));
  RegionScanner summingScanner = null;
  try {
    summingScanner = new IncrementSummingScanner(region, getAsScan.getBatch(),
                                                 region.getScanner(getAsScan), ScanType.USER_SCAN);
    summingScanner.next(results);
    // results are fully populated here; skip the default Get processing
    ctx.bypass();
  } finally {
    if (summingScanner != null) {
      summingScanner.close();
    }
  }
}
/**
 * Wraps every user scanner so that increment cells are summed before being
 * returned to the client.
 */
@Override
public RegionScanner postScannerOpen(ObserverContext<RegionCoprocessorEnvironment> ctx, Scan scan,
                                     RegionScanner scanner) throws IOException {
  int batch = scan.getBatch();
  return new IncrementSummingScanner(region, batch, scanner, ScanType.USER_SCAN);
}
/**
 * Creates a new instance of the {@link Filter}, chaining an {@link IncrementFilter}
 * in front of any caller-supplied cell filter.
 *
 * @param tx the current transaction to apply. Only data visible to this transaction will be returned
 * @param ttlByFamily map of time-to-live (TTL) (in milliseconds) by column family name
 * @param allowEmptyValues if {@code true} cells with empty {@code byte[]} values will be returned,
 *                         if {@code false} these will be interpreted as "delete" markers and the
 *                         column will be filtered out
 * @param scanType the type of scan operation being performed
 * @param cellFilter if non-null, this filter will be applied to all cells visible to the current
 *                   transaction, by calling {@link Filter#filterKeyValue(org.apache.hadoop.hbase.Cell)}.
 *                   If null, then {@link Filter.ReturnCode#INCLUDE_AND_NEXT_COL} will be returned instead.
 */
public IncrementTxFilter(Transaction tx, Map<byte[], Long> ttlByFamily, boolean allowEmptyValues,
                         ScanType scanType, Filter cellFilter) {
  super(tx, ttlByFamily, allowEmptyValues, scanType,
        Filters.combine(new IncrementFilter(), cellFilter));
}
/**
 * Creates a test region for the default family, keeping all cell versions and applying
 * the supplied column-family properties.
 *
 * @param tableId identifier of the table to create the region for
 * @param familyProperties column-family configuration values to set on the descriptor
 * @return a wrapper around the newly created HBase 0.98 region
 */
@Override
public RegionWrapper createRegion(TableId tableId, Map<String, String> familyProperties) throws Exception {
  HColumnDescriptor family = new HColumnDescriptor(FAMILY);
  // retain all versions so increment cells are visible to the summing scanner
  family.setMaxVersions(Integer.MAX_VALUE);
  for (Map.Entry<String, String> property : familyProperties.entrySet()) {
    family.setValue(property.getKey(), property.getValue());
  }
  return new HBase98RegionWrapper(
    IncrementSummingScannerTest.createRegion(TEST_HBASE.getConfiguration(), cConf, tableId, family));
}
/**
 * Creates a scanner that sums increment cells read from the wrapped scanner.
 *
 * @param region the region being scanned
 * @param batchSize default batch size used when no explicit limit is given
 * @param internalScanner the underlying scanner to wrap; if it is also a
 *                        {@link RegionScanner}, it is kept for region-scanner delegation
 * @param scanType the type of scan (user scan, flush, compaction, ...)
 * @param compactionUpperBound upper bound timestamp used during compactions
 *                             (fixed typo: was misspelled {@code compationUpperBound})
 * @param oldestTsByTTL oldest timestamp still visible according to the family's TTL
 */
IncrementSummingScanner(HRegion region, int batchSize, InternalScanner internalScanner,
                        ScanType scanType, long compactionUpperBound, long oldestTsByTTL) {
  this.region = region;
  this.batchSize = batchSize;
  this.baseScanner = new WrappedScanner(internalScanner);
  if (internalScanner instanceof RegionScanner) {
    this.baseRegionScanner = (RegionScanner) internalScanner;
  }
  this.scanType = scanType;
  this.compactionUpperBound = compactionUpperBound;
  this.oldestTsByTTL = oldestTsByTTL;
}
/**
 * Raw fetch of the next row's cells using the scanner's configured batch size.
 *
 * @param cells output list that receives the cells
 * @return {@code true} if more rows remain
 * @throws IOException if the underlying scanner fails
 */
@Override
public boolean nextRaw(List<Cell> cells) throws IOException {
  // default the limit to the configured batch size
  return nextRaw(cells, batchSize);
}
/**
 * Returns the next available cell for the current row and advances the pointer to the next
 * cell. This method can be called multiple times in a row to advance through all the
 * available cells.
 *
 * @param limit the limit of number of cells to return if the next batch must be fetched by
 *              the wrapped scanner
 * @return the next available cell or null if no more cells are available for the current row
 * @throws IOException if fetching the next batch from the wrapped scanner fails
 */
public Cell nextCell(int limit) throws IOException {
  Cell next = peekNextCell(limit);
  if (next == null) {
    // current row exhausted; do not move the pointer
    return null;
  }
  currentIdx++;
  return next;
}
/**
 * Releases the wrapped scanner's resources.
 *
 * @throws IOException if closing the underlying scanner fails
 */
@Override
public void close() throws IOException {
  // only the wrapped scanner holds resources
  baseScanner.close();
}
/**
 * Scans the region starting at the given row with no stop row.
 *
 * @param results output list receiving the scanned cells
 * @param startRow first row (inclusive) of the scan
 * @return {@code true} if more results remain
 * @throws IOException if the scan fails
 */
@Override
public boolean scanRegion(List<ColumnCell> results, byte[] startRow) throws IOException {
  // null stop row means scan to the end of the region
  return scanRegion(results, startRow, null);
}
/**
 * Verifies the expected counter values for a scan, without limiting the batch size.
 *
 * @param region region to scan
 * @param scan scan to execute
 * @param counts expected counter values, in scan order
 */
private void verifyCounts(HRegion region, Scan scan, long[] counts) throws Exception {
  // -1 indicates no batch limit
  verifyCounts(region, scan, counts, -1);
}
/**
 * Builds the transaction-aware filter for a scan, combining increment handling with the
 * transaction visibility rules and wrapping it in a {@link CellSkipFilter}.
 *
 * @param tx the transaction to apply
 * @param scanType the type of scan being performed
 * @param filter an optional additional filter to chain
 * @return the combined filter to install on the scan
 */
@Override
protected Filter getTransactionFilter(Transaction tx, ScanType scanType, Filter filter) {
  return new CellSkipFilter(
    new IncrementTxFilter(tx, ttlByFamily, allowEmptyValues, scanType, filter));
}
}
/**
 * Convenience helper that creates a test region with a default column descriptor
 * for the given family.
 *
 * @param tableId identifier of the table
 * @param family name of the single column family
 * @return the newly created region
 */
private HRegion createRegion(TableId tableId, byte[] family) throws Exception {
  HColumnDescriptor columnDescriptor = new HColumnDescriptor(family);
  return createRegion(conf, cConf, tableId, columnDescriptor);
}
/**
 * Wraps the flush scanner so increment cells are summed while memstore contents are
 * written out, retaining deletes as required during flushes.
 */
@Override
public InternalScanner preFlush(ObserverContext<RegionCoprocessorEnvironment> e, Store store,
                                InternalScanner scanner) throws IOException {
  byte[] familyName = store.getFamily().getName();
  long upperBound = state.getCompactionBound(familyName);
  long oldestVisible = state.getOldestVisibleTimestamp(familyName);
  return new IncrementSummingScanner(region, IncrementHandlerState.BATCH_UNLIMITED, scanner,
                                     ScanType.COMPACT_RETAIN_DELETES, upperBound, oldestVisible);
}
@Override public RegionScanner preScannerOpen(ObserverContext<RegionCoprocessorEnvironment> e, Scan scan, RegionScanner s) throws IOException { // must see all versions to aggregate increments scan.setMaxVersions(); scan.setFilter(Filters.combine(new IncrementFilter(), scan.getFilter())); return s; }
/**
 * Fetches the next row's cells, bounded by the given limit.
 *
 * @param cells output list that receives the cells
 * @param limit maximum number of cells to return in this batch
 * @return {@code true} if more rows remain
 * @throws IOException if the underlying scanner fails
 */
@Override
public boolean next(List<Cell> cells, int limit) throws IOException {
  boolean hasMore = nextInternal(cells, limit);
  return hasMore;
}
/**
 * Wraps the compaction scanner so increment cells older than the compaction bound are
 * summed while the store files are rewritten.
 */
@Override
public InternalScanner preCompact(ObserverContext<RegionCoprocessorEnvironment> e, Store store,
                                  InternalScanner scanner, ScanType scanType) throws IOException {
  byte[] familyName = store.getFamily().getName();
  long upperBound = state.getCompactionBound(familyName);
  long oldestVisible = state.getOldestVisibleTimestamp(familyName);
  return new IncrementSummingScanner(region, IncrementHandlerState.BATCH_UNLIMITED, scanner,
                                     scanType, upperBound, oldestVisible);
}
/**
 * Compaction hook variant that also receives the triggering {@link CompactionRequest}.
 * The request does not influence increment summing, so this delegates to the
 * request-less overload instead of duplicating its body (original code was a verbatim copy).
 */
@Override
public InternalScanner preCompact(ObserverContext<RegionCoprocessorEnvironment> e, Store store,
                                  InternalScanner scanner, ScanType scanType,
                                  CompactionRequest request) throws IOException {
  return preCompact(e, store, scanner, scanType);
}
}