@Override
public Result next() throws IOException {
  // hasNext() buffers the next merged row on demand; hand it out and clear
  // the buffer so the following call fetches a fresh one.
  if (!hasNext()) {
    return null;
  }
  Result buffered = next;
  next = null;
  return buffered;
}
@Override
public boolean hasNext() {
  // A row buffered by a previous call still waits to be consumed.
  if (next != null) {
    return true;
  }
  try {
    next = DistributedScanner.this.next();
  } catch (IOException e) {
    // Iterator.hasNext() cannot throw a checked exception; wrap it,
    // preserving the original cause.
    throw new RuntimeException(e);
  }
  return next != null;
}
private boolean hasNext() throws IOException {
  // Lazily buffer the next row; a non-null buffer means more rows remain.
  if (next == null) {
    next = nextInternal();
  }
  return next != null;
}
private ResultScanner getScanner(ScanBuilder scanBuilder) throws IOException {
  // Without a key distributor a plain table scan suffices; otherwise fan the
  // scan out across all salt buckets via DistributedScanner.
  Scan scan = scanBuilder.build();
  if (rowKeyDistributor == null) {
    return hTable.getScanner(scan);
  }
  return DistributedScanner.create(hTable, scan, rowKeyDistributor, scanExecutor);
}
public static DistributedScanner create(HTableInterface hTable, Scan originalScan, AbstractRowKeyDistributor keyDistributor, ExecutorService scansExecutor) throws IOException { Scan[] scans = keyDistributor.getDistributedScans(originalScan); ResultScanner[] rss = new ResultScanner[scans.length]; for (int i = 0; i < scans.length; i++) { rss[i] = hTable.getScanner(scans[i]); } int caching = originalScan.getCaching(); // to optimize work of distributed scan we need to know that, so we are resolving it from config in the case it is // not set for scan if (caching < 1) { caching = hTable.getConfiguration().getInt("hbase.client.scanner.caching", 1); } return new DistributedScanner(keyDistributor, rss, caching, scansExecutor); }
private ResultScanner getScanner(ScanBuilder scanBuilder) throws IOException {
  // Plain scan when no distributor is configured; otherwise a fan-out scan
  // over every salt bucket.
  Scan built = scanBuilder.build();
  return (rowKeyDistributor == null)
      ? hTable.getScanner(built)
      : DistributedScanner.create(hTable, built, rowKeyDistributor, scanExecutor);
}
public static DistributedScanner create(HTableInterface hTable, Scan originalScan, AbstractRowKeyDistributor keyDistributor, ExecutorService scansExecutor) throws IOException { Scan[] scans = keyDistributor.getDistributedScans(originalScan); ResultScanner[] rss = new ResultScanner[scans.length]; for (int i = 0; i < scans.length; i++) { rss[i] = hTable.getScanner(scans[i]); } int caching = originalScan.getCaching(); // to optimize work of distributed scan we need to know that, so we are resolving it from config in the case it is // not set for scan if (caching < 1) { caching = hTable.getConfiguration().getInt("hbase.client.scanner.caching", 1); } return new DistributedScanner(keyDistributor, rss, caching, scansExecutor); }
@Override public QueueScanner createScanner(ConsumerConfig consumerConfig, HTable hTable, Scan scan, int numRows) throws IOException { // we should roughly divide by number of buckets, but don't want another RPC for the case we are not exactly right ScanBuilder distributedScan = tableUtil.buildScan(scan); int caching = (int) (1.1 * numRows / distributorBuckets); distributedScan.setCaching(caching); ResultScanner scanner = DistributedScanner.create(hTable, distributedScan.build(), rowKeyDistributor, scansExecutor); return new HBaseQueueScanner(scanner, numRows, rowKeyConverter); }
@Override
public boolean hasNext() {
  if (next != null) {
    return true;
  }
  try {
    // Pull the next merged row from the enclosing scanner and buffer it for
    // the subsequent next() call.
    next = DistributedScanner.this.next();
    return next != null;
  } catch (IOException e) {
    // hasNext() cannot declare IOException; rethrow unchecked with the cause.
    throw new RuntimeException(e);
  }
}
private boolean hasNext() throws IOException {
  if (next != null) {
    // A row is already buffered from an earlier call.
    return true;
  }
  // Buffer the next row; null signals the scan is exhausted.
  next = nextInternal();
  return next != null;
}
@Override
public Result next() throws IOException {
  // Consume the row buffered by hasNext(), or report exhaustion with null.
  if (hasNext()) {
    Result current = next;
    next = null;
    return current;
  }
  return null;
}
@Override public void rollback(byte[] startKey, byte[] stopKey, byte[] txWritePtr) throws IOException { Scan scan = tableUtil.buildScan() .setStartRow(startKey) .setStopRow(stopKey) .setCaching(scanCacheRows) .build(); List<Put> batchPuts = new ArrayList<>(); try (ResultScanner scanner = DistributedScanner.create(hTable, scan, rowKeyDistributor, scanExecutor)) { for (Result result : scanner) { // No need to turn the key back to the original row key because we want to put with the actual row key PutBuilder putBuilder = tableUtil.buildPut(result.getRow()); putBuilder.add(columnFamily, TX_COL, txWritePtr); batchPuts.add(putBuilder.build()); } } try { if (!batchPuts.isEmpty()) { hTable.put(batchPuts); if (!hTable.isAutoFlush()) { hTable.flushCommits(); } } } catch (IOException e) { throw exceptionHandler.handle(e); } }
@Override public Result[] next(int nbRows) throws IOException { // Identical to HTable.ClientScanner implementation // Collect values to be returned here ArrayList<Result> resultSets = new ArrayList<>(nbRows); for (int i = 0; i < nbRows; i++) { Result next = next(); if (next != null) { resultSets.add(next); } else { break; } } return resultSets.toArray(new Result[resultSets.size()]); }
@Override public void rollback(byte[] startKey, byte[] stopKey, byte[] txWritePtr) throws IOException { Scan scan = tableUtil.buildScan() .setStartRow(startKey) .setStopRow(stopKey) .setCaching(scanCacheRows) .build(); List<Put> batchPuts = new ArrayList<>(); try (ResultScanner scanner = DistributedScanner.create(hTable, scan, rowKeyDistributor, scanExecutor)) { for (Result result : scanner) { // No need to turn the key back to the original row key because we want to put with the actual row key PutBuilder putBuilder = tableUtil.buildPut(result.getRow()); putBuilder.add(columnFamily, TX_COL, txWritePtr); batchPuts.add(putBuilder.build()); } } try { if (!batchPuts.isEmpty()) { hTable.put(batchPuts); if (!hTable.isAutoFlush()) { hTable.flushCommits(); } } } catch (IOException e) { throw exceptionHandler.handle(e); } }
@Override public Result[] next(int nbRows) throws IOException { // Identical to HTable.ClientScanner implementation // Collect values to be returned here ArrayList<Result> resultSets = new ArrayList<>(nbRows); for (int i = 0; i < nbRows; i++) { Result next = next(); if (next != null) { resultSets.add(next); } else { break; } } return resultSets.toArray(new Result[resultSets.size()]); }
.build(); final ResultScanner scanner = DistributedScanner.create(hTable, scan, rowKeyDistributor, scanExecutor); return new AbstractCloseableIterator<RawPayloadTableEntry>() { private final RawPayloadTableEntry tableEntry = new RawPayloadTableEntry();
.build(); final ResultScanner scanner = DistributedScanner.create(hTable, scan, rowKeyDistributor, scanExecutor); return new AbstractCloseableIterator<RawPayloadTableEntry>() { private final RawPayloadTableEntry tableEntry = new RawPayloadTableEntry();
final ResultScanner scanner = DistributedScanner.create(hTable, scan, keyDistributor, scanExecutor); return new StateScanner() {
private void testSimpleScanInternal(long origKeyPrefix, Scan scan, int numValues, int startWithValue, int seekIntervalMinValue, int seekIntervalMaxValue) throws IOException { int valuesCountInSeekInterval = writeTestData(origKeyPrefix, numValues, startWithValue, seekIntervalMinValue, seekIntervalMaxValue); // TODO: add some filters to the scan for better testing ResultScanner distributedScanner = DistributedScanner.create(hTable, scan, keyDistributor, Executors.newFixedThreadPool(2)); Result previous = null; int countMatched = 0; for (Result current : distributedScanner) { countMatched++; if (previous != null) { byte[] currentRowOrigKey = keyDistributor.getOriginalKey(current.getRow()); byte[] previousRowOrigKey = keyDistributor.getOriginalKey(previous.getRow()); Assert.assertTrue(Bytes.compareTo(currentRowOrigKey, previousRowOrigKey) >= 0); int currentValue = Bytes.toInt(current.getValue(CF, QUAL)); Assert.assertTrue(currentValue >= seekIntervalMinValue); Assert.assertTrue(currentValue <= seekIntervalMaxValue); } previous = current; } Assert.assertEquals(valuesCountInSeekInterval, countMatched); }
final ResultScanner scanner = DistributedScanner.create(hTable, scan, rowKeyDistributor, scanExecutor); final RawMessageTableEntry tableEntry = new RawMessageTableEntry(); return new AbstractCloseableIterator<RawMessageTableEntry>() {