private ScanBuilder configureRangeScan(ScanBuilder scan, @Nullable byte[] startRow,
                                       @Nullable byte[] stopRow, @Nullable FuzzyRowFilter filter) {
  // todo: should be configurable
  scan.setCaching(1000);

  if (startRow != null) {
    scan.setStartRow(startRow);
  }
  if (stopRow != null) {
    scan.setStopRow(stopRow);
  }
  scan.addFamily(columnFamily);
  if (filter != null) {
    List<Pair<byte[], byte[]>> fuzzyPairs =
      Lists.newArrayListWithExpectedSize(filter.getFuzzyKeysData().size());
    for (ImmutablePair<byte[], byte[]> pair : filter.getFuzzyKeysData()) {
      if (rowKeyDistributor != null) {
        fuzzyPairs.addAll(rowKeyDistributor.getDistributedFilterPairs(pair));
      } else {
        // Make a copy of the filter pair because the key and mask will get modified in the HBase FuzzyRowFilter.
        fuzzyPairs.add(Pair.newPair(Arrays.copyOf(pair.getFirst(), pair.getFirst().length),
                                    Arrays.copyOf(pair.getSecond(), pair.getSecond().length)));
      }
    }
    scan.setFilter(new org.apache.hadoop.hbase.filter.FuzzyRowFilter(fuzzyPairs));
  }
  return scan;
}
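// Hedged usage sketch (not from the original source): how a caller might combine
// configureRangeScan with an optional fuzzy filter to scan one key range. The scanRange
// method name is invented, and the tableUtil/hTable context is assumed for illustration.
private ResultScanner scanRange(HTable hTable, byte[] startRow, byte[] stopRow,
                                @Nullable FuzzyRowFilter filter) throws IOException {
  // configureRangeScan fills in caching, the row bounds, the column family and the filter.
  ScanBuilder scan = configureRangeScan(tableUtil.buildScan(), startRow, stopRow, filter);
  return hTable.getScanner(scan.build());
}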
@Override
public List<TopicId> listTopics(NamespaceId namespaceId) throws IOException {
  byte[] startRow = MessagingUtils.topicScanKey(namespaceId);
  ScanBuilder scanBuilder = tableUtil.buildScan()
    .setStartRow(startRow)
    .setStopRow(Bytes.stopKeyForPrefix(startRow));
  return scanTopics(scanBuilder);
}
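// Sketch of the prefix-scan idiom used above, assuming Bytes.stopKeyForPrefix returns the
// first row key strictly greater than every key sharing the prefix (and null when the
// prefix is all 0xFF bytes, i.e. there is no finite stop key). A hand-rolled equivalent
// might look like this, for illustration only:
static byte[] stopKeyForPrefix(byte[] prefix) {
  for (int i = prefix.length - 1; i >= 0; i--) {
    if (prefix[i] != (byte) 0xFF) {
      byte[] stopKey = Arrays.copyOf(prefix, i + 1);
      stopKey[i]++;      // bump the last non-0xFF byte to step just past the prefix range
      return stopKey;
    }
  }
  return null;           // no finite stop key; the scan runs to the end of the table
}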
hScan.setStopRow(stopRow);
@Override
public void rollback(byte[] startKey, byte[] stopKey, byte[] txWritePtr) throws IOException {
  Scan scan = tableUtil.buildScan()
    .setStartRow(startKey)
    .setStopRow(stopKey)
    .setCaching(scanCacheRows)
    .build();

  List<Put> batchPuts = new ArrayList<>();
  try (ResultScanner scanner = DistributedScanner.create(hTable, scan, rowKeyDistributor, scanExecutor)) {
    for (Result result : scanner) {
      // No need to turn the key back into the original row key because we want to put with the actual row key.
      PutBuilder putBuilder = tableUtil.buildPut(result.getRow());
      putBuilder.add(columnFamily, TX_COL, txWritePtr);
      batchPuts.add(putBuilder.build());
    }
  }

  try {
    if (!batchPuts.isEmpty()) {
      hTable.put(batchPuts);
      if (!hTable.isAutoFlush()) {
        hTable.flushCommits();
      }
    }
  } catch (IOException e) {
    throw exceptionHandler.handle(e);
  }
}
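// Hypothetical caller sketch (not in the excerpt above): undoing a failed commit by
// rewriting TX_COL for every row in the transaction's key range. Encoding the rollback
// marker as the negated write pointer is an assumption for illustration, not something
// this excerpt confirms, and the key-range arguments are left to the caller.
void rollbackOnFailure(long txWritePointer, byte[] startKey, byte[] stopKey) throws IOException {
  byte[] txWritePtr = Bytes.toBytes(-txWritePointer); // assumed rollback-marker format
  rollback(startKey, stopKey, txWritePtr);
}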
Scan scan = tableUtil.buildScan()
  .setStartRow(startRow)
  .setStopRow(stopRow)
  .setCaching(scanCacheRows)
  .build();
Scan scan = tableUtil.buildScan()
  .setStartRow(startRow)
  .setStopRow(stopRow)
  .setMaxVersions(1)
  .addColumn(QueueEntryRow.COLUMN_FAMILY, stateColumnName)
  .build();
private ResultScanner createHBaseScanner(ConsumerConfig consumerConfig, HTable hTable,
                                         Scan scan, int numRows) throws IOException {
  // Modify the scan with the sharded key prefix.
  ScanBuilder shardedScan = tableUtil.buildScan(scan);

  // We should roughly divide by the number of buckets, but pad by 10% so we don't need
  // another RPC when the estimate is slightly off.
  int caching = (int) (1.1 * numRows / distributorBuckets);
  shardedScan.setCaching(caching);

  if (scan.getStartRow().length > 0) {
    byte[] rowKey = getShardedKey(consumerConfig, consumerConfig.getInstanceId(), scan.getStartRow());
    shardedScan.setStartRow(rowKey);
  }
  if (scan.getStopRow().length > 0) {
    byte[] rowKey = getShardedKey(consumerConfig, consumerConfig.getInstanceId(), scan.getStopRow());
    shardedScan.setStopRow(rowKey);
  }
  return DistributedScanner.create(hTable, shardedScan.build(), rowKeyDistributor, scansExecutor);
}
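// Illustrative caller (assumed, not from the original source): draining up to numRows
// entries from the sharded scanner that createHBaseScanner builds. ResultScanner is
// Closeable, so try-with-resources releases the underlying per-bucket scanners.
private List<Result> fetchRows(ConsumerConfig consumerConfig, HTable hTable,
                               Scan scan, int numRows) throws IOException {
  List<Result> rows = new ArrayList<>(numRows);
  try (ResultScanner scanner = createHBaseScanner(consumerConfig, hTable, scan, numRows)) {
    Result result;
    while (rows.size() < numRows && (result = scanner.next()) != null) {
      rows.add(result);
    }
  }
  return rows;
}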