Scan scan = scanBuilder.setCaching(metadataScanSize).build();
try (ResultScanner scanner = metadataTable.getScanner(scan)) {
  for (Result result : scanner) {
    // ... process each metadata row ...
  }
}
@Override
public QueueScanner createScanner(ConsumerConfig consumerConfig, HTable hTable,
                                  Scan scan, int numRows) throws IOException {
  // We should roughly divide by the number of buckets, but we don't want
  // another RPC for the case where the estimate is not exactly right.
  ScanBuilder distributedScan = tableUtil.buildScan(scan);
  int caching = (int) (1.1 * numRows / distributorBuckets);
  distributedScan.setCaching(caching);
  ResultScanner scanner = DistributedScanner.create(hTable, distributedScan.build(),
                                                    rowKeyDistributor, scansExecutor);
  return new HBaseQueueScanner(scanner, numRows, rowKeyConverter);
}
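// Worked example of the caching heuristic above (values are illustrative,
// not from the original code): with numRows = 1000 and distributorBuckets = 8,
// caching becomes (int) (1.1 * 1000 / 8) = 137, slightly more than an even
// 125-row split, so a mildly uneven bucket distribution still avoids a
// second RPC round trip.
int numRows = 1000;
int distributorBuckets = 8;
int caching = (int) (1.1 * numRows / distributorBuckets); // 137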
/**
 * Scans the HBase table to get a list of {@link TopicId}.
 */
private List<TopicId> scanTopics(ScanBuilder scanBuilder) throws IOException {
  Scan scan = scanBuilder
    .setFilter(new FirstKeyOnlyFilter())
    .setCaching(scanCacheRows)
    .build();
  try {
    List<TopicId> topicIds = new ArrayList<>();
    try (ResultScanner resultScanner = hTable.getScanner(scan)) {
      for (Result result : resultScanner) {
        TopicId topicId = MessagingUtils.toTopicId(result.getRow());
        byte[] value = result.getValue(columnFamily, COL);
        Map<String, String> properties = GSON.fromJson(Bytes.toString(value), MAP_TYPE);
        TopicMetadata metadata = new TopicMetadata(topicId, properties);
        if (metadata.exists()) {
          topicIds.add(topicId);
        }
      }
    }
    return topicIds;
  } catch (IOException e) {
    throw exceptionHandler.handle(e);
  }
}
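// Hypothetical caller of scanTopics, assuming the enclosing class exposes the
// same tableUtil seen in the other snippets; this sketch is illustrative and
// not part of the original code.
List<TopicId> topicIds = scanTopics(tableUtil.buildScan());
for (TopicId topicId : topicIds) {
  System.out.println("Found topic: " + topicId);
}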
private static Map<String, Long> getMapFromTable(String rowType) throws IOException {
  HBaseTableUtil tableUtil = new HBaseTableUtilFactory(cConf).get();
  HTable hTable = tableUtil.createHTable(hConf, getReplicationStateTableId(tableUtil));
  // Scan the table for the rows of all regions.
  ScanBuilder scan = getScanBuilder(tableUtil, rowType);
  Result result;
  HashMap<String, Long> timeMap = new HashMap<>();
  try (ResultScanner resultScanner = hTable.getScanner(scan.build())) {
    while ((result = resultScanner.next()) != null) {
      ReplicationStatusKey key = new ReplicationStatusKey(result.getRow());
      String region = key.getRegionName();
      Long timestamp = getTimeFromResult(result, rowType);
      // Keep only the most recent timestamp seen for each region
      if (timeMap.get(region) == null || timestamp > timeMap.get(region)) {
        timeMap.put(region, timestamp);
      }
    }
  } catch (Exception e) {
    LOG.error("Error while reading table.", e);
    throw Throwables.propagate(e);
  } finally {
    hTable.close();
  }
  return timeMap;
}
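// Hypothetical caller: build one map per row type and compare them per region
// to judge replication lag. The row-type constants are the ones used by
// dumpReplicationStateTable below; the comparison logic itself is illustrative.
Map<String, Long> writeTimes =
  getMapFromTable(ReplicationConstants.ReplicationStatusTool.WRITE_TIME_ROW_TYPE);
Map<String, Long> replicateTimes =
  getMapFromTable(ReplicationConstants.ReplicationStatusTool.REPLICATE_TIME_ROW_TYPE);
for (Map.Entry<String, Long> entry : writeTimes.entrySet()) {
  Long replicated = replicateTimes.get(entry.getKey());
  boolean caughtUp = replicated != null && replicated >= entry.getValue();
  System.out.println(entry.getKey() + " caught up: " + caughtUp);
}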
hScan.setAttribute(TxConstants.TX_OPERATION_ATTRIBUTE_KEY, getEncodedTx());
ResultScanner resultScanner = wrapResultScanner(hTable.getScanner(hScan.build()));
return new HBaseScanner(resultScanner, columnFamily);
@Override
public void rollback(byte[] startKey, byte[] stopKey, byte[] txWritePtr) throws IOException {
  Scan scan = tableUtil.buildScan()
    .setStartRow(startKey)
    .setStopRow(stopKey)
    .setCaching(scanCacheRows)
    .build();

  List<Put> batchPuts = new ArrayList<>();
  try (ResultScanner scanner = DistributedScanner.create(hTable, scan, rowKeyDistributor, scanExecutor)) {
    for (Result result : scanner) {
      // No need to turn the key back into the original row key because we want to put with the actual row key
      PutBuilder putBuilder = tableUtil.buildPut(result.getRow());
      putBuilder.add(columnFamily, TX_COL, txWritePtr);
      batchPuts.add(putBuilder.build());
    }
  }

  try {
    if (!batchPuts.isEmpty()) {
      hTable.put(batchPuts);
      if (!hTable.isAutoFlush()) {
        hTable.flushCommits();
      }
    }
  } catch (IOException e) {
    throw exceptionHandler.handle(e);
  }
}
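// Hypothetical rollback call: rewrite the transaction column of every row in
// [startKey, stopKey) with the given write pointer. The key and pointer values
// below are placeholders, not taken from the original code.
byte[] startKey = Bytes.toBytes("rowA");
byte[] stopKey = Bytes.toBytes("rowZ");
long writePointer = 42L; // placeholder transaction write pointer
rollback(startKey, stopKey, Bytes.toBytes(writePointer));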
.setStopRow(stopRow)
.setCaching(scanCacheRows)
.build();
.addColumn(QueueEntryRow.COLUMN_FAMILY, stateColumnName)
.setCaching(MAX_SCAN_ROWS)
.build();
private static void dumpReplicationStateTable() throws Exception {
  System.out.println("\nThis is all the HBase regions on the Cluster:");
  HBaseTableUtil tableUtil = new HBaseTableUtilFactory(cConf).get();
  HTable hTable = tableUtil.createHTable(hConf, getReplicationStateTableId(tableUtil));
  ScanBuilder scan = tableUtil.buildScan();
  scan.addColumn(Bytes.toBytes(ReplicationConstants.ReplicationStatusTool.TIME_FAMILY),
                 Bytes.toBytes(ReplicationConstants.ReplicationStatusTool.WRITE_TIME_ROW_TYPE));
  scan.addColumn(Bytes.toBytes(ReplicationConstants.ReplicationStatusTool.TIME_FAMILY),
                 Bytes.toBytes(ReplicationConstants.ReplicationStatusTool.REPLICATE_TIME_ROW_TYPE));
  Result result;
  try (ResultScanner resultScanner = hTable.getScanner(scan.build())) {
    while ((result = resultScanner.next()) != null) {
      ReplicationStatusKey key = new ReplicationStatusKey(result.getRow());
      String rowType = key.getRowType();
      String region = key.getRegionName();
      UUID rsID = key.getRsID();
      Long writeTime = getTimeFromResult(result, ReplicationConstants.ReplicationStatusTool.WRITE_TIME_ROW_TYPE);
      Long replicateTime = getTimeFromResult(result,
                                             ReplicationConstants.ReplicationStatusTool.REPLICATE_TIME_ROW_TYPE);
      System.out.println("Key=>rowType:" + rowType + ":region:" + region + ":RSID:" + rsID
                           + " writeTime:" + writeTime + ":replicateTime:" + replicateTime);
    }
  } finally {
    hTable.close();
  }
}
private ResultScanner createHBaseScanner(ConsumerConfig consumerConfig, HTable hTable,
                                         Scan scan, int numRows) throws IOException {
  // Modify the scan with the sharded key prefix
  ScanBuilder shardedScan = tableUtil.buildScan(scan);

  // We should roughly divide by the number of buckets, but we don't want
  // another RPC for the case where the estimate is not exactly right.
  int caching = (int) (1.1 * numRows / distributorBuckets);
  shardedScan.setCaching(caching);

  if (scan.getStartRow().length > 0) {
    byte[] rowKey = getShardedKey(consumerConfig, consumerConfig.getInstanceId(), scan.getStartRow());
    shardedScan.setStartRow(rowKey);
  }
  if (scan.getStopRow().length > 0) {
    byte[] rowKey = getShardedKey(consumerConfig, consumerConfig.getInstanceId(), scan.getStopRow());
    shardedScan.setStopRow(rowKey);
  }
  return DistributedScanner.create(hTable, shardedScan.build(), rowKeyDistributor, scansExecutor);
}
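// Hypothetical use of the sharded scanner: iterate the distributed results for
// one consumer instance and close the scanner when done. The process() helper
// is a placeholder, not part of the original code.
try (ResultScanner scanner = createHBaseScanner(consumerConfig, hTable, scan, numRows)) {
  for (Result result : scanner) {
    process(result); // placeholder for per-entry consumer logic
  }
}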