/** Returns the column values of the current record by delegating to the active column scanner. */
@Override
public Iterator<ColumnValue> getCurrentValue() throws IOException, InterruptedException {
    final Iterator<ColumnValue> values = colScanner.iterator();
    return values;
}
/** Returns the column values of the current record by delegating to the active column scanner. */
@Override
public Iterator<ColumnValue> getCurrentValue() throws IOException, InterruptedException {
    final Iterator<ColumnValue> values = colScanner.iterator();
    return values;
}
/**
 * Returns an iterator over the scanner's column values, pairing each value with the row
 * currently being scanned.
 */
@Override
public Iterator<ColumnValue> iterator() {
    // The wrapping iterator needs both the row key and the raw value stream.
    return new RtxCVIterator(cs.getRow(), cs.iterator());
}
/**
 * Returns an iterator over the scanner's column values that TRACE-logs each value as it is
 * consumed. The elements themselves pass through unchanged.
 */
@Override
public Iterator<ColumnValue> iterator() {
    return Iterators.transform(cs.iterator(), cv -> {
        // Guard so the hex encoding of column and value is only computed when TRACE is
        // actually enabled; parameterized logging alone does not avoid argument evaluation.
        if (log.isTraceEnabled()) {
            log.trace("txid: {} scanId: {} next()-> {} {} {}", txid, scanId, encRow,
                    Hex.encNonAscii(cv.getColumn()), Hex.encNonAscii(cv.getValue()));
        }
        return cv;
    });
}
/**
 * Returns an iterator over the scanner's column values that TRACE-logs each value as it is
 * consumed. The elements themselves pass through unchanged.
 */
@Override
public Iterator<ColumnValue> iterator() {
    return Iterators.transform(cs.iterator(), cv -> {
        // Guard so the hex encoding of column and value is only computed when TRACE is
        // actually enabled; parameterized logging alone does not avoid argument evaluation.
        if (log.isTraceEnabled()) {
            log.trace("txid: {} scanId: {} next()-> {} {} {}", txid, scanId, encRow,
                    Hex.encNonAscii(cv.getColumn()), Hex.encNonAscii(cv.getValue()));
        }
        return cv;
    });
}
/**
 * Retrieve all of the information about Periodic Query results already registered
 * with Fluo. This is returned in the form of {@link CommandNotification}s that
 * can be registered with the {@link NotificationCoordinatorExecutor}.
 *
 * @param sx - snapshot for reading results from Fluo
 * @return - collection of CommandNotifications that indicate Periodic Query information registered with system
 */
public Collection<CommandNotification> getNotifications(Snapshot sx) {
    Set<PeriodicQueryMetadata> periodicMetadata = new HashSet<>();
    // Scan only rows carrying the periodic-query prefix, fetching just the node-id column.
    RowScanner scanner = sx.scanner().fetch(FluoQueryColumns.PERIODIC_QUERY_NODE_ID)
            .over(Span.prefix(IncrementalUpdateConstants.PERIODIC_QUERY_PREFIX)).byRow().build();
    // RowScanner and ColumnScanner are Iterable, so enhanced-for replaces the manual iterators.
    for (ColumnScanner colScanner : scanner) {
        for (ColumnValue value : colScanner) {
            periodicMetadata.add(dao.readPeriodicQueryMetadata(sx, value.getsValue()));
        }
    }
    return getCommandNotifications(sx, periodicMetadata);
}
/**
 * Retrieve all of the information about Periodic Query results already registered
 * with Fluo. This is returned in the form of {@link CommandNotification}s that
 * can be registered with the {@link NotificationCoordinatorExecutor}.
 *
 * @param sx - snapshot for reading results from Fluo
 * @return - collection of CommandNotifications that indicate Periodic Query information registered with system
 */
public Collection<CommandNotification> getNotifications(Snapshot sx) {
    Set<PeriodicQueryMetadata> periodicMetadata = new HashSet<>();
    // Scan only rows carrying the periodic-query prefix, fetching just the node-id column.
    RowScanner scanner = sx.scanner().fetch(FluoQueryColumns.PERIODIC_QUERY_NODE_ID)
            .over(Span.prefix(IncrementalUpdateConstants.PERIODIC_QUERY_PREFIX)).byRow().build();
    // RowScanner and ColumnScanner are Iterable, so enhanced-for replaces the manual iterators.
    for (ColumnScanner colScanner : scanner) {
        for (ColumnValue value : colScanner) {
            periodicMetadata.add(dao.readPeriodicQueryMetadata(sx, value.getsValue()));
        }
    }
    return getCommandNotifications(sx, periodicMetadata);
}
// NOTE(review): truncated fragment — walks a column scanner's values, apparently stopping
// once 'count' reaches batchSize; confirm against the full method before relying on this.
Iterator<ColumnValue> iter = colScanner.iterator(); while (iter.hasNext()) { if (count >= batchSize) {
// NOTE(review): truncated fragment — walks a column scanner's values, apparently stopping
// once 'count' reaches batchSize; confirm against the full method before relying on this.
Iterator<ColumnValue> iter = colScanner.iterator(); while (iter.hasNext()) { if (count >= batchSize) {
// NOTE(review): truncated fragment — records the current row, then deserializes each stored
// value into bsSet until the batchLimitMet flag trips; full loop body not visible here.
final ColumnScanner colScanner = colScannerIter.next(); row = colScanner.getRow(); final Iterator<ColumnValue> iter = colScanner.iterator(); while (iter.hasNext() && !batchLimitMet) { bsSet.add(BS_SERDE.deserialize(iter.next().getValue()));
// NOTE(review): truncated fragment — records the current row, then deserializes each stored
// value into bsSet until the batchLimitMet flag trips; full loop body not visible here.
final ColumnScanner colScanner = colScannerIter.next(); row = colScanner.getRow(); final Iterator<ColumnValue> iter = colScanner.iterator(); while (iter.hasNext() && !batchLimitMet) { bsSet.add(BS_SERDE.deserialize(iter.next().getValue()));
// NOTE(review): truncated fragment — records the current row and iterates its values,
// apparently stopping once bsSet reaches batchSize; confirm against the full method.
final ColumnScanner colScanner = colScannerIter.next(); row = colScanner.getRow(); final Iterator<ColumnValue> iter = colScanner.iterator(); while (iter.hasNext()) { if (bsSet.size() >= batchSize) {
// NOTE(review): truncated fragment — records the current row and iterates its values,
// apparently stopping once bsSet reaches batchSize; confirm against the full method.
final ColumnScanner colScanner = colScannerIter.next(); row = colScanner.getRow(); final Iterator<ColumnValue> iter = colScanner.iterator(); while (iter.hasNext()) { if (bsSet.size() >= batchSize) {
/**
 * Counts the binding set values stored under the given node id.
 *
 * @param fluoClient - client used to open a read transaction
 * @param nodeId - id of the query node whose results are counted
 * @param bsColumn - column that holds the binding set values
 * @return number of values in {@code bsColumn} whose row resolves back to {@code nodeId}
 * @throws IllegalArgumentException if no {@link NodeType} can be derived from {@code nodeId}
 */
private int countResults(FluoClient fluoClient, String nodeId, Column bsColumn) {
    try (Transaction tx = fluoClient.newTransaction()) {
        int count = 0;
        Optional<NodeType> type = NodeType.fromNodeId(nodeId);
        // Fail fast with a clear message rather than an unchecked Optional.get().
        if (!type.isPresent()) {
            throw new IllegalArgumentException("Could not determine NodeType for nodeId: " + nodeId);
        }
        Bytes prefixBytes = Bytes.of(type.get().getNodeTypePrefix());
        RowScanner scanner = tx.scanner().over(Span.prefix(prefixBytes)).fetch(bsColumn).byRow().build();
        Iterator<ColumnScanner> colScanners = scanner.iterator();
        while (colScanners.hasNext()) {
            ColumnScanner colScanner = colScanners.next();
            // Sharded rows embed the node id; only count rows belonging to the requested node.
            BindingSetRow bsRow = BindingSetRow.makeFromShardedRow(prefixBytes, colScanner.getRow());
            if (bsRow.getNodeId().equals(nodeId)) {
                Iterator<ColumnValue> vals = colScanner.iterator();
                while (vals.hasNext()) {
                    vals.next();
                    count++;
                }
            }
        }
        tx.commit();
        return count;
    }
}