// Drains the split source completely, fetching up to 1000 splits per batch,
// and returns them all as an immutable list. Blocks on each batch future.
private static List<ConnectorSplit> getAllSplits(ConnectorSplitSource splitSource)
        throws InterruptedException, ExecutionException
{
    ImmutableList.Builder<ConnectorSplit> collected = ImmutableList.builder();
    while (!splitSource.isFinished()) {
        collected.addAll(splitSource.getNextBatch(NOT_PARTITIONED, 1000).get().getSplits());
    }
    return collected.build();
}
// NOTE(review): fragment — continuation of an async getNextBatch implementation; the
// method header and the code producing currentToken/batch/splits are outside this view.
// The CAS checkState calls presumably assert that no concurrent fetch raced this
// callback (each must succeed exactly once) — TODO confirm against the full source.
// hasMoreData flips to false once the server stops returning a continuation token.
checkState(nextToken.compareAndSet(currentToken, batch.getNextToken())); checkState(hasMoreData.compareAndSet(true, nextToken.get() != null)); return new ConnectorSplitBatch(splits, isFinished()); }, directExecutor()); resultFuture = catchingThriftException(resultFuture);
// Fetches the next batch from the underlying connector source and wraps each
// connector-level split into an engine-level Split tagged with this source's
// connector id, transaction, and the requested lifespan.
@Override
public ListenableFuture<SplitBatch> getNextBatch(ConnectorPartitionHandle partitionHandle, Lifespan lifespan, int maxSize)
{
    ListenableFuture<ConnectorSplitBatch> connectorBatch = toListenableFuture(source.getNextBatch(partitionHandle, maxSize));
    return Futures.transform(
            connectorBatch,
            batch -> {
                ImmutableList.Builder<Split> wrapped = ImmutableList.builder();
                batch.getSplits().forEach(connectorSplit -> wrapped.add(new Split(connectorId, transactionHandle, connectorSplit, lifespan)));
                return new SplitBatch(wrapped.build(), batch.isNoMoreSplits());
            },
            directExecutor());
}
// NOTE(review): fragment — two alternate return statements from different branches of a
// getNextBatch implementation; the surrounding control flow is outside this view.
// The first form reports "no more splits" only when this batch is empty AND the
// per-bucket queue is finished; the second always signals that more splits may follow.
// TODO confirm which branch each return belongs to in the full source.
return new ConnectorSplitBatch(splits, splits.isEmpty() && queues.isFinished(bucketNumber)); return new ConnectorSplitBatch(splits, false);
// Returns a supplier that lazily materializes up to maxSize splits from the
// backing iterator into one ConnectorSplitBatch. The interrupt flag is checked
// before each element so a cancelled fetch aborts promptly.
private Supplier<ConnectorSplitBatch> batchSupplier(int maxSize)
{
    return () -> {
        ImmutableList.Builder<ConnectorSplit> batch = ImmutableList.builder();
        int remaining = maxSize;
        while (remaining > 0) {
            if (Thread.currentThread().isInterrupted()) {
                throw new RuntimeException("Split batch fetch was interrupted");
            }
            if (!iterator.hasNext()) {
                break;
            }
            batch.add(createSplit(iterator.next()));
            remaining--;
        }
        return new ConnectorSplitBatch(batch.build(), isFinished());
    };
}
// Hands out the next contiguous slice of the pre-materialized split list.
// Fix: dropped the stale @SuppressWarnings("ObjectEquality") — the comparison
// below uses equals(), not ==, so the suppression no longer applies.
@Override
public CompletableFuture<ConnectorSplitBatch> getNextBatch(ConnectorPartitionHandle partitionHandle, int maxSize)
{
    // Only the un-partitioned handle is supported by this source.
    if (!partitionHandle.equals(NOT_PARTITIONED)) {
        throw new IllegalArgumentException("partitionHandle must be NOT_PARTITIONED");
    }
    int remainingSplits = splits.size() - offset;
    int size = Math.min(remainingSplits, maxSize);
    // NOTE(review): subList is a view over the backing list — callers must not
    // mutate it, and the backing list must stay unchanged while results are live.
    List<ConnectorSplit> results = splits.subList(offset, offset + size);
    offset += size;
    return completedFuture(new ConnectorSplitBatch(results, isFinished()));
}
// Fetches a single batch of up to maxSize splits, blocking on the future.
// NOTE(review): does not loop — callers receive only the first batch, not all splits.
private static List<ConnectorSplit> getSplits(ConnectorSplitSource source, int maxSize) { return getFutureValue(source.getNextBatch(NOT_PARTITIONED, maxSize)).getSplits(); } }
// Fetches one batch of up to maxSize splits. When a bucket number is given, the
// request targets that bucket's partition handle; otherwise the un-partitioned
// handle is used.
private static List<ConnectorSplit> getSplits(ConnectorSplitSource source, OptionalInt bucketNumber, int maxSize)
{
    return getFutureValue(source.getNextBatch(
            bucketNumber.isPresent() ? new HivePartitionHandle(bucketNumber.getAsInt()) : NOT_PARTITIONED,
            maxSize))
            .getSplits();
}
// Waits (asynchronously) until at least one split is queued, then drains up to
// maxSize splits and reports whether the source is exhausted. Java's left-to-right
// argument evaluation keeps getBatch(maxSize) before isFinished(), matching the
// original two-stage pipeline.
@Override
public CompletableFuture<ConnectorSplitBatch> getNextBatch(ConnectorPartitionHandle partitionHandle, int maxSize)
{
    checkArgument(partitionHandle.equals(NOT_PARTITIONED), "partitionHandle must be NOT_PARTITIONED");
    return notEmptyFuture.thenApply(ignored -> new ConnectorSplitBatch(getBatch(maxSize), isFinished()));
}
// Resolves the named table, asks the client for its splits with an unconstrained
// layout, and returns the single expected split (fails if there is not exactly one).
public JdbcSplit getSplit(String schemaName, String tableName)
{
    JdbcTableHandle table = jdbcClient.getTableHandle(new SchemaTableName(schemaName, tableName));
    ConnectorSplitSource splitSource = jdbcClient.getSplits(new JdbcTableLayoutHandle(table, TupleDomain.all()));
    return (JdbcSplit) getOnlyElement(getFutureValue(splitSource.getNextBatch(NOT_PARTITIONED, 1000)).getSplits());
}
// Drains the split source in batches of 1000, counting splits without retaining them.
protected static int getSplitCount(ConnectorSplitSource splitSource)
{
    int total = 0;
    while (!splitSource.isFinished()) {
        total += getFutureValue(splitSource.getNextBatch(NOT_PARTITIONED, 1000)).getSplits().size();
    }
    return total;
}
// Collects every split from the source, fetching up to 1000 per batch,
// into a single immutable list.
private static List<ConnectorSplit> getAllSplits(ConnectorSplitSource splitSource)
{
    ImmutableList.Builder<ConnectorSplit> allSplits = ImmutableList.builder();
    while (!splitSource.isFinished()) {
        allSplits.addAll(getFutureValue(splitSource.getNextBatch(NOT_PARTITIONED, 1000)).getSplits());
    }
    return allSplits.build();
}
// Exhausts the split source, accumulating all splits in batches of 1000.
protected static List<ConnectorSplit> getAllSplits(ConnectorSplitSource splitSource)
{
    ImmutableList.Builder<ConnectorSplit> accumulated = ImmutableList.builder();
    while (!splitSource.isFinished()) {
        accumulated.addAll(getFutureValue(splitSource.getNextBatch(NOT_PARTITIONED, 1000)).getSplits());
    }
    return accumulated.build();
}
// Reads the source to completion, gathering every split (1000 per request).
protected static List<ConnectorSplit> getAllSplits(ConnectorSplitSource splitSource)
{
    ImmutableList.Builder<ConnectorSplit> gathered = ImmutableList.builder();
    while (!splitSource.isFinished()) {
        gathered.addAll(getFutureValue(splitSource.getNextBatch(NOT_PARTITIONED, 1000)).getSplits());
    }
    return gathered.build();
}
// Counts all splits produced by the source, consuming it in batches of 1000.
protected static int getSplitCount(ConnectorSplitSource splitSource)
{
    int count = 0;
    while (!splitSource.isFinished()) {
        count += getFutureValue(splitSource.getNextBatch(NOT_PARTITIONED, 1000)).getSplits().size();
    }
    return count;
}
// Pulls batches of up to 100 splits until the source reports completion,
// down-casting each to HiveSplit.
private static List<HiveSplit> drainSplits(HiveSplitSource source)
        throws Exception
{
    ImmutableList.Builder<HiveSplit> drained = ImmutableList.builder();
    while (!source.isFinished()) {
        source.getNextBatch(NOT_PARTITIONED, 100).get()
                .getSplits()
                .forEach(split -> drained.add((HiveSplit) split));
    }
    return drained.build();
}
// Constrains the table layout by the given domain, grabs its single split, and
// opens a record cursor over the selected columns.
private RecordCursor getCursor(JdbcTableHandle jdbcTableHandle, List<JdbcColumnHandle> columns, TupleDomain<ColumnHandle> domain)
{
    ConnectorSplitSource splitSource = jdbcClient.getSplits(new JdbcTableLayoutHandle(jdbcTableHandle, domain));
    JdbcSplit split = (JdbcSplit) getOnlyElement(getFutureValue(splitSource.getNextBatch(NOT_PARTITIONED, 1000)).getSplits());
    ConnectorTransactionHandle transaction = new JdbcTransactionHandle();
    RecordSet records = new JdbcRecordSetProvider(jdbcClient).getRecordSet(transaction, SESSION, split, columns);
    return records.cursor();
}
}
// Requests one batch of up to maxSize splits, targeting the given bucket's
// partition handle when present, or the un-partitioned handle otherwise.
private static List<ConnectorSplit> getSplits(ConnectorSplitSource source, OptionalInt bucketNumber, int maxSize)
{
    return getFutureValue(
            source.getNextBatch(
                    bucketNumber.isPresent() ? new HivePartitionHandle(bucketNumber.getAsInt()) : NOT_PARTITIONED,
                    maxSize))
            .getSplits();
}
// Builds a domain-constrained layout for the table, fetches the one split the
// client produces for it, and returns a cursor reading the requested columns.
private RecordCursor getCursor(JdbcTableHandle jdbcTableHandle, List<JdbcColumnHandle> columns, TupleDomain<ColumnHandle> domain)
{
    ConnectorSplitSource splitSource = jdbcClient.getSplits(new JdbcTableLayoutHandle(jdbcTableHandle, domain));
    JdbcSplit onlySplit = (JdbcSplit) getOnlyElement(getFutureValue(splitSource.getNextBatch(NOT_PARTITIONED, 1000)).getSplits());
    ConnectorTransactionHandle transaction = new JdbcTransactionHandle();
    RecordSet recordSet = new JdbcRecordSetProvider(jdbcClient).getRecordSet(transaction, SESSION, onlySplit, columns);
    return recordSet.cursor();
}
}
// Blocks (asynchronously) until splits are queued, then emits a batch of at most
// maxSize splits along with the source's finished flag.
@Override
public CompletableFuture<ConnectorSplitBatch> getNextBatch(ConnectorPartitionHandle partitionHandle, int maxSize)
{
    checkArgument(partitionHandle.equals(NOT_PARTITIONED), "partitionHandle must be NOT_PARTITIONED");
    return notEmptyFuture
            .thenApply(ready -> getBatch(maxSize))
            .thenApply(batch -> new ConnectorSplitBatch(batch, isFinished()));
}