/**
 * Little helper to set the table byte array. If the table is different from the
 * last one, we reset the byte array; otherwise we just reuse the existing array.
 *
 * @param table The table we're operating against
 */
private void setTable(final String table) {
  if (!lastTable.equals(table)) {
    lastTable = table;
    BigtableTableName tableName = options.getInstanceName().toTableName(table);
    lastTableBytes = tableName.toString().getBytes();
    synchronized (this) {
      if (bulkMutation != null) {
        try {
          bulkMutation.flush();
        } catch (InterruptedException e) {
          Thread.currentThread().interrupt();
          throw new RuntimeException(e);
        }
      }
      bulkMutation = session.createBulkMutation(tableName);
    }
  }
}
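For context, a minimal caller-side sketch. The record type and write helper are assumptions, not part of the source, but the cost model follows from the code above: each table switch pays a synchronous flush of the previous table's BulkMutation.

// Hypothetical driver loop: Record, getTable(), and writeRecord() are
// illustrative stand-ins. setTable() is a no-op while the table stays the
// same, but every switch flushes synchronously, so grouping records by table
// keeps the number of flushes low.
for (Record record : recordsGroupedByTable) {
  setTable(record.getTable());
  writeRecord(record);
}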
private RowMutation newRowMutationModel(byte[] rowKey) {
  if (!mutationAdapters.putAdapter.isSetClientTimestamp()) {
    return RowMutation.create(
        bigtableTableName.getTableId(), ByteString.copyFrom(rowKey), Mutation.createUnsafe());
  }
  return RowMutation.create(bigtableTableName.getTableId(), ByteString.copyFrom(rowKey));
}
/** {@inheritDoc} */
@Override
public List<HBaseProtos.SnapshotDescription> listSnapshots() throws IOException {
  ListSnapshotsRequest request =
      ListSnapshotsRequest.newBuilder()
          .setParent(getSnapshotClusterName().toString())
          .build();
  ListSnapshotsResponse snapshotList =
      Futures.getChecked(
          bigtableTableAdminClient.listSnapshotsAsync(request), IOException.class);
  List<HBaseProtos.SnapshotDescription> response = new ArrayList<>();
  for (Snapshot snapshot : snapshotList.getSnapshotsList()) {
    BigtableSnapshotName snapshotName = new BigtableSnapshotName(snapshot.getName());
    BigtableTableName tableName = new BigtableTableName(snapshot.getSourceTable().getName());
    response.add(
        HBaseProtos.SnapshotDescription.newBuilder()
            .setName(snapshotName.getSnapshotId())
            .setTable(tableName.getTableId())
            .setCreationTime(TimeUnit.SECONDS.toMillis(snapshot.getCreateTime().getSeconds()))
            .build());
  }
  return response;
}
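A hypothetical caller sketch, assuming `admin` is the Admin implementation hosting this method; the getters are the protobuf accessors of HBaseProtos.SnapshotDescription.

// `admin` is an assumed handle to the Admin implementation above.
for (HBaseProtos.SnapshotDescription snapshot : admin.listSnapshots()) {
  System.out.printf(
      "%s (table=%s, created=%dms)%n",
      snapshot.getName(), snapshot.getTable(), snapshot.getCreationTime());
}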
/**
 * Converts a table id into a fully qualified table name within this instance.
 *
 * @param tableId a {@link java.lang.String} object.
 * @return a {@link com.google.cloud.bigtable.grpc.BigtableTableName} object.
 */
public BigtableTableName toTableName(String tableId) {
  return new BigtableTableName(toTableNameStr(tableId));
}
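A usage sketch with hypothetical project, instance, and table ids; the expected expansion follows the standard Bigtable resource-name format.

// Illustrative ids, not real resources.
BigtableInstanceName instanceName = new BigtableInstanceName("my-project", "my-instance");
BigtableTableName tableName = instanceName.toTableName("my-table");
// tableName.toString() ->
//   "projects/my-project/instances/my-instance/tables/my-table"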
public CheckAndMutateBuilderImpl(
    BigtableDataClient client, HBaseRequestAdapter hbaseAdapter, byte[] row, byte[] family) {
  this.client = client;
  this.builder = new CheckAndMutateUtil.RequestBuilder(hbaseAdapter, row, family);
  BigtableTableName bigtableTableName = hbaseAdapter.getBigtableTableName();
  // Once the IBigtableDataClient interface is implemented this will be removed
  this.requestContext = RequestContext.create(bigtableTableName.toGcbInstanceName(), "");
}
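For orientation, a sketch of the HBase 2 CheckAndMutateBuilder API this class backs; the table handle, row, column names, and values are illustrative.

// `asyncTable` is an assumed AsyncTable handle; names and values are made up.
// Atomically replaces cf:col only if it currently equals "expected".
CompletableFuture<Boolean> success =
    asyncTable.checkAndMutate(Bytes.toBytes("row-1"), Bytes.toBytes("cf"))
        .qualifier(Bytes.toBytes("col"))
        .ifEquals(Bytes.toBytes("expected"))
        .thenPut(
            new Put(Bytes.toBytes("row-1"))
                .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("col"), Bytes.toBytes("updated")));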
/**
 * Constructor for BulkRead.
 *
 * @param client a {@link BigtableDataClient} object.
 * @param tableName a {@link BigtableTableName} object.
 * @param batchSizes the number of keys to look up per RPC.
 * @param threadPool the {@link ExecutorService} to execute the batched reads on.
 */
public BulkRead(
    BigtableDataClient client,
    BigtableTableName tableName,
    int batchSizes,
    ExecutorService threadPool) {
  this.client = client;
  this.tableName = tableName.toString();
  this.batchSizes = batchSizes;
  this.threadPool = threadPool;
  this.batches = new HashMap<>();
}
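A construction sketch; `client` and `tableName` are assumed to be in scope, and the batch size and pool size are illustrative values, not library defaults.

ExecutorService threadPool = Executors.newFixedThreadPool(4);
// Look up at most 100 keys per batched read RPC (illustrative value).
BulkRead bulkRead = new BulkRead(client, tableName, 100, threadPool);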
/**
 * Converts an HBase {@link org.apache.hadoop.hbase.client.Append} operation into a
 * Cloud Bigtable {@link com.google.bigtable.v2.ReadModifyWriteRowRequest}.
 *
 * @param append a {@link org.apache.hadoop.hbase.client.Append} object.
 * @return a {@link com.google.bigtable.v2.ReadModifyWriteRowRequest} object.
 */
public ReadModifyWriteRowRequest adapt(Append append) {
  ReadModifyWriteRow readModifyWriteRow =
      ReadModifyWriteRow.create(bigtableTableName.getTableId(), ByteString.copyFrom(append.getRow()));
  Adapters.APPEND_ADAPTER.adapt(append, readModifyWriteRow);
  return readModifyWriteRow.toProto(requestContext);
}
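A hypothetical adapt() call using the HBase 2 Append API; `hbaseAdapter` and all names shown are assumptions.

// Appends "-suffix" to cf:col of row-1; names are illustrative.
Append append =
    new Append(Bytes.toBytes("row-1"))
        .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("col"), Bytes.toBytes("-suffix"));
ReadModifyWriteRowRequest request = hbaseAdapter.adapt(append);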
public BigtableAsyncTable(BigtableAsyncConnection asyncConnection, HBaseRequestAdapter hbaseAdapter) {
  this.asyncConnection = asyncConnection;
  BigtableSession session = asyncConnection.getSession();
  this.client = new BigtableDataClient(session.getDataClient());
  this.hbaseAdapter = hbaseAdapter;
  this.tableName = hbaseAdapter.getTableName();
  // Once the IBigtableDataClient interface is implemented this will be removed
  this.requestContext =
      RequestContext.create(hbaseAdapter.getBigtableTableName().toGcbInstanceName(), "");
}
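A usage sketch through the standard HBase 2 entry points; `config` is assumed to be a Configuration already wired to route to Bigtable, and the table and row names are illustrative.

// Obtain the async table through the stock HBase 2 API.
AsyncConnection connection = ConnectionFactory.createAsyncConnection(config).get();
AsyncTable<AdvancedScanResultConsumer> table =
    connection.getTable(TableName.valueOf("my-table"));
// Asynchronous point read; the callback runs when the Result arrives.
table.get(new Get(Bytes.toBytes("row-1")))
    .thenAccept(result -> System.out.println(Bytes.toString(result.getRow())));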
/**
 * Returns the fully qualified table name as a string.
 *
 * @return a {@link java.lang.String} object.
 */
protected String getTableNameString() {
  return getBigtableTableName().toString();
}
/**
 * Converts an HBase {@link org.apache.hadoop.hbase.client.Increment} operation into a
 * Cloud Bigtable {@link com.google.bigtable.v2.ReadModifyWriteRowRequest}.
 *
 * @param increment a {@link org.apache.hadoop.hbase.client.Increment} object.
 * @return a {@link com.google.bigtable.v2.ReadModifyWriteRowRequest} object.
 */
public ReadModifyWriteRowRequest adapt(Increment increment) {
  ReadModifyWriteRow readModifyWriteRow =
      ReadModifyWriteRow.create(bigtableTableName.getTableId(), ByteString.copyFrom(increment.getRow()));
  Adapters.INCREMENT_ADAPTER.adapt(increment, readModifyWriteRow);
  return readModifyWriteRow.toProto(requestContext);
}
@VisibleForTesting
void waitForReplication(BigtableTableName tableName, BackOff backOff)
    throws InterruptedException, TimeoutException {
  String token = generateConsistencyToken(tableName);
  while (!checkConsistency(tableName, token)) {
    long backOffMillis;
    try {
      backOffMillis = backOff.nextBackOffMillis();
    } catch (IOException e) {
      // Should never happen: we only use ExponentialBackOff, which doesn't throw.
      throw new RuntimeException("Problem getting backoff", e);
    }
    if (backOffMillis == BackOff.STOP) {
      throw new TimeoutException(
          "Table " + tableName + " is not consistent after timeout.");
    }
    // Sleep for backOffMillis milliseconds and retry the consistency check.
    Thread.sleep(backOffMillis);
  }
}
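A sketch of building the BackOff this method consumes, using com.google.api.client.util.ExponentialBackOff; the intervals shown are illustrative, not the library's defaults.

// Illustrative backoff policy for the consistency-check loop above.
BackOff backOff =
    new ExponentialBackOff.Builder()
        .setInitialIntervalMillis(250) // first wait between consistency checks
        .setMultiplier(2.0) // double the wait on each retry
        .setMaxElapsedTimeMillis(600_000) // return BackOff.STOP after ~10 minutes
        .build();
waitForReplication(tableName, backOff);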
BulkMutation(
    BigtableTableName tableName,
    BigtableDataClient client,
    OperationAccountant operationAccountant,
    ScheduledExecutorService retryExecutorService,
    BulkOptions bulkOptions) {
  this.tableName = tableName.toString();
  this.client = client;
  this.retryExecutorService = retryExecutorService;
  this.operationAccountant = operationAccountant;
  this.maxRowKeyCount = bulkOptions.getBulkMaxRowKeyCount();
  this.maxRequestSize = bulkOptions.getBulkMaxRequestSize();
  this.autoflushMs = bulkOptions.getAutoflushMs();
}
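A wiring sketch. It assumes BulkOptions.Builder exposes setters mirroring the three fields read above, that the other constructor arguments are already in scope, and that the values shown are illustrative thresholds.

// Assumption: Builder setters mirror getBulkMaxRowKeyCount(),
// getBulkMaxRequestSize(), and getAutoflushMs(); values are illustrative.
BulkOptions bulkOptions =
    new BulkOptions.Builder()
        .setBulkMaxRowKeyCount(125) // flush once 125 row keys are buffered
        .setBulkMaxRequestSize(1L << 20) // ...or once the request reaches 1 MiB
        .setAutoflushMs(0L) // 0 disables time-based autoflush (assumption)
        .build();
BulkMutation bulkMutation =
    new BulkMutation(tableName, client, operationAccountant, retryExecutorService, bulkOptions);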
/**
 * Converts an HBase {@link Scan} into a Cloud Bigtable {@link ReadRowsRequest}.
 *
 * @param scan a {@link Scan} object.
 * @return a {@link ReadRowsRequest} object.
 */
public ReadRowsRequest adapt(Scan scan) {
  ReadHooks readHooks = new DefaultReadHooks();
  Query query = Query.create(bigtableTableName.getTableId());
  Adapters.SCAN_ADAPTER.adapt(scan, readHooks, query);
  readHooks.applyPreSendHook(query);
  return query.toProto(requestContext);
}
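A hypothetical adapt() call; the Scan uses standard HBase setters, and `hbaseAdapter`, the row range, and the family name are assumptions.

// Scan rows in ["row-000", "row-999") from family cf; names are illustrative.
Scan scan =
    new Scan()
        .withStartRow(Bytes.toBytes("row-000"))
        .withStopRow(Bytes.toBytes("row-999"))
        .addFamily(Bytes.toBytes("cf"));
// The adapter rewrites the HBase scan as a Bigtable ReadRows request.
ReadRowsRequest request = hbaseAdapter.adapt(scan);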
request.setTableName(bigtableTableName.toString());
LOG.debug("Sampling rowkeys for table %s", request.getTableName());