/**
 * Sets the table that subsequent mutations operate against. If the table differs
 * from the one used on the previous call, any pending mutations for the old table
 * are flushed and a new bulk mutation is created for the new table.
 *
 * <p>Bug fixed: previously {@code lastTable} and {@code lastTableBytes} were updated
 * <em>before</em> the flush and bulk-mutation swap, and outside the lock. If the
 * flush threw, the cached name already pointed at the new table while
 * {@code bulkMutation} still targeted the old one, so a retry with the same table
 * name skipped the swap and wrote to the wrong table. The caches are now updated
 * only after the swap succeeds, and all shared state is touched under one lock.
 *
 * @param table The table we're operating against
 */
private void setTable(final String table) {
  synchronized (this) {
    if (lastTable.equals(table)) {
      return;
    }
    BigtableTableName tableName = options.getInstanceName().toTableName(table);
    if (bulkMutation != null) {
      try {
        bulkMutation.flush();
      } catch (InterruptedException e) {
        // Restore the interrupt flag so callers can observe the interruption.
        Thread.currentThread().interrupt();
        throw new RuntimeException(e);
      }
    }
    bulkMutation = session.createBulkMutation(tableName);
    // Only cache the new identity once the flush and swap above have succeeded.
    lastTable = table;
    // NOTE(review): getBytes() uses the platform charset; Bigtable table names are
    // ASCII so this is benign, but consider StandardCharsets.UTF_8 explicitly.
    lastTableBytes = tableName.toString().getBytes();
  }
}
/**
 * Converts an HBase table name into its fully qualified Bigtable table name string.
 *
 * @param tableName a {@link org.apache.hadoop.hbase.TableName} object.
 * @return a {@link java.lang.String} object.
 */
protected String toBigtableName(TableName tableName) {
  String hbaseName = tableName.getNameAsString();
  return bigtableInstanceName.toTableNameStr(hbaseName);
}
/** {@inheritDoc} */
@Override
public ListenableFuture<List<String>> listTablesAsync() {
  ListTablesRequest request =
      ListTablesRequest.newBuilder().setParent(instanceName.toString()).build();
  ListenableFuture<ListTablesResponse> responseFuture = adminClient.listTablesAsync(request);
  // Transform the raw proto response into the list of short table ids.
  Function<ListTablesResponse, List<String>> toTableIds =
      response -> {
        ImmutableList.Builder<String> ids = ImmutableList.builder();
        for (com.google.bigtable.admin.v2.Table table : response.getTablesList()) {
          ids.add(instanceName.toTableId(table.getName()));
        }
        return ids.build();
      };
  return Futures.transform(responseFuture, toTableIds, MoreExecutors.directExecutor());
}
/** {@inheritDoc} */
@Override
public List<String> listTables() {
  ListTablesRequest request =
      ListTablesRequest.newBuilder().setParent(instanceName.toString()).build();
  ListTablesResponse response = adminClient.listTables(request);
  // Strip each fully qualified name down to its short table id.
  ImmutableList.Builder<String> ids = ImmutableList.builder();
  for (com.google.bigtable.admin.v2.Table table : response.getTablesList()) {
    ids.add(instanceName.toTableId(table.getName()));
  }
  return ids.build();
}
@Test public void testE2EBigtableWrite() throws Exception { final String tableName = bigtableOptions.getInstanceName().toTableNameStr(tableId); final String instanceName = bigtableOptions.getInstanceName().toString(); final int numRows = 1000; final List<KV<ByteString, ByteString>> testData = generateTableData(numRows);
/**
 * Lists tables, optionally restricted to those whose short Bigtable table id
 * matches the given pattern, adapted to HBase {@code TableDescriptor}s.
 *
 * @param tableNamePattern optional filter applied to the Bigtable table id
 * @return a future completing with the adapted descriptors
 */
private CompletableFuture<List<TableDescriptor>> listTables(Optional<Pattern> tableNamePattern) {
  return requestTableList()
      .thenApply(
          tables ->
              tables.stream()
                  .filter(
                      table ->
                          !tableNamePattern.isPresent()
                              || tableNamePattern
                                  .get()
                                  .matcher(bigtableInstanceName.toTableId(table.getName()))
                                  .matches())
                  .map(tableAdapter2x::adapt)
                  .collect(Collectors.toList()));
}
/**
 * Sets a cluster size to a specific size.
 *
 * @param clusterId id of the cluster to resize; the cluster name is derived from the
 *        instance and this id
 * @param zoneId NOTE(review): this parameter is ignored by the implementation below —
 *        confirm whether it should participate in resolving the cluster name, or
 *        whether it is retained only for signature compatibility
 * @param newSize desired node count
 * @throws InterruptedException if the cluster is in the middle of updating, and an interrupt was
 *         received
 */
public void setClusterSize(String clusterId, String zoneId, int newSize)
    throws InterruptedException {
  // Delegates to the name-based overload; zoneId is not consulted here.
  setClusterSize(instanceName.toClusterName(clusterId).getClusterName(), newSize);
}
/**
 * Asynchronously creates a table, translating failures so that callers observe a
 * table-exists style exception when appropriate.
 *
 * @param tableName a {@link TableName} object for exception identification.
 * @param request a {@link CreateTableRequest} object to send.
 * @return a future completing with the created table, or failing with the translated exception
 * @throws java.io.IOException if any.
 */
protected ListenableFuture<Table> createTableAsync(final TableName tableName,
    CreateTableRequest request) throws IOException {
  ListenableFuture<Table> createFuture =
      bigtableTableAdminClient.createTableAsync(
          request.toProto(bigtableInstanceName.toAdminInstanceName()));
  final SettableFuture<Table> result = SettableFuture.create();
  FutureCallback<Table> translatingCallback =
      new FutureCallback<Table>() {
        @Override
        public void onSuccess(@Nullable Table table) {
          result.set(table);
        }

        @Override
        public void onFailure(Throwable throwable) {
          // Translate the failure, keyed by the HBase table name.
          result.setException(convertToTableExistsException(tableName, throwable));
        }
      };
  Futures.addCallback(createFuture, translatingCallback, MoreExecutors.directExecutor());
  return result;
}
/**
 * Resolves the fully qualified Bigtable name string for an HBase table.
 *
 * @param tableName a {@link org.apache.hadoop.hbase.TableName} object.
 * @return a {@link java.lang.String} object.
 */
private String toBigtableName(TableName tableName) {
  final String shortName = tableName.getNameAsString();
  return bigtableInstanceName.toTableNameStr(shortName);
}
/**
 * Request a list of Tables for the cluster. The {@link Table}s in the response will only
 * contain fully qualified Bigtable table names, and not column family information.
 *
 * @return the raw list-tables response from the admin API
 * @throws IOException wrapping any failure from the underlying call
 */
private ListTablesResponse requestTableList() throws IOException {
  try {
    // Build and issue the request inside the try so every failure is wrapped uniformly.
    ListTablesRequest request =
        ListTablesRequest.newBuilder().setParent(bigtableInstanceName.toString()).build();
    return bigtableTableAdminClient.listTables(request);
  } catch (Throwable throwable) {
    throw new IOException("Failed to listTables", throwable);
  }
}
/**
 * Lists HBase {@code TableName}s, optionally restricted to short table ids
 * matching the given pattern.
 *
 * @param tableNamePattern optional filter applied to the Bigtable table id
 * @return a future completing with the matching table names
 */
private CompletableFuture<List<TableName>> listTableNames(Optional<Pattern> tableNamePattern) {
  return requestTableList()
      .thenApply(
          tables ->
              tables.stream()
                  .map(table -> bigtableInstanceName.toTableId(table.getName()))
                  .filter(
                      tableId ->
                          !tableNamePattern.isPresent()
                              || tableNamePattern.get().matcher(tableId).matches())
                  .map(TableName::valueOf)
                  .collect(Collectors.toList()));
}
public BigtableAsyncAdmin(CommonConnection asyncConnection) throws IOException { LOG.debug("Creating BigtableAsyncAdmin"); this.options = asyncConnection.getOptions(); this.bigtableTableAdminClient = new BigtableTableAdminClient( asyncConnection.getSession().getTableAdminClient()); this.disabledTables = asyncConnection.getDisabledTables(); this.bigtableInstanceName = options.getInstanceName(); this.tableAdapter2x = new TableAdapter2x(options); this.asyncConnection = asyncConnection; this.configuration = asyncConnection.getConfiguration(); String clusterId = configuration.get(BigtableOptionsFactory.BIGTABLE_SNAPSHOT_CLUSTER_ID_KEY, null); if (clusterId != null) { bigtableSnapshotClusterName = bigtableInstanceName.toClusterName(clusterId); } }
/**
 * Applies the accumulated column-family modifications to Bigtable.
 *
 * @param tableName a {@link org.apache.hadoop.hbase.TableName} object; not consulted by this
 *        method — the target table comes from the built request.
 * @param modifications a {@link ModifyTableBuilder} object.
 * @return a future that completes (with {@code null}) when the modification finishes
 */
private CompletableFuture<Void> modifyColumns(TableName tableName,
    ModifyTableBuilder modifications) {
  ModifyColumnFamiliesRequest modifyRequest = modifications.build();
  return bigtableTableAdminClient
      .modifyColumnFamilyAsync(modifyRequest.toProto(bigtableInstanceName.toAdminInstanceName()))
      .thenApply(response -> null);
}
/**
 * Maps an HBase table name to the corresponding Bigtable table name string.
 *
 * @param tableName a {@link org.apache.hadoop.hbase.TableName} object.
 * @return a {@link java.lang.String} object.
 */
private String toBigtableName(TableName tableName) {
  String nameAsString = tableName.getNameAsString();
  return bigtableInstanceName.toTableNameStr(nameAsString);
}
/**
 * Asynchronously requests the full list of tables for this instance.
 *
 * @return a future completing with the raw {@link Table} protos
 */
private CompletableFuture<List<Table>> requestTableList() {
  ListTablesRequest.Builder requestBuilder = ListTablesRequest.newBuilder();
  requestBuilder.setParent(bigtableInstanceName.toString());
  return bigtableTableAdminClient
      .listTablesAsync(requestBuilder.build())
      .thenApply(response -> response.getTablesList());
}
/** * Convert a list of Bigtable {@link Table}s to hbase {@link TableName}. */ private TableName[] asTableNames(List<Table> tablesList) { TableName[] result = new TableName[tablesList.size()]; for (int i = 0; i < tablesList.size(); i++) { // This will contain things like project, zone and cluster. String bigtableFullTableName = tablesList.get(i).getName(); // Strip out the Bigtable info. String name = bigtableInstanceName.toTableId(bigtableFullTableName); result[i] = TableName.valueOf(name); } return result; }
/**
 * Opens a dedicated {@link BigtableSession} and returns a writer bound to it for the
 * given table.
 *
 * <p>Fix: if writer construction fails after the session was created, the session is
 * now closed instead of being leaked.
 *
 * @param tableId the short table id to write to
 * @return a writer that owns its own session
 * @throws IOException if the session cannot be created
 */
@Override
public BigtableWriterImpl openForWriting(String tableId) throws IOException {
  BigtableSession session = new BigtableSession(options);
  try {
    BigtableTableName tableName = options.getInstanceName().toTableName(tableId);
    return new BigtableWriterImpl(session, tableName);
  } catch (RuntimeException | Error e) {
    // Don't leak the session if anything after its creation throws.
    try {
      session.close();
    } catch (Exception closeException) {
      e.addSuppressed(closeException);
    }
    throw e;
  }
}
public BigtableAsyncAdmin(CommonConnection asyncConnection) throws IOException { LOG.debug("Creating BigtableAsyncAdmin"); this.options = asyncConnection.getOptions(); this.bigtableTableAdminClient = new BigtableTableAdminClient( asyncConnection.getSession().getTableAdminClient()); this.disabledTables = asyncConnection.getDisabledTables(); this.bigtableInstanceName = options.getInstanceName(); this.tableAdapter2x = new TableAdapter2x(options); this.asyncConnection = asyncConnection; this.configuration = asyncConnection.getConfiguration(); String clusterId = configuration.get(BigtableOptionsFactory.BIGTABLE_SNAPSHOT_CLUSTER_ID_KEY, null); if (clusterId != null) { bigtableSnapshotClusterName = bigtableInstanceName.toClusterName(clusterId); } }
/**
 * Synchronously creates a table, translating any failure through
 * {@code convertToTableExistsException} so callers can detect pre-existing tables.
 *
 * @param tableName the HBase table name used to build the translated exception
 * @param request a {@link CreateTableRequest} object to send.
 * @throws java.io.IOException if any.
 */
protected void createTable(TableName tableName, CreateTableRequest request) throws IOException {
  try {
    bigtableTableAdminClient.createTable(
        request.toProto(bigtableInstanceName.toAdminInstanceName()));
  } catch (Throwable e) {
    // Surface failures keyed by the HBase table name.
    throw convertToTableExistsException(tableName, e);
  }
}
/**
 * Translates an HBase table name into the fully qualified Bigtable name string.
 *
 * @param tableName a {@link org.apache.hadoop.hbase.TableName} object.
 * @return a {@link java.lang.String} object.
 */
protected String toBigtableName(TableName tableName) {
  final String hbaseTableName = tableName.getNameAsString();
  return bigtableInstanceName.toTableNameStr(hbaseTableName);
}