/**
 * Runs {@code runner} with a fresh read handle and write handle for {@code tableRef},
 * guaranteeing the backing connection supplier is closed when the task finishes.
 *
 * @param tableRef table the task operates on
 * @param runner task receiving the read table and the write table
 * @return the task's result
 */
private <T> T runReadWrite(TableReference tableRef, ReadWriteTask<T> runner) {
    // try-with-resources replaces the previous explicit try/finally close;
    // ConnectionSupplier is AutoCloseable (it is already used this way in
    // getTableMetadataUsingNewConnection), so behavior is identical.
    try (ConnectionSupplier conns = new ConnectionSupplier(connections)) {
        return runner.run(dbTables.createRead(tableRef, conns), dbTables.createWrite(tableRef, conns));
    }
}
@Override public void createTable(TableReference tableRef, byte[] tableMetadata) { runDdl(tableRef, (Function<DbDdlTable, Void>) table -> { table.create(tableMetadata); return null; }); // it would be kind of nice if this was in a transaction with the DbDdlTable create, // but the code currently isn't well laid out to accommodate that putMetadataForTable(tableRef, tableMetadata); }
/**
 * Reads timestamps grouped by cell for the given rows/columns at the given timestamp,
 * resuming from {@code token} and stopping after roughly {@code batchSize} entries.
 */
private TimestampsByCellResultWithToken getTimestampsByCell(
        DbReadTable table,
        Iterable<byte[]> rows,
        ColumnSelection columns,
        long timestamp,
        long batchSize,
        boolean isReverse,
        Token token) {
    DbReadTable.Order order = DbReadTable.Order.fromBoolean(isReverse);
    // The iterator must be closed; try-with-resources guarantees it even if create() throws.
    try (ClosableIterator<AgnosticLightResultRow> rowResults =
            table.getAllRows(rows, columns, timestamp, false, order)) {
        return TimestampsByCellResultWithToken.create(rowResults, token, batchSize, isReverse);
    }
}
/**
 * Returns the cached metadata for {@code tableRef}, loading and caching it on a miss.
 * (Despite the name, the "empty" fallback presumably comes from hydrateMetadata on
 * absent raw metadata — confirm at that helper.)
 */
private TableMetadata getOrReturnEmpty(TableReference tableRef, ConnectionSupplier conns) {
    TableMetadata metadata = cache.getIfPresent(tableRef);
    if (metadata == null) {
        // Cache miss: read the raw bytes from the metadata table, hydrate, and remember.
        byte[] rawMetadata = dbTables.createMetadata(tableRef, conns).getMetadata();
        metadata = hydrateMetadata(rawMetadata);
        cache.put(tableRef, metadata);
    }
    return metadata;
}
/**
 * Partitions {@code request} into batches of {@code partitionSize} and verifies the
 * partitioning invariants: no partial rows in intermediate partitions, same rows and
 * column ranges as the original, and correct partition sizes.
 */
private static void testPartition(RowsColumnRangeBatchRequest request, int partitionSize) {
    List<RowsColumnRangeBatchRequest> partitioned =
            RowsColumnRangeBatchRequests.partition(request, partitionSize);
    assertIntermediatePartitionsHaveNoPartialRows(partitioned);
    assertRowsInPartitionsMatchOriginal(request, partitioned);
    assertColumnRangesInPartitionsMatchOriginal(request, partitioned);
    assertPartitionsHaveCorrectSize(partitioned, partitionSize);
}
/**
 * Stores the serialized metadata for the given table.
 *
 * @param tableRef table whose metadata is being written
 * @param metadata serialized metadata bytes
 */
@Override
public void putMetadataForTable(TableReference tableRef, byte[] metadata) {
    runMetadata(tableRef, (Function<DbMetadataTable, Void>) metadataTable -> {
        metadataTable.putMetadata(metadata);
        return null;
    });
}
/**
 * Fetches table metadata on a dedicated, short-lived connection so the lookup
 * never borrows a caller's connection.
 */
private TableMetadata getTableMetadataUsingNewConnection(TableReference tableRef) {
    try (ConnectionSupplier freshConnections = new ConnectionSupplier(connectionPool)) {
        return tableMetadataCache.getTableMetadata(tableRef, freshConnections);
    }
}
/**
 * Writes garbage-collection sentinel values for the given cells.
 *
 * @param tableRef table to write sentinels into
 * @param cells cells that should receive sentinel values
 */
@Override
public void addGarbageCollectionSentinelValues(TableReference tableRef, Iterable<Cell> cells) {
    runWrite(tableRef, (Function<DbWriteTable, Void>) writeTable -> {
        writeTable.putSentinels(cells);
        return null;
    });
}
/**
 * Triggers an internal compaction of the given table via DDL.
 *
 * @param tableRef table to compact
 * @param inMaintenanceMode whether to use maintenance-mode compaction behavior
 */
@Override
public void compactInternally(TableReference tableRef, boolean inMaintenanceMode) {
    runDdl(tableRef, (Function<DbDdlTable, Void>) ddlTable -> {
        ddlTable.compactInternally(inMaintenanceMode);
        return null;
    });
}
/**
 * Deletes all cells in the given range. Runs with autocommit forced on,
 * matching the other destructive bulk operations in this class.
 */
@Override
public void deleteRange(TableReference tableRef, RangeRequest range) {
    runWriteForceAutocommit(tableRef, (Function<DbWriteTable, Void>) writeTable -> {
        writeTable.delete(range);
        return null;
    });
}
/**
 * Deletes, per cell, all versions strictly below the given exclusive maximum timestamp,
 * optionally removing sentinel values as well. Runs with autocommit forced on.
 *
 * @param tableRef table to delete from
 * @param maxTimestampExclusiveByCell per-cell exclusive upper bound on timestamps to delete
 * @param deleteSentinels whether sentinel values are also removed
 */
@Override
public void deleteAllTimestamps(
        TableReference tableRef, Map<Cell, Long> maxTimestampExclusiveByCell, boolean deleteSentinels) {
    runWriteForceAutocommit(tableRef, (Function<DbWriteTable, Void>) writeTable -> {
        writeTable.deleteAllTimestamps(maxTimestampExclusiveByCell, deleteSentinels);
        return null;
    });
}
/**
 * Returns the serialized metadata stored for the given table.
 *
 * @param tableRef table whose metadata is read
 * @return the raw metadata bytes
 */
@Override
public byte[] getMetadataForTable(TableReference tableRef) {
    // Method reference is the idiomatic form of the trivial lambda table -> table.getMetadata().
    return runMetadata(tableRef, DbMetadataTable::getMetadata);
}
/**
 * Test/tuning hook: sets the max range-of-timestamps batch size on the DbKvs
 * wrapped by the given connection-manager-aware KVS.
 */
static void setMaxRangeOfTimestampsBatchSize(long value, ConnectionManagerAwareDbKvs kvs) {
    // The delegate is known to be a DbKvs at this call site; the cast makes that explicit.
    DbKvs underlyingKvs = (DbKvs) kvs.delegate();
    underlyingKvs.setMaxRangeOfTimestampsBatchSize(value);
}
/**
 * Appends a predicate matching cells at or after (rhsRow, rhsCol) in
 * lexicographic (row_name, col_name) order: either a strictly later row,
 * or the same row with col_name >= rhsCol.
 */
@Override
void cellGreaterOrEqualTo(byte[] rhsRow, byte[] rhsCol, FullQuery.Builder builder) {
    builder.append("(row_name >= ? AND (row_name > ? OR col_name >= ?))", rhsRow, rhsRow, rhsCol);
}

@Override
/**
 * Runs database-specific setup by creating the utility tables the KVS requires.
 */
private void databaseSpecificInitialization() {
    // Lambda replaces the legacy anonymous Function class, matching the lambda
    // style used by every other run* call site in this class.
    runInitialization((Function<DbTableInitializer, Void>) initializer -> {
        initializer.createUtilityTables();
        return null;
    });
}
/**
 * Probes cluster availability by checking the database version.
 *
 * @return ALL_AVAILABLE when the version check passes; TERMINAL for an
 *         unsupported database version (not recoverable by retrying);
 *         NO_QUORUM_AVAILABLE for any other failure (e.g. connectivity).
 */
@Override
public ClusterAvailabilityStatus getClusterAvailabilityStatus() {
    try {
        checkDatabaseVersion();
        return ClusterAvailabilityStatus.ALL_AVAILABLE;
    } catch (DbkvsVersionException versionMismatch) {
        // A bad version won't fix itself; report a terminal state.
        return ClusterAvailabilityStatus.TERMINAL;
    } catch (Exception checkFailure) {
        // Broad catch is deliberate: any other failure means the database is unreachable.
        return ClusterAvailabilityStatus.NO_QUORUM_AVAILABLE;
    }
}
// NOTE(review): tail of an anonymous class whose opening brace is outside this view.
// The timed get delegates to the untimed get(), i.e. the timeout arguments are
// ignored — presumably the value is already available when this is called;
// confirm at the declaration site.
@Override public T get(long timeout, TimeUnit unit) { return get(); } };
/**
 * Writes the given cell values at the supplied timestamp.
 *
 * @throws KeyAlreadyExistsException propagated from the delegate overload
 */
@Override
public void put(TableReference tableRef, Map<Cell, byte[]> values, long timestamp)
        throws KeyAlreadyExistsException {
    // Delegates to the four-argument overload with the final flag fixed to true;
    // the flag's semantics are defined at that overload — confirm there.
    put(tableRef, values, timestamp, true);
}