/**
 * Increments the value for a given key and returns the resulting value.
 *
 * @param key the key to increment
 * @param value the amount to increment by
 * @return the incremented value of that key
 */
@ReadWrite
public long incrementAndGet(byte[] key, long value) {
  return this.table.incrementAndGet(key, KEY_COLUMN, value);
}
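// A minimal usage sketch for the counter-style incrementAndGet above. The dataset field "counters"
// and the page-id key are hypothetical, for illustration only; each call atomically bumps the stored
// long for the key and returns the post-increment value.
public long countVisit(String pageId) {
  return counters.incrementAndGet(Bytes.toBytes(pageId), 1L);
}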
@ReadWrite
@Override
public PartitionConsumerResult consumePartitions(PartitionConsumerState partitionConsumerState) {
  return consumePartitions(partitionConsumerState, Integer.MAX_VALUE, new Predicate<PartitionDetail>() {
    @Override
    public boolean apply(@Nullable PartitionDetail input) {
      return true;
    }
  });
}
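// A hedged sketch of how the overload above (which consumes all partitions with an accept-all predicate)
// might be driven by a caller. The dataset field "partitionedSet", the process() hook, the accessors on
// the result object, and the state persistence are assumptions for illustration, not from the source.
private PartitionConsumerState consumeAll(PartitionConsumerState previousState) {
  PartitionConsumerResult result = partitionedSet.consumePartitions(previousState);
  for (PartitionDetail partition : result.getPartitions()) {
    process(partition);  // hypothetical per-partition processing hook
  }
  // The caller is expected to persist the returned state and pass it back on the next run.
  return result.getPartitionConsumerState();
}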
/**
 * Increments the value of a counter for a given row and timestamp.
 *
 * @param counter the name of the counter to increment
 * @param amount the amount to increment by
 * @param timestamp timestamp of the entry
 * @param tags optional list of tags associated with the counter. See the {@link TimeseriesTable} class description
 *             for more details.
 * @return value of the entry after the increment
 */
@ReadWrite
public long increment(byte[] counter, long amount, long timestamp, byte[]... tags) {
  return internalIncrement(counter, amount, timestamp, tags);
}
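// A small sketch of calling the timeseries increment above. The dataset field "metrics", the counter
// name, and the tag value are hypothetical; the call bumps the counter stored for (counter, timestamp, tags)
// and returns the new total.
private long recordRequest(long nowMillis) {
  byte[] counter = Bytes.toBytes("requests");
  byte[] hostTag = Bytes.toBytes("host-1");
  return metrics.increment(counter, 1L, nowMillis, hostTag);
}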
/**
 * Compares-and-swaps (atomically) the value of the specified key by looking for the specified expected
 * value and, if found, replacing it with the specified new value.
 *
 * @param key key to modify
 * @param oldValue expected value before the change
 * @param newValue value to set
 * @return true if the compare-and-swap succeeded, false otherwise (stored value is different from expected)
 */
@ReadWrite
public boolean compareAndSwap(byte[] key, byte[] oldValue, byte[] newValue) {
  return this.table.compareAndSwap(key, KEY_COLUMN, oldValue, newValue);
}
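// A hedged sketch of the classic optimistic-update loop built on the compareAndSwap above: read the
// current value, compute the replacement, retry if another writer got in between. The "store" field,
// its read() method, and the transform() step are assumptions for illustration.
private void updateAtomically(byte[] key) {
  boolean swapped = false;
  while (!swapped) {
    byte[] current = store.read(key);       // assumed read method on the same dataset
    byte[] updated = transform(current);    // hypothetical computation of the new value
    swapped = store.compareAndSwap(key, current, updated);
  }
}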
@ReadWrite
@Override
public Row incrementAndGet(byte[] row, byte[][] columns, long[] amounts) {
  ensureTransactionIsStarted();
  return internalIncrementAndGet(row, columns, amounts);
}
/**
 * Increments (atomically) the specified row and columns by the specified amounts, and returns the new values.
 * Note that performing this operation on an indexed column will generally have a negative impact on performance,
 * since up to three writes will need to be performed for every increment (one removing the index for the previous,
 * pre-increment value, one adding the index for the incremented value, and one for the increment itself).
 *
 * @see Table#incrementAndGet(Increment)
 */
@ReadWrite
@Override
public Row incrementAndGet(Increment increment) {
  Map<byte[], Long> incrementValues = increment.getValues();
  Collection<Long> values = incrementValues.values();
  long[] longValues = new long[values.size()];
  int i = 0;
  for (long value : values) {
    longValues[i++] = value;
  }
  return incrementAndGet(increment.getRow(),
                         incrementValues.keySet().toArray(new byte[incrementValues.size()][]),
                         longValues);
}
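// A brief usage sketch for the Increment-based overload above. The table field "indexedTable", the row,
// and the column names are illustrative only. As the javadoc warns, incrementing an indexed column costs
// extra writes to keep the index consistent.
private Row bumpCounters(byte[] row) {
  Increment increment = new Increment(row);
  increment.add(Bytes.toBytes("reads"), 1L);
  increment.add(Bytes.toBytes("writes"), 2L);
  return indexedTable.incrementAndGet(increment);
}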
@ReadWrite
@Override
public Row incrementAndGet(Increment increment) {
  Preconditions.checkArgument(!increment.getValues().isEmpty(), "Increment must have at least one value");
  byte[][] columns = new byte[increment.getValues().size()][];
  long[] values = new long[increment.getValues().size()];
  int i = 0;
  for (Map.Entry<byte[], Long> columnValue : increment.getValues().entrySet()) {
    columns[i] = columnValue.getKey();
    values[i] = columnValue.getValue();
    i++;
  }
  return incrementAndGet(increment.getRow(), columns, values);
}
@ReadWrite
@Override
public long incrementAndGet(byte[] row, byte[] column, long amount) {
  byte[] result = incrementAndGet(row, new byte[][]{column}, new long[]{amount}).get(column);
  return Bytes.toLong(result);
}
/**
 * Increments (atomically) the specified row and column by the specified amount, and returns the new value.
 * Note that performing this operation on an indexed column will generally have a negative impact on performance,
 * since up to three writes will need to be performed for every increment (one removing the index for the previous,
 * pre-increment value, one adding the index for the incremented value, and one for the increment itself).
 *
 * @see Table#incrementAndGet(byte[], byte[], long)
 */
@ReadWrite
@Override
public long incrementAndGet(byte[] row, byte[] column, long amount) {
  byte[] newValue = incrementAndGet(row, new byte[][]{column}, new long[]{amount}).get(column);
  return Bytes.toLong(newValue);
}
@ReadWrite
@Override
public Future<Void> concatenatePartition(PartitionKey key) {
  PartitionDetail partition = getPartition(key);
  if (partition == null) {
    throw new PartitionNotFoundException(key, getName());
  }
  try {
    if (exploreEnabled) {
      return exploreFacadeProvider.get().concatenatePartition(datasetInstanceId, spec, key);
    } else {
      return Futures.immediateFuture(null);
    }
  } catch (Exception e) {
    throw new DataSetException(String.format(
      "Unable to concatenate partition for key %s from explore table.", key.toString()), e);
  }
}
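// A hedged sketch of a caller waiting on the Future returned by concatenatePartition above. The dataset
// field "files" is an assumption; when explore is disabled the method returns an already-completed future,
// so get() returns immediately.
private void concatenateAndWait(PartitionKey key) throws Exception {
  Future<Void> future = files.concatenatePartition(key);
  future.get();  // block until the explore-side concatenation finishes (or fails)
}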
@ReadWrite
@Override
public void readWrite() {
  read();
  write();
}
@ReadWrite
@Override
public PartitionConsumerResult consumePartitions(PartitionConsumerState partitionConsumerState, int limit,
                                                 Predicate<PartitionDetail> predicate) {
@ReadWrite
@Override
public boolean compareAndSwap(byte[] row, byte[] column, byte[] expectedValue, byte[] newValue) {
  ensureTransactionIsStarted();
  // TODO: add support for empty values; see https://issues.cask.co/browse/TEPHRA-45 for details.
  if (newValue != null && newValue.length == 0) {
    warnAboutEmptyValue(column);
  }
  // NOTE: there is a more efficient way to do this, but for now we prefer the simpler implementation
  // over over-optimizing.
  byte[][] columns = new byte[][]{column};
  try {
    byte[] currentValue = getRowMap(row, columns).get(column);
    reportRead(1);
    if (Arrays.equals(expectedValue, currentValue)) {
      putInternal(row, columns, new byte[][]{newValue});
      reportWrite(1, getSize(row) + getSize(column) + getSize(newValue));
      return true;
    }
  } catch (Exception e) {
    LOG.debug("compareAndSwap failed for table: " + getTransactionAwareName()
                + ", row: " + Bytes.toStringBinary(row), e);
    throw new DataSetException("compareAndSwap failed", e);
  }
  return false;
}