/**
 * Builds the "poll" URI for the given subscription, carrying the claim TTL,
 * the event limit, and whether this client is partition-safe.
 */
protected UriBuilder getPollUriBuilder(String subscription, Duration claimTtl, int limit) {
    // Claim TTL is clamped to whole seconds in [0, Integer.MAX_VALUE].
    Integer claimTtlSeconds = Ttls.toSeconds(claimTtl, 0, Integer.MAX_VALUE);
    UriBuilder builder = _databus.clone().segment(subscription, "poll");
    builder = builder.queryParam("ttl", claimTtlSeconds);
    builder = builder.queryParam("limit", limit);
    return builder.queryParam("partitioned", _partitionSafe);
}
/**
 * Queues writes of the given de-dup data records for {@code dataId}, one column
 * per record with an empty value and the configured event TTL.
 *
 * @param dataId  row key for the de-dup data column family
 * @param records record buffers to write; an empty collection is a no-op
 * @return this, for call chaining
 */
@Override
public UpdateRequest writeRecords(UUID dataId, Collection<ByteBuffer> records) {
    if (!records.isEmpty()) {
        // The TTL is the same for every record; compute it once instead of per iteration.
        Integer ttlSeconds = Ttls.toSeconds(_eventTtl, 1, null);
        ColumnListMutation<ByteBuffer> row = _mutation.withRow(CF_DEDUP_DATA, dataId);
        for (ByteBuffer record : records) {
            row.putColumn(record, EMPTY_BUFFER, ttlSeconds);
        }
    }
    return this;
}
/**
 * Queues writes of the given de-dup data records for {@code dataId}, one column
 * per record with an empty value and the configured event TTL.
 *
 * @param dataId  row key for the de-dup data column family
 * @param records record buffers to write; an empty collection is a no-op
 * @return this, for call chaining
 */
@Override
public UpdateRequest writeRecords(UUID dataId, Collection<ByteBuffer> records) {
    if (!records.isEmpty()) {
        // The TTL is the same for every record; compute it once instead of per iteration.
        Integer ttlSeconds = Ttls.toSeconds(_eventTtl, 1, null);
        ColumnListMutation<ByteBuffer> row = _mutation.withRow(CF_DEDUP_DATA, dataId);
        for (ByteBuffer record : records) {
            row.putColumn(record, EMPTY_BUFFER, ttlSeconds);
        }
    }
    return this;
}
/**
 * Creates or updates a subscription, converting the relative TTLs into an
 * absolute expiration date and a clamped event TTL before delegating to
 * {@link #insertSubscription(DefaultOwnedSubscription)}.
 */
@Timed(name = "bv.emodb.databus.CqlSubscriptionDAO.insertSubscription", absolute = true)
@Override
public void insertSubscription(String ownerId, String subscription, Condition tableFilter,
                               Duration subscriptionTtl, Duration eventTtl) {
    // Expiration is computed from the injected clock so it is testable.
    Date expiresAt = new Date(_clock.millis() + subscriptionTtl.toMillis());
    // Clamp the event TTL to whole seconds in [1, Integer.MAX_VALUE].
    Duration clampedEventTtl = Duration.ofSeconds(Ttls.toSeconds(eventTtl, 1, Integer.MAX_VALUE));
    insertSubscription(new DefaultOwnedSubscription(
            subscription, tableFilter, expiresAt, clampedEventTtl, ownerId));
}
/**
 * Creates or updates a subscription, converting the relative TTLs into an
 * absolute expiration date and a clamped event TTL before delegating to
 * {@link #insertSubscription(DefaultOwnedSubscription)}.
 */
@Timed(name = "bv.emodb.databus.CqlSubscriptionDAO.insertSubscription", absolute = true)
@Override
public void insertSubscription(String ownerId, String subscription, Condition tableFilter,
                               Duration subscriptionTtl, Duration eventTtl) {
    // Expiration is computed from the injected clock so it is testable.
    Date expiresAt = new Date(_clock.millis() + subscriptionTtl.toMillis());
    // Clamp the event TTL to whole seconds in [1, Integer.MAX_VALUE].
    Duration clampedEventTtl = Duration.ofSeconds(Ttls.toSeconds(eventTtl, 1, Integer.MAX_VALUE));
    insertSubscription(new DefaultOwnedSubscription(
            subscription, tableFilter, expiresAt, clampedEventTtl, ownerId));
}
/**
 * Requests server-side compaction of a single record, optionally overriding the
 * default compaction TTL.  EmoDB client exceptions are translated via
 * {@code convertException}.
 */
@Override
public void compact(String apiKey, String table, String key, @Nullable Duration ttlOverride,
                    ReadConsistency readConsistency, WriteConsistency writeConsistency) {
    checkNotNull(table, "table");
    checkNotNull(key, "key");
    checkNotNull(readConsistency, "readConsistency");
    checkNotNull(writeConsistency, "writeConsistency");
    try {
        // The "ttl" parameter is included only when an override was supplied; an
        // empty varargs array makes queryParam() skip the parameter entirely.
        Object[] ttlValues = new Object[0];
        if (ttlOverride != null) {
            ttlValues = new Object[]{Ttls.toSeconds(ttlOverride, 0, Integer.MAX_VALUE)};
        }
        URI uri = _dataStore.clone()
                .segment(table, key, "compact")
                .queryParam("ttl", ttlValues)
                .queryParam("readConsistency", readConsistency)
                .queryParam("writeConsistency", writeConsistency)
                .build();
        _client.resource(uri)
                .header(ApiKeyRequest.AUTHENTICATION_HEADER, apiKey)
                .post();
    } catch (EmoClientException e) {
        throw convertException(e);
    }
}
/**
 * Queues a write of the serialized segment state into the de-dup metadata row
 * for this queue.  Manifest entries expire after 31 days unless rewritten.
 *
 * @return this, for call chaining
 */
@Override
public UpdateRequest writeSegment(UUID segment, String internalState) {
    // 31-day TTL, clamped to at least one second.
    Integer ttlSeconds = Ttls.toSeconds(Duration.ofDays(31), 1, null);
    _mutation.withRow(CF_DEDUP_MD, queue)
            .putColumn(segment, internalState, ttlSeconds);
    return this;
}
/**
 * Queues a write of the serialized segment state into the de-dup metadata row
 * for this queue.  Manifest entries expire after 31 days unless rewritten.
 *
 * @return this, for call chaining
 */
@Override
public UpdateRequest writeSegment(UUID segment, String internalState) {
    // 31-day TTL, clamped to at least one second.
    Integer ttlSeconds = Ttls.toSeconds(Duration.ofDays(31), 1, null);
    _mutation.withRow(CF_DEDUP_MD, queue)
            .putColumn(segment, internalState, ttlSeconds);
    return this;
}
/**
 * Creates or renews a subscription via the databus REST endpoint.  Both the
 * subscription TTL and event TTL are capped at 30 days; the table filter is
 * sent as the request body in the JSON condition media type.
 */
@Override
public void subscribe(@Credential String apiKey, String subscription, Condition tableFilter,
                      Duration subscriptionTtl, Duration eventTtl, boolean includeDefaultJoinFilter) {
    checkNotNull(subscription, "subscription");
    checkNotNull(tableFilter, "tableFilter");
    try {
        // Both TTLs share the same 30-day cap, expressed in seconds.
        int maxTtlSeconds = (int) Duration.ofDays(30).getSeconds();
        URI uri = _databus.clone()
                .segment(subscription)
                .queryParam("ttl", Ttls.toSeconds(subscriptionTtl, 0, maxTtlSeconds))
                .queryParam("eventTtl", Ttls.toSeconds(eventTtl, 0, maxTtlSeconds))
                .queryParam("includeDefaultJoinFilter", Boolean.toString(includeDefaultJoinFilter))
                .build();
        _client.resource(uri)
                .type(JSON_CONDITION_MEDIA_TYPE)
                .header(ApiKeyRequest.AUTHENTICATION_HEADER, apiKey)
                .put(tableFilter.toString());
    } catch (EmoClientException e) {
        throw convertException(e);
    }
}
/**
 * Renews the claims on the given message ids for {@code claimTtl} via the queue
 * service "renew" endpoint.
 */
protected void doRenew(String apiKey, String queue, Collection<String> messageIds, Duration claimTtl) {
    checkNotNull(queue, "queue");
    checkNotNull(messageIds, "messageIds");
    checkNotNull(claimTtl, "claimTtl");
    try {
        // Claim TTL in whole seconds, clamped to [0, Integer.MAX_VALUE].
        Integer claimTtlSeconds = Ttls.toSeconds(claimTtl, 0, Integer.MAX_VALUE);
        URI uri = _queueService.clone()
                .segment(queue, "renew")
                .queryParam("ttl", claimTtlSeconds)
                .queryParam("partitioned", _partitionSafe)
                .build();
        _client.resource(uri)
                .type(MediaType.APPLICATION_JSON_TYPE)
                .header(ApiKeyRequest.AUTHENTICATION_HEADER, apiKey)
                .post(messageIds);
    } catch (EmoClientException e) {
        throw convertException(e);
    }
}
/**
 * Polls up to {@code limit} messages from the queue, claiming each returned
 * message for {@code claimTtl}.
 *
 * @return the claimed messages, possibly empty
 */
protected List<Message> doPoll(String apiKey, String queue, Duration claimTtl, int limit) {
    checkNotNull(queue, "queue");
    checkNotNull(claimTtl, "claimTtl");
    try {
        // Claim TTL in whole seconds, clamped to [0, Integer.MAX_VALUE].
        Integer claimTtlSeconds = Ttls.toSeconds(claimTtl, 0, Integer.MAX_VALUE);
        URI uri = _queueService.clone()
                .segment(queue, "poll")
                .queryParam("ttl", claimTtlSeconds)
                .queryParam("limit", limit)
                .queryParam("partitioned", _partitionSafe)
                .build();
        EmoResource resource = _client.resource(uri)
                .accept(MediaType.APPLICATION_JSON_TYPE)
                .header(ApiKeyRequest.AUTHENTICATION_HEADER, apiKey);
        return resource.get(new TypeReference<List<Message>>() {});
    } catch (EmoClientException e) {
        throw convertException(e);
    }
}
/**
 * Renews the claims on the given event keys for {@code claimTtl} via the databus
 * "renew" endpoint for the subscription.
 */
@Override
public void renew(String apiKey, @PartitionKey String subscription, Collection<String> eventKeys, Duration claimTtl) {
    checkNotNull(subscription, "subscription");
    checkNotNull(eventKeys, "eventKeys");
    checkNotNull(claimTtl, "claimTtl");
    try {
        // Claim TTL in whole seconds, clamped to [0, Integer.MAX_VALUE].
        Integer claimTtlSeconds = Ttls.toSeconds(claimTtl, 0, Integer.MAX_VALUE);
        URI uri = _databus.clone()
                .segment(subscription, "renew")
                .queryParam("ttl", claimTtlSeconds)
                .queryParam("partitioned", _partitionSafe)
                .build();
        _client.resource(uri)
                .type(MediaType.APPLICATION_JSON_TYPE)
                .header(ApiKeyRequest.AUTHENTICATION_HEADER, apiKey)
                .post(eventKeys);
    } catch (EmoClientException e) {
        throw convertException(e);
    }
}
/**
 * Queues writes of the given history entries into the history row identified by
 * {@code rowKey}, one column per entry with the configured history TTL.
 *
 * @param historyList entries to persist; {@code null} or empty is a no-op
 * @param rowKey      row key, expected to be a {@link ByteBuffer}
 */
@Override
public void commit(List<History> historyList, Object rowKey) {
    if (historyList != null && !historyList.isEmpty()) {
        // The history TTL is identical for every column; compute it once
        // instead of re-deriving it on each loop iteration.
        Integer ttlSeconds = Ttls.toSeconds(_historyStore.getHistoryTtl(), 1, null);
        ColumnListMutation<UUID> historyMutation = _mutation.withRow(_columnFamily, (ByteBuffer) rowKey);
        for (History history : historyList) {
            historyMutation.putColumn(history.getChangeId(),
                    _changeEncoder.encodeHistory(history),
                    ttlSeconds);
        }
    }
}
/**
 * Queues writes of the given history entries into the history row identified by
 * {@code rowKey}, one column per entry with the configured history TTL.
 *
 * @param historyList entries to persist; {@code null} or empty is a no-op
 * @param rowKey      row key, expected to be a {@link ByteBuffer}
 */
@Override
public void commit(List<History> historyList, Object rowKey) {
    if (historyList != null && !historyList.isEmpty()) {
        // The history TTL is identical for every column; compute it once
        // instead of re-deriving it on each loop iteration.
        Integer ttlSeconds = Ttls.toSeconds(_historyStore.getHistoryTtl(), 1, null);
        ColumnListMutation<UUID> historyMutation = _mutation.withRow(_columnFamily, (ByteBuffer) rowKey);
        for (History history : historyList) {
            historyMutation.putColumn(history.getChangeId(),
                    _changeEncoder.encodeHistory(history),
                    ttlSeconds);
        }
    }
}
private void save(String channel, ByteBuffer slabId, boolean open, ConsistencyLevel consistency) { MutationBatch mutation = _keyspace.prepareMutationBatch(consistency); Duration ttl = getTtl(channel, open); mutation.withRow(ColumnFamilies.MANIFEST, channel) .putColumn(slabId, open, Ttls.toSeconds(ttl, 1, null)); // Readers check for the open slab marker to see if a slab is open and may not re-read the manifest open // flag very often. So delete the open slab marker so readers notice the state change more quickly. if (!open) { mutation.withRow(ColumnFamilies.SLAB, slabId) .deleteColumn(Constants.OPEN_SLAB_MARKER); } execute(mutation); }
private void save(String channel, ByteBuffer slabId, boolean open, ConsistencyLevel consistency) { MutationBatch mutation = _keyspace.prepareMutationBatch(consistency); Duration ttl = getTtl(channel, open); mutation.withRow(ColumnFamilies.MANIFEST, channel) .putColumn(slabId, open, Ttls.toSeconds(ttl, 1, null)); // Readers check for the open slab marker to see if a slab is open and may not re-read the manifest open // flag very often. So delete the open slab marker so readers notice the state change more quickly. if (!open) { mutation.withRow(ColumnFamilies.SLAB, slabId) .deleteColumn(Constants.OPEN_SLAB_MARKER); } execute(mutation); }
@ParameterizedTimed(type="AstyanaxStorageProvider") @Override public void writeChunk(Table tbl, String blobId, int chunkId, ByteBuffer data, Duration ttl, long timestamp) { AstyanaxTable table = (AstyanaxTable) checkNotNull(tbl, "table"); for (AstyanaxStorage storage : table.getWriteStorage()) { BlobPlacement placement = (BlobPlacement) storage.getPlacement(); // Write two columns: one small one and one big one with the binary data. Readers can query on // the presence of the small one to be confident that the big column has replicated and is available. MutationBatch mutation = placement.getKeyspace().prepareMutationBatch(CONSISTENCY_STRONG) .setTimestamp(timestamp); Integer ttlSeconds = Ttls.toSeconds(ttl, 1, null); mutation.withRow(placement.getBlobColumnFamily(), storage.getRowKey(blobId)) .putEmptyColumn(getColumn(ColumnGroup.B, chunkId), ttlSeconds) .putColumn(getColumn(ColumnGroup.Z, chunkId), data, ttlSeconds); execute(mutation); _blobWriteMeter.mark(data.remaining()); } }
/**
 * Adds one CQL insert per history entry to the batch statement, each carrying
 * the configured history TTL and consistency level.
 *
 * @param historyList entries to persist; {@code null} or empty is a no-op
 * @param rowKey      value for the row key column
 */
@Override
public void commit(List<History> historyList, Object rowKey) {
    if (historyList != null && !historyList.isEmpty()) {
        // The TTL is identical for every insert; compute it once rather than
        // re-deriving it per history entry inside the loop.
        Integer ttlSeconds = Ttls.toSeconds(_historyStore.getHistoryTtl(), 1, null);
        for (History history : historyList) {
            _batchStatement.add(QueryBuilder.insertInto(_tableDDL.getTableMetadata())
                    .value(_tableDDL.getRowKeyColumnName(), rowKey)
                    .value(_tableDDL.getChangeIdColumnName(), history.getChangeId())
                    .value(_tableDDL.getValueColumnName(), _changeEncoder.encodeHistory(history))
                    .using(ttl(ttlSeconds))
                    .setConsistencyLevel(_consistencyLevel));
        }
    }
}
/**
 * Adds one CQL insert per history entry to the batch statement, each carrying
 * the configured history TTL and consistency level.
 *
 * @param historyList entries to persist; {@code null} or empty is a no-op
 * @param rowKey      value for the row key column
 */
@Override
public void commit(List<History> historyList, Object rowKey) {
    if (historyList != null && !historyList.isEmpty()) {
        // The TTL is identical for every insert; compute it once rather than
        // re-deriving it per history entry inside the loop.
        Integer ttlSeconds = Ttls.toSeconds(_historyStore.getHistoryTtl(), 1, null);
        for (History history : historyList) {
            _batchStatement.add(QueryBuilder.insertInto(_tableDDL.getTableMetadata())
                    .value(_tableDDL.getRowKeyColumnName(), rowKey)
                    .value(_tableDDL.getChangeIdColumnName(), history.getChangeId())
                    .value(_tableDDL.getValueColumnName(), _changeEncoder.encodeHistory(history))
                    .using(ttl(ttlSeconds))
                    .setConsistencyLevel(_consistencyLevel));
        }
    }
}
/**
 * Writes the blob's storage summary metadata (as JSON) to every write storage
 * for the table, timestamped from the summary and expiring after {@code ttl}.
 */
@ParameterizedTimed(type = "AstyanaxStorageProvider")
@Override
public void writeMetadata(Table tbl, String blobId, StorageSummary summary, Duration ttl) {
    AstyanaxTable table = (AstyanaxTable) checkNotNull(tbl, "table");
    // Both conversions are pure and identical for every storage; do them once.
    Integer ttlSeconds = Ttls.toSeconds(ttl, 1, null);
    String summaryJson = JsonHelper.asJson(summary);
    for (AstyanaxStorage storage : table.getWriteStorage()) {
        BlobPlacement placement = (BlobPlacement) storage.getPlacement();
        MutationBatch batch = placement.getKeyspace().prepareMutationBatch(CONSISTENCY_STRONG)
                .setTimestamp(summary.getTimestamp());
        batch.withRow(placement.getBlobColumnFamily(), storage.getRowKey(blobId))
                .putColumn(getColumn(ColumnGroup.A, 0), summaryJson, ttlSeconds);
        execute(batch);
    }
}