/**
 * Decides from the slabId's embedded timestamp whether the slab is young enough that the absence of
 * an "open slab marker" should be ignored.
 * <p>
 * A freshly created slab has its 'manifest' row written with "open=true" before any columns exist in
 * the 'slab' column family, so a scan that finds no marker does *not* prove the slab is stale. Writing
 * the marker first (outside the batch mutation) would close that window, but since the marker only
 * lives for the TTL without a follow-up write, comparing the slabId's age against the same TTL gives
 * exactly the same effect with fewer writes.
 *
 * @param slabId serialized time-UUID identifying the slab; read via a duplicate so the caller's
 *               buffer position is untouched
 * @return true if the slab was created within the open-slab-marker TTL
 */
private boolean isRecent(ByteBuffer slabId) {
    UUID slabUuid = TimeUUIDSerializer.get().fromByteBuffer(slabId.duplicate());
    long slabAgeMillis = System.currentTimeMillis() - TimeUUIDs.getTimeMillis(slabUuid);
    return slabAgeMillis <= Constants.OPEN_SLAB_MARKER_TTL.toMillis();
}
/** Allocates a new slab identifier: a freshly generated time-based UUID in serialized form. */
private ByteBuffer generateSlabId() {
    UUID slabUuid = TimeUUIDs.newUUID();
    return TimeUUIDSerializer.get().toByteBuffer(slabUuid);
}
}
/**
 * Returns the serialized UUID immediately following the given one, formed by adding one to the
 * least-significant 64 bits while keeping the most-significant bits unchanged.
 * <p>
 * NOTE(review): this assumes the +1 on the LSB never wraps in practice for time-UUIDs — confirm the
 * enclosing serializer's ordering semantics if that matters to callers.
 *
 * @param byteBuffer serialized UUID; read via a duplicate so the caller's position is untouched
 */
@Override
public ByteBuffer getNext(ByteBuffer byteBuffer) {
    UUID current = fromByteBuffer(byteBuffer.duplicate());
    UUID successor = new java.util.UUID(current.getMostSignificantBits(), current.getLeastSignificantBits() + 1);
    return toByteBuffer(successor);
}
/**
 * Appends the given time-UUID as the next composite component, using the shared time-UUID
 * serializer and this builder's configured equality operator.
 *
 * @return this builder, for chaining
 */
@Override
public CompositeBuilder addTimeUUID(UUID value) {
    composite.addComponent(value, TimeUUIDSerializer.get(), equality);
    return this;
}
/**
 * Renders a serialized time-UUID as a human-readable timestamp string
 * (pattern {@code yyyy-MM-dd'T'HH:mm:ss.SSSZ} in the default time zone).
 *
 * @param byteBuffer serialized time-UUID, or null
 * @return formatted timestamp, or null when the input is null
 */
@Override
public String getString(ByteBuffer byteBuffer) {
    if (byteBuffer == null) {
        return null;
    }
    UUID uuid = this.fromByteBuffer(byteBuffer.duplicate());
    // The UUID timestamp is in microseconds; Date wants milliseconds.
    long millis = TimeUUIDUtils.getMicrosTimeFromUUID(uuid) / 1000;
    return new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss.SSSZ").format(new Date(millis));
}
/**
 * Appends a time-UUID component derived from the given timestamp. The value is converted from the
 * supplied units to microseconds before the UUID is synthesized.
 *
 * @param value timestamp expressed in {@code units}
 * @param units unit of {@code value}
 * @return this builder, for chaining
 */
@Override
public CompositeBuilder addTimeUUID(Long value, TimeUnit units) {
    long micros = TimeUnit.MICROSECONDS.convert(value, units);
    composite.addComponent(TimeUUIDUtils.getMicrosTimeUUID(micros), TimeUUIDSerializer.get(), equality);
    return this;
}
/**
 * Heuristic guard: treat a slab as "recent" when its slabId was minted within the open-slab-marker
 * TTL, in which case the lack of an "open slab marker" must be ignored.
 * <p>
 * When a slab is first created the 'manifest' row carries "open=true" but the 'slab' column family is
 * still empty, so scanning it and finding no marker does *not* mean the slab is stale and closeable.
 * Writing the marker before the manifest (in a separate mutation) would also fix this, but because the
 * marker expires after the same TTL anyway, an age check on the slabId is equivalent and cheaper.
 *
 * @param slabId serialized time-UUID of the slab; deserialized from a duplicate to leave the
 *               caller's buffer position intact
 * @return true when the slab's age is within the open-slab-marker TTL
 */
private boolean isRecent(ByteBuffer slabId) {
    long createdAtMillis = TimeUUIDs.getTimeMillis(TimeUUIDSerializer.get().fromByteBuffer(slabId.duplicate()));
    return System.currentTimeMillis() - createdAtMillis <= Constants.OPEN_SLAB_MARKER_TTL.toMillis();
}
/** Produces a new slab identifier by serializing a newly minted time-based UUID. */
private ByteBuffer generateSlabId() {
    return TimeUUIDSerializer.get()
            .toByteBuffer(TimeUUIDs.newUUID());
}
}
@Override public Placement newPlacement(String placement) throws ConnectionException { String[] parsed = PlacementUtil.parsePlacement(placement); String keyspaceName = parsed[0]; String cfPrefix = parsed[1]; CassandraKeyspace keyspace = _keyspaceMap.get(keyspaceName); if (keyspace == null) { throw new UnknownPlacementException(format( "Placement string refers to unknown or non-local Cassandra keyspace: %s", keyspaceName), placement); } KeyspaceDefinition keyspaceDef = keyspace.getAstyanaxKeyspace().describeKeyspace(); AnnotatedCompositeSerializer<DeltaKey> deltaKeySerializer = new AnnotatedCompositeSerializer<DeltaKey>(DeltaKey.class); // DDL's are not actually configurable due to the way we abstract the names from the placements here. // In the future, we should either phase out the DDL config or change the implementation here to conform to it. ColumnFamily<ByteBuffer, UUID> deltaCf = getColumnFamily(keyspaceDef, cfPrefix, "delta", placement, TimeUUIDSerializer.get()); ColumnFamily<ByteBuffer, DeltaKey> blockedDeltaCf = getColumnFamily(keyspaceDef, cfPrefix, "delta_v2", placement, deltaKeySerializer); ColumnFamily<ByteBuffer, UUID> deltaHistoryCf = getColumnFamily(keyspaceDef, cfPrefix, "history", placement, TimeUUIDSerializer.get()); // Calculate the data centers on demand since they may change in a live system. return new DeltaPlacement(placement, keyspace, deltaCf, blockedDeltaCf, deltaHistoryCf); }
@Override public boolean accept(ByteBuffer slabId, boolean open, ByteBuffer nextSlabId) { UUID nextSlabUUID = !foundStartingSlab && nextSlabId != null ? TimeUUIDSerializer.get().fromByteBuffer(nextSlabId.duplicate()) : null; // If the nextSlab's UUID is less than the desired timestamp, then skip reading events from this slab if (nextSlabUUID != null && TimeUUIDs.compareTimestamps(nextSlabUUID, sinceUUID) < 0) { return false; } if (!foundStartingSlab) { // Log the slab that we start reading from foundStartingSlab = true; _log.info("Starting to replay {} from slabid {}, for since timestamp of {}", channel, UUIDSerializer.get().fromByteBuffer(slabId), ISO_FORMATTER.format(since.toInstant())); } return true; } };
private void cacheOldestSlabForChannel(String channel, UUID slabId) { // Functionally the same as ConcurrentMap.computeIfAbsent(...) try { // Subtract 1 minute from the slab ID to allow for a reasonable window of out-of-order writes while // constraining the number of tombstones read to 1 minute's worth of rows. _oldestSlab.get(channel, () -> TimeUUIDSerializer.get().toByteBuffer( TimeUUIDs.uuidForTimeMillis(TimeUUIDs.getTimeMillis(slabId) - TimeUnit.MINUTES.toMillis(1)))); } catch (ExecutionException e) { // Won't happen, the "execution" just returns a constant. } }
@Override public Placement newPlacement(String placement) throws ConnectionException { String[] parsed = PlacementUtil.parsePlacement(placement); String keyspaceName = parsed[0]; String cfPrefix = parsed[1]; CassandraKeyspace keyspace = _keyspaceMap.get(keyspaceName); if (keyspace == null) { throw new UnknownPlacementException(format( "Placement string refers to unknown or non-local Cassandra keyspace: %s", keyspaceName), placement); } KeyspaceDefinition keyspaceDef = keyspace.getAstyanaxKeyspace().describeKeyspace(); AnnotatedCompositeSerializer<DeltaKey> deltaKeySerializer = new AnnotatedCompositeSerializer<DeltaKey>(DeltaKey.class); // DDL's are not actually configurable due to the way we abstract the names from the placements here. // In the future, we should either phase out the DDL config or change the implementation here to conform to it. ColumnFamily<ByteBuffer, UUID> deltaCf = getColumnFamily(keyspaceDef, cfPrefix, "delta", placement, TimeUUIDSerializer.get()); ColumnFamily<ByteBuffer, DeltaKey> blockedDeltaCf = getColumnFamily(keyspaceDef, cfPrefix, "delta_v2", placement, deltaKeySerializer); ColumnFamily<ByteBuffer, UUID> deltaHistoryCf = getColumnFamily(keyspaceDef, cfPrefix, "history", placement, TimeUUIDSerializer.get()); // Calculate the data centers on demand since they may change in a live system. return new DeltaPlacement(placement, keyspace, deltaCf, blockedDeltaCf, deltaHistoryCf); }
@Override public boolean accept(ByteBuffer slabId, boolean open, ByteBuffer nextSlabId) { UUID nextSlabUUID = !foundStartingSlab && nextSlabId != null ? TimeUUIDSerializer.get().fromByteBuffer(nextSlabId.duplicate()) : null; // If the nextSlab's UUID is less than the desired timestamp, then skip reading events from this slab if (nextSlabUUID != null && TimeUUIDs.compareTimestamps(nextSlabUUID, sinceUUID) < 0) { return false; } if (!foundStartingSlab) { // Log the slab that we start reading from foundStartingSlab = true; _log.info("Starting to replay {} from slabid {}, for since timestamp of {}", channel, UUIDSerializer.get().fromByteBuffer(slabId), ISO_FORMATTER.format(since.toInstant())); } return true; } };
private void cacheOldestSlabForChannel(String channel, UUID slabId) { // Functionally the same as ConcurrentMap.computeIfAbsent(...) try { // Subtract 1 minute from the slab ID to allow for a reasonable window of out-of-order writes while // constraining the number of tombstones read to 1 minute's worth of rows. _oldestSlab.get(channel, () -> TimeUUIDSerializer.get().toByteBuffer( TimeUUIDs.uuidForTimeMillis(TimeUUIDs.getTimeMillis(slabId) - TimeUnit.MINUTES.toMillis(1)))); } catch (ExecutionException e) { // Won't happen, the "execution" just returns a constant. } }
// NOTE(review): constructor fragment — only the beginning is visible in this chunk; it continues past
// this view, so no code is changed here. What the visible portion shows: three column families are
// constructed from the builder's base name plus fixed suffixes — queue entries, key-index metadata,
// and a history CF keyed by time-UUID columns. Presumably the remainder initializes the rest of the
// queue's state — verify against the full file.
private ShardedDistributedMessageQueue(Builder builder) throws MessageQueueException { this.queueColumnFamily = ColumnFamily.newColumnFamily(builder.columnFamilyName + DEFAULT_QUEUE_SUFFIX, StringSerializer.get(), entrySerializer); this.keyIndexColumnFamily = ColumnFamily.newColumnFamily(builder.columnFamilyName + DEFAULT_METADATA_SUFFIX, StringSerializer.get(), metadataSerializer); this.historyColumnFamily = ColumnFamily.newColumnFamily(builder.columnFamilyName + DEFAULT_HISTORY_SUFFIX, StringSerializer.get(), TimeUUIDSerializer.get());
// NOTE(review): fragment of a larger method — the enclosing method and the `else` body are outside
// this view, so no code is changed here. Visible behavior: when the manifest iterator has at least one
// column, the first (oldest-seen) column's name — a time-UUID slab id — is cached as the channel's
// oldest slab before the iterator is returned to the caller.
if (peekingManifestColumns.hasNext()) { cacheOldestSlabForChannel(channel, TimeUUIDSerializer.get().fromByteBuffer(peekingManifestColumns.peek().getName())); return peekingManifestColumns; } else {
// NOTE(review): fragment of a larger method — the enclosing method and the `else` branch continue
// beyond this view, so the code is left untouched. Visible behavior: if any manifest column exists,
// its name (a serialized time-UUID slab id, peeked without consuming) is recorded as the channel's
// oldest slab and the peeking iterator is handed back to the caller.
if (peekingManifestColumns.hasNext()) { cacheOldestSlabForChannel(channel, TimeUUIDSerializer.get().fromByteBuffer(peekingManifestColumns.peek().getName())); return peekingManifestColumns; } else {