/**
 * Extracts the timestamp of a time-based UUID as microseconds since the Unix epoch.
 *
 * @param uuid a time-based (version 1) UUID, may be null
 * @return microseconds since the Unix epoch, or 0 when {@code uuid} is null
 */
public static long getTimestampInMicros( UUID uuid ) {
    if ( uuid == null ) {
        return 0;
    }
    // uuid.timestamp() is in 100-ns units; KCLOCK_OFFSET is presumably the
    // Gregorian->Unix epoch offset in the same units (confirm at its declaration).
    // Dividing by 10 converts 100-ns units to microseconds.
    final long hundredNanos = uuid.timestamp();
    return ( hundredNanos - KCLOCK_OFFSET ) / 10;
}
/**
 * Gets the raw 60-bit timestamp embedded in a time-based (version 1) UUID,
 * expressed in 100-ns intervals since the Gregorian epoch (1582-10-15T00:00Z).
 *
 * @param version the version UUID to read, may be null
 * @return the UUID's raw timestamp, or 0 when {@code version} is null
 */
public static long getTimestamp( UUID version ) {
    // Null-guard for consistency with the sibling getTimestampInMillis /
    // getTimestampInMicros helpers, which return 0 for null rather than NPE.
    if ( version == null ) {
        return 0;
    }
    return version.timestamp();
}
/**
 * Extracts the timestamp of a time-based UUID as milliseconds since the Unix epoch.
 *
 * @param uuid a time-based (version 1) UUID, may be null
 * @return milliseconds since the Unix epoch, or 0 when {@code uuid} is null
 */
public static long getTimestampInMillis( UUID uuid ) {
    if ( uuid == null ) {
        return 0;
    }
    // Shift the 100-ns Gregorian-epoch timestamp to the Unix epoch, then scale
    // to milliseconds via KCLOCK_MULTIPLIER_L (presumably 10000 — verify).
    final long rawTimestamp = uuid.timestamp();
    return ( rawTimestamp - KCLOCK_OFFSET ) / KCLOCK_MULTIPLIER_L;
}
/**
 * Returns the UUID's timestamp converted to microseconds since the Unix epoch.
 *
 * @param uuid time-based UUID; a null input yields 0
 * @return microseconds since the Unix epoch, or 0 for null
 */
public static long getTimestampInMicros( UUID uuid ) {
    // Treat a missing UUID as timestamp zero rather than throwing.
    return uuid == null ? 0 : ( uuid.timestamp() - KCLOCK_OFFSET ) / 10;
}
/**
 * Returns the UUID's timestamp converted to milliseconds since the Unix epoch.
 * Delegates the unit conversion to getUnixTimestampInMillisFromUUIDTimestamp.
 *
 * @param uuid time-based UUID; a null input yields 0
 * @return milliseconds since the Unix epoch, or 0 for null
 */
public static long getTimestampInMillis( UUID uuid ) {
    return uuid == null
            ? 0
            : getUnixTimestampInMillisFromUUIDTimestamp( uuid.timestamp() );
}
/**
 * When marking nodes for deletion we must use the same unit of measure as the
 * edge timestamps, so the value is taken from a freshly generated time UUID's
 * raw timestamp rather than from the system clock.
 *
 * @return the raw 60-bit timestamp of a newly generated time UUID
 */
public static long createGraphOperationTimestamp(){
    return UUIDGenerator.newTimeUUID().timestamp();
}
private Date UUIDTimeStampToDate(UUID uuid) { long timeStamp = 0L; // The UUID is supposed to be time based so this should always be '1' // but this is just used for logging so we don't want to throw an error i it is misused. if (uuid.version() == 1) { // this is the difference between midnight October 15, 1582 UTC and midnight January 1, 1970 UTC as 100 nanosecond units long epochDiff = 122192928000000000L; // the UUID timestamp is in 100 nanosecond units. // convert that to milliseconds timeStamp = ((uuid.timestamp()-epochDiff)/10000); } return new Date(timeStamp); }
synchronized public void add( String queueName, DatabaseQueueMessage databaseQueueMessage ) { UUID newest = newestByQueueName.get( queueName ); if ( newest == null ) { newest = databaseQueueMessage.getQueueMessageId(); } else { if ( databaseQueueMessage.getQueueMessageId().timestamp() > newest.timestamp() ) { newest = databaseQueueMessage.getQueueMessageId(); //logger.debug("New newest for queue {} is {}", queueName, newest.timestamp()); } } newestByQueueName.put( queueName, newest ); getQueue( queueName ).add( databaseQueueMessage ); }
private List<UUID> getVersionsOlderThanMarked(final EntityCollectionManager ecm, final Id entityId, final UUID markedVersion ){ final List<UUID> versions = new ArrayList<>(); // only take last 100 versions to avoid eating memory. a tool can be built for massive cleanups for old usergrid // clusters that do not have this in-line cleanup ecm.getVersionsFromMaxToMin( entityId, markedVersion) .take(100) .forEach( mvccLogEntry -> { if ( mvccLogEntry.getVersion().timestamp() < markedVersion.timestamp() ) { versions.add(mvccLogEntry.getVersion()); } }); return versions; }
@Override public Results getCollection( String collectionName, UUID startResult, int count, Level resultsLevel, boolean reversed ) throws Exception { final String ql; if (startResult != null ) { // UUID timestamp is a different measure than 'created' field on entities Calendar uuidEpoch = Calendar.getInstance(TimeZone.getTimeZone("UTC")); uuidEpoch.clear(); uuidEpoch.set(1582, 9, 15, 0, 0, 0); // 9 = October long epochMillis = uuidEpoch.getTime().getTime(); long time = (startResult.timestamp() / 10000L) + epochMillis; if ( !reversed ) { ql = "select * where created > " + time; } else { ql = "select * where created < " + time; } } else { ql = "select *"; } Query query = Query.fromQL( ql ); if(query == null ){ throw new RuntimeException("Unable to get data for collection: "+collectionName); } query.setLimit( count ); query.setReversed( reversed ); return searchCollection( collectionName, query ); }
/** Returns a UUID that is -1 of the passed uuid, sorted by time uuid only */ public static UUID decrement( UUID uuid ) { if ( !isTimeBased( uuid ) ) { throw new IllegalArgumentException( "The uuid must be a time type" ); } //timestamp is in the 60 bit timestamp long timestamp = uuid.timestamp(); timestamp--; if ( timestamp < 0 ) { throw new IllegalArgumentException( "You must specify a time uuid with a timestamp > 0" ); } //get our bytes, then set the smaller timestamp into it byte[] uuidBytes = bytes( uuid ); setTime( uuidBytes, timestamp ); return uuid( uuidBytes ); }
/** Returns a UUID that is -1 of the passed uuid, sorted by time uuid only */ public static UUID decrement( UUID uuid ) { if ( !isTimeBased( uuid ) ) { throw new IllegalArgumentException( "The uuid must be a time type" ); } //timestamp is in the 60 bit timestamp long timestamp = uuid.timestamp(); timestamp--; if ( timestamp < 0 ) { throw new IllegalArgumentException( "You must specify a time uuid with a timestamp > 0" ); } //get our bytes, then set the smaller timestamp into it byte[] uuidBytes = bytes( uuid ); setTime( uuidBytes, timestamp ); return uuid( uuidBytes ); }
/**
 * Do the column update or delete for the given column and row key.
 *
 * @param collectionScope We need to use this when getting the keyspace
 */
private MutationBatch doWrite( ApplicationScope collectionScope, Id entityId, UUID version, RowOp op ) {

    final Id applicationId = collectionScope.getApplication();
    final ScopedRowKey<K> key = createKey( applicationId, entityId );

    final long timestamp = version.timestamp();
    if (logger.isTraceEnabled()) {
        logger.trace("Writing version with timestamp '{}'", timestamp);
    }
    // NOTE(review): 'timestamp' is only logged here, never applied to the batch
    // (unlike the sibling doWrite that calls setTimestamp) — confirm the RowOp
    // is responsible for stamping the columns.

    final MutationBatch batch = keyspace.prepareMutationBatch();
    op.doOp( batch.withRow( CF_ENTITY_LOG, key ) );
    return batch;
}
@Override public Shard selectShard(String queueName, String region, Shard.Type shardType, UUID pointer) { // use shard iterator to walk through shards until shard can be found ShardIterator shardIterator = new ShardIterator( cassandraClient, queueName, region, shardType, Optional.empty() ); if ( !shardIterator.hasNext() ) { String msg = MessageFormat.format( "No shards found for queue {0} region {1} type {2}", queueName, region, shardType ); throw new NotFoundException( msg ); } // walk through shards from oldest to newest Shard prev = shardIterator.next(); while ( shardIterator.hasNext() ) { Shard next = shardIterator.next(); // if item is older than the next shard, the use prev shard if ( pointer.timestamp() < next.getPointer().timestamp() ) { return prev; } prev = next; } return prev; } }
/**
 * Do the write on the correct row for the entity id with the operation.
 * The column timestamp is taken from the version UUID so writes order correctly.
 */
private MutationBatch doWrite( final ApplicationScope applicationScope, final Id entityId, final UUID version,
                               final RowOp op ) {

    // Row key scoped to the owning application.
    final ScopedRowKey<Id> rowKey =
            ScopedRowKey.fromKey( applicationScope.getApplication(), entityId );

    final MutationBatch batch = keyspace.prepareMutationBatch();
    op.doOp( batch.withRow( CF_ENTITY_DATA, rowKey ).setTimestamp( version.timestamp() ) );

    return batch;
}
/**
 * Smoke test for the UUIDUtils helpers: generates time UUIDs and logs their
 * timestamps alongside the system clock and the MIN/MAX sentinel UUIDs for
 * manual comparison.
 *
 * NOTE(review): this test contains no assertions — it only fails if one of the
 * calls throws. Consider asserting e.g. getTimestampInMillis is within a small
 * delta of System.currentTimeMillis().
 */
@Test
public void testUUIDUtils() {
    UUID uuid = UUIDUtils.newTimeUUID();
    logger.info("" + uuid);
    // Raw 60-bit timestamp vs. the converted milli/microsecond forms.
    logger.info("" + uuid.timestamp());
    logger.info("" + UUIDUtils.getTimestampInMillis(uuid));
    logger.info("" + UUIDUtils.getTimestampInMillis(UUIDUtils.newTimeUUID()));
    logger.info("" + System.currentTimeMillis());
    logger.info("" + UUIDUtils.getTimestampInMicros(UUIDUtils.newTimeUUID()));
    logger.info("" + (System.currentTimeMillis() * 1000));
    // Sentinel boundaries: structure of the minimum time UUID.
    logger.info("" + UUIDUtils.MIN_TIME_UUID);
    logger.info("" + UUIDUtils.MIN_TIME_UUID.variant());
    logger.info("" + UUIDUtils.MIN_TIME_UUID.version());
    logger.info("" + UUIDUtils.MIN_TIME_UUID.clockSequence());
    logger.info("" + UUIDUtils.MIN_TIME_UUID.timestamp());
    // Sentinel boundaries: structure of the maximum time UUID.
    logger.info("" + UUIDUtils.MAX_TIME_UUID);
    logger.info("" + UUIDUtils.MAX_TIME_UUID.variant());
    logger.info("" + UUIDUtils.MAX_TIME_UUID.version());
    logger.info("" + UUIDUtils.MAX_TIME_UUID.clockSequence());
    logger.info("" + UUIDUtils.MAX_TIME_UUID.timestamp());
}
.withTimestamp( opTimestamp.timestamp() );
final long time = uuid.timestamp(); assertTrue("Incorrect time", current + 10000 - time > 0); final UUID[] uuids = new UUID[COUNT];