// Fans the long value out to every delegate hasher, then returns this for chaining.
@Override
public Hasher putLong(long l) {
  for (Hasher delegate : hashers) {
    delegate.putLong(l);
  }
  return this;
}
/** Feeds {@code value} into the backing hasher; returns this writer for call chaining. */
public StatisticsHasher putLong(long value) {
  hasher.putLong(value);
  return this;
}
// Forwards the long to each wrapped hasher in turn; returns this so calls can chain.
@Override
public Hasher putLong(long l) {
  for (Hasher target : hashers) {
    target.putLong(l);
  }
  return this;
}
// Broadcasts the value to all underlying hashers and returns this for fluent use.
@Override
public Hasher putLong(long l) {
  for (Hasher h : hashers) {
    h.putLong(l);
  }
  return this;
}
/** Hashes a single long by funneling it through a hasher sized for exactly 8 bytes. */
@Override
public HashCode hashLong(long input) {
  // 8 == Long.BYTES: the hasher only ever receives this one value.
  return newHasher(8).putLong(input).hash();
}
/**
 * Hashes an optional long: the presence flag is always mixed in first, and an
 * absent value contributes a fixed 0 so all "absent" states hash identically.
 */
public StatisticsHasher putOptionalLong(boolean present, long value) {
  hasher.putBoolean(present);
  if (present) {
    hasher.putLong(value);
  } else {
    hasher.putLong(0);
  }
  return this;
}
/**
 * Hashes a long array as its length followed by every element, so arrays that
 * differ only in length still produce distinct hash input.
 */
public void putLongs(long[] array) {
  hasher.putInt(array.length);
  for (int i = 0; i < array.length; i++) {
    hasher.putLong(array[i]);
  }
}
/** Shortcut for hashing one long value; the hasher is sized to the 8 bytes of a long. */
@Override
public HashCode hashLong(long input) {
  return newHasher(8).putLong(input).hash();
}
// Hash exactly one long: allocate an 8-byte hasher, feed the value, finish.
@Override
public HashCode hashLong(long input) {
  return newHasher(8).putLong(input).hash();
}
/** Hashes the selector's long value, skipping nulls when SQL-compatible null handling is active. */
@Override
public void hashRow(BaseLongColumnValueSelector selector, Hasher hasher) {
  // Equivalent to: replaceWithDefault() || !isNull(); rewritten as a skip-guard via De Morgan.
  boolean skipNull = !NullHandling.replaceWithDefault() && selector.isNull();
  if (!skipNull) {
    hasher.putLong(selector.getLong());
  }
}
/** Checks that the {@code hashLong} shortcut agrees with a fresh Hasher fed the same value. */
private static void assertHashLongEquivalence(HashFunction hashFunction, Random random) {
  long value = random.nextLong();
  assertEquals(hashFunction.hashLong(value), hashFunction.newHasher().putLong(value).hash());
}
/** * In most cases we want to use identity equality for StopPatterns. There is a single StopPattern instance for each * semantic StopPattern, and we don't want to calculate complicated hashes or equality values during normal * execution. However, in some cases we want a way to consistently identify trips across versions of a GTFS feed, when the * feed publisher cannot ensure stable trip IDs. Therefore we define some additional hash functions. */ public HashCode semanticHash(HashFunction hashFunction) { Hasher hasher = hashFunction.newHasher(); for (int s = 0; s < size; s++) { Stop stop = stops[s]; // Truncate the lat and lon to 6 decimal places in case they move slightly between feed versions hasher.putLong((long) (stop.getLat() * 1000000)); hasher.putLong((long) (stop.getLon() * 1000000)); } // Use hops rather than stops because drop-off at stop 0 and pick-up at last stop are not important // and have changed between OTP versions. for (int hop = 0; hop < size - 1; hop++) { hasher.putInt(pickups[hop]); hasher.putInt(dropoffs[hop + 1]); } return hasher.hash(); }
// Derive a non-negative hash of the block id, then map it onto a worker slot.
// NOTE(review): Math.abs(Integer.MIN_VALUE) is still negative, so for the one hash value
// equal to Integer.MIN_VALUE both hv and index become negative — TODO confirm this is
// guarded elsewhere or rehash instead.
// The loop variable is deliberately unused; presumably the loop only bounds the number of
// probe attempts over workerInfos — body not visible in this view.
int hv = Math.abs(mHashFunc.newHasher().putLong(options.getBlockId()).hash().asInt()); int index = hv % workerInfos.size(); for (BlockWorkerInfo blockWorkerInfoUnused : workerInfos) {
/** * Hash our data into a consistent long */ @Override protected Hasher doHash( final ApplicationScope scope, final DirectedEdgeMeta directedEdgeMeta, final ShardEntryGroup shardEntryGroup ) { final Hasher hasher = super.doHash( scope, directedEdgeMeta, shardEntryGroup ); // add the compaction target to the hash final Shard compactionTarget = shardEntryGroup.getCompactionTarget(); hasher.putLong( compactionTarget.getShardIndex() ); return hasher; } }
// Feed a known 64-bit constant into the hasher, then verify the finished digest,
// viewed as a long, matches the expected hashCode computed earlier in this test.
hasher.putLong(0x0000000001000101L); assertEquals(hashCode, hasher.hash().asLong());
/** Returns a consumer that records a LONG kind marker followed by the value itself. */
public Consumer<Long> longValue() {
  return v -> {
    putKind(Kind.LONG);
    hasher.putLong(v);
  };
}
/**
 * Hashes a named long field: the name is length-prefixed to keep field boundaries
 * unambiguous, then the value follows. Returns this writer for chaining.
 * NOTE: the prefix is the UTF-16 char count from String.length(), not the UTF-8 byte count.
 */
@Override
public DescriptionWriter putField(String name, long value) {
  // Guava's Hasher methods return the same instance, so the chain can be unrolled.
  hasher.putInt(name.length());
  hasher.putString(name, Charsets.UTF_8);
  hasher.putLong(value);
  return this;
}