/**
 * Returns the node responsible for the given key.
 *
 * @param k the (hashable) key to locate
 * @return the node owning the key's hash position; never {@code null}
 *         (enforced by assertion when {@code -ea} is enabled)
 */
public MemcachedNode getPrimary(final String k) {
  final MemcachedNode node = getNodeForKey(hashAlg.hash(k));
  assert node != null : "Found no node for key " + k;
  return node;
}
/**
 * Builds the ketama continuum for the given set of nodes.
 *
 * @param nodes the live memcached nodes to place on the continuum
 * @param alg   the hash algorithm used for keys (and, for non-ketama
 *              algorithms, for continuum points as well)
 * @param conf  supplies the repetition count and the per-node key strings
 */
public KetamaNodeLocator(List<MemcachedNode> nodes, HashAlgorithm alg,
    KetamaNodeLocatorConfiguration conf) {
  super();
  allNodes = nodes;
  hashAlg = alg;
  config = conf;
  ketamaNodes = new TreeMap<Long, MemcachedNode>();
  int numReps = config.getNodeRepetitions();
  for (MemcachedNode node : nodes) {
    if (alg == HashAlgorithm.KETAMA_HASH) {
      // Ketama does some special work with md5 where it reuses chunks:
      // each 16-byte digest yields four little-endian 32-bit points.
      for (int i = 0; i < numReps / 4; i++) {
        byte[] digest = HashAlgorithm.computeMd5(config.getKeyForNode(node, i));
        for (int h = 0; h < 4; h++) {
          Long point = ((long) (digest[3 + h * 4] & 0xFF) << 24)
              | ((long) (digest[2 + h * 4] & 0xFF) << 16)
              | ((long) (digest[1 + h * 4] & 0xFF) << 8)
              | (digest[h * 4] & 0xFF);
          ketamaNodes.put(point, node);
        }
      }
    } else {
      // Generic algorithms: one continuum point per repetition.
      for (int i = 0; i < numReps; i++) {
        ketamaNodes.put(hashAlg.hash(config.getKeyForNode(node, i)), node);
      }
    }
  }
  // Holds only if no two continuum points collided.
  assert ketamaNodes.size() == numReps * nodes.size();
}
private void updateHash(MemcachedReplicaGroup group, boolean remove) { // Ketama does some special work with md5 where it reuses chunks. for (int i = 0; i < config.getNodeRepetitions() / 4; i++) { byte[] digest = HashAlgorithm.computeMd5(config.getKeyForGroup(group, i)); for (int h = 0; h < 4; h++) { Long k = ((long) (digest[3 + h * 4] & 0xFF) << 24) | ((long) (digest[2 + h * 4] & 0xFF) << 16) | ((long) (digest[1 + h * 4] & 0xFF) << 8) | (digest[h * 4] & 0xFF); if (remove) ketamaGroups.remove(k); else ketamaGroups.put(k, group); } } }
/**
 * Builds the ketama continuum for the given nodes.
 * For {@code KETAMA_HASH}, each MD5 digest of a node key is split into four
 * little-endian 32-bit continuum points (hence numReps / 4 digests per node);
 * for any other algorithm, one point is added per repetition.
 * NOTE(review): the trailing assert fails if any two points collide — confirm
 * collisions are acceptable to surface only under -ea.
 */
public KetamaNodeLocator(List<MemcachedNode> nodes, HashAlgorithm alg, KetamaNodeLocatorConfiguration conf) { super(); allNodes = nodes; hashAlg = alg; ketamaNodes=new TreeMap<Long, MemcachedNode>(); config= conf; int numReps= config.getNodeRepetitions(); for(MemcachedNode node : nodes) { // Ketama does some special work with md5 where it reuses chunks. if(alg == HashAlgorithm.KETAMA_HASH) { for(int i=0; i<numReps / 4; i++) { byte[] digest=HashAlgorithm.computeMd5(config.getKeyForNode(node, i)); for(int h=0;h<4;h++) { Long k = ((long)(digest[3+h*4]&0xFF) << 24) | ((long)(digest[2+h*4]&0xFF) << 16) | ((long)(digest[1+h*4]&0xFF) << 8) | (digest[h*4]&0xFF); ketamaNodes.put(k, node); } } } else { for(int i=0; i<numReps; i++) { ketamaNodes.put(hashAlg.hash(config.getKeyForNode(node, i)), node); } } } assert ketamaNodes.size() == numReps * nodes.size(); }
// Fragment (truncated mid-expression): starts building a 32-bit value from
// the first four MD5 bytes of the key, low byte last.
// NOTE(review): the "| ... << 8 | ..." terms are cut off here — confirm
// against the complete source before relying on this snippet.
byte[] bKey=computeMd5(k); rv = ((long) (bKey[3] & 0xFF) << 24) | ((long) (bKey[2] & 0xFF) << 16)
/**
 * Locates the primary node for a key on the ketama continuum.
 * When partial-string hashing is enabled, only the portion of the key
 * before the configured delimiter is hashed.
 *
 * @param k the key to locate
 * @return the node owning the first continuum position at or after the
 *         key's hash, wrapping to the first position when none follows
 */
public MemcachedNode getPrimary(String k) {
  String lookupKey = k;
  if (partialStringHash.get()) {
    final int delim = lookupKey.indexOf(hashDelimiter.get());
    if (delim > 0) {
      lookupKey = lookupKey.substring(0, delim);
    }
  }
  Long position = ketamaNodes.ceilingKey(Long.valueOf(hashingAlgorithm.hash(lookupKey)));
  if (position == null) {
    // Past the last point: wrap around the ring.
    position = ketamaNodes.firstKey();
  }
  return ketamaNodes.get(position);
}
// Fragment (malformed): an MD5/ketama loop whose body is truncated after the
// first shift term, fused with an unrelated newNodeMap.put(...) statement.
// NOTE(review): this line mixes two different snippets and does not compile
// as-is — reconstruct from the complete source rather than from here.
byte[] digest=HashAlgorithm.computeMd5(config.getKeyForNode(node, i)); for(int h=0;h<4;h++) { Long k = ((long)(digest[3+h*4]&0xFF) << 24) newNodeMap.put(hashAlg.hash(config.getKeyForNode(node, i)), node);
// Fragment (truncated mid-expression): begins assembling a 32-bit value from
// the first four MD5 bytes of the key.
// NOTE(review): the remaining "<< 8" and low-byte terms are cut off — confirm
// against the complete source.
byte[] bKey = computeMd5(k); rv = ((long) (bKey[3] & 0xFF) << 24) | ((long) (bKey[2] & 0xFF) << 16)
// Snippet: hashes the i-th repetition key for this node and registers the
// node under that continuum position in the replacement map.
final Long hashL = Long.valueOf(hashingAlgorithm.hash(config.getKeyForNode(node, i))); newNodeMap.put(hashL, node);
// Fragment (truncated mid-expression): same pattern as the other bKey
// snippets — combines MD5 bytes 3 and 2 of the key, with the lower terms
// cut off. NOTE(review): verify the full expression in the original source.
byte[] bKey=computeMd5(k); rv = ((long) (bKey[3] & 0xFF) << 24) | ((long) (bKey[2] & 0xFF) << 16)
// Snippet: non-ketama path — one continuum point per repetition, keyed by
// the configured hash of the node's i-th repetition key.
newNodeMap.put(hashAlg.hash(config.getKeyForNode(node, i)), node);
private void updateHash(MemcachedNode node, boolean remove) { if (!remove) { config.insertNode(node); } // Ketama does some special work with md5 where it reuses chunks. for (int i = 0; i < config.getNodeRepetitions() / 4; i++) { byte[] digest = HashAlgorithm.computeMd5(config.getKeyForNode(node, i)); for (int h = 0; h < 4; h++) { Long k = ((long) (digest[3 + h * 4] & 0xFF) << 24) | ((long) (digest[2 + h * 4] & 0xFF) << 16) | ((long) (digest[1 + h * 4] & 0xFF) << 8) | (digest[h * 4] & 0xFF); if (remove) { ketamaNodes.remove(k); } else { ketamaNodes.put(k, node); } } } if (remove) { config.removeNode(node); } }
private void nextHash() { // this.calculateHash(Integer.toString(tries)+key).hashCode(); long tmpKey = hashAlg.hash((numTries++) + key); // This echos the implementation of Long.hashCode() hashVal += (int) (tmpKey ^ (tmpKey >>> 32)); hashVal &= 0xffffffffL; /* truncate to 32-bits */ remainingTries--; }
private void nextHash() { // this.calculateHash(Integer.toString(tries)+key).hashCode(); long tmpKey = hashAlg.hash((numTries++) + key); // This echos the implementation of Long.hashCode() hashVal += (int) (tmpKey ^ (tmpKey >>> 32)); hashVal &= 0xffffffffL; /* truncate to 32-bits */ remainingTries--; }
private void nextHash() { // this.calculateHash(Integer.toString(tries)+key).hashCode(); long tmpKey = hashAlg.hash((numTries++) + key); // This echos the implementation of Long.hashCode() hashVal += (int) (tmpKey ^ (tmpKey >>> 32)); hashVal &= 0xffffffffL; /* truncate to 32-bits */ remainingTries--; }
/**
 * Maps a key to its vbucket index by masking the low bits of its hash.
 *
 * @param key the key to map
 * @return the vbucket index in [0, mask]
 */
@Override
public int getVbucketByKey(String key) {
  final long hashed = hashAlgorithm.hash(key);
  return ((int) hashed) & mask;
}
/**
 * Creates an iterator over replica candidates for one key.
 *
 * @param k the key being located
 * @param p which replica to prefer when resolving positions
 * @param t the number of tries allowed before giving up
 */
public ReplKetamaIterator(final String k, ReplicaPick p, final int t) {
  super();
  key = k;
  pick = p;
  remainingTries = t;
  hashVal = hashAlg.hash(k);
}
/**
 * Creates an iterator over continuum positions for one key.
 *
 * @param k the key being located
 * @param t the number of tries allowed before giving up
 */
public KetamaIterator(final String k, final int t) {
  super();
  key = k;
  remainingTries = t;
  hashVal = hashAlg.hash(k);
}
/**
 * Builds a key iterator seeded with the key's initial hash position.
 *
 * @param k the key being located
 * @param t how many candidate positions may be tried
 */
public KetamaIterator(final String k, final int t) {
  super();
  hashVal = hashAlg.hash(k);
  key = k;
  remainingTries = t;
}
/**
 * Picks a server index for a key by simple modulo over the node array.
 *
 * @param key the key to place
 * @return an index in [0, nodes.length); asserts (under -ea) that the hash
 *         was non-negative so the modulo did not go negative
 */
private int getServerForKey(String key) {
  final int idx = (int) (hashAlg.hash(key) % nodes.length);
  assert idx >= 0 : "Returned negative key for key " + key;
  assert idx < nodes.length : "Invalid server number " + idx + " for key " + key;
  return idx;
}