/** Returns the key view of the backing {@code map}. */
@Override
public Set<K> keySet() {
    final Set<K> keys = map.keySet();
    return keys;
}
/**
 * Returns the partition names: one entry per token key of
 * {@code tokenToPartitionMap}, rendered via {@link BigInteger#toString()}.
 *
 * @return a new, independent {@code ArrayList} of partition-name strings
 */
@Override
public List<String> getPartitionNames() {
    // Plain presized loop instead of Guava Lists.newArrayList(Collections2.transform(...)):
    // identical eager copy, no anonymous Function class, no third-party machinery.
    List<String> names = new ArrayList<>(tokenToPartitionMap.size());
    for (BigInteger token : tokenToPartitionMap.keySet()) {
        names.add(token.toString());
    }
    return names;
}
/**
 * Returns the partition names: one entry per {@code RingPosition} key of
 * {@code tokenToPartitionMap}, rendered via its {@code toString()}.
 *
 * @return a new, independent {@code ArrayList} of partition-name strings
 */
@Override
public List<String> getPartitionNames() {
    // Plain presized loop instead of Guava Lists.newArrayList(Collections2.transform(...)):
    // identical eager copy, no anonymous Function class, no third-party machinery.
    List<String> names = new ArrayList<>(tokenToPartitionMap.size());
    for (RingPosition position : tokenToPartitionMap.keySet()) {
        names.add(position.toString());
    }
    return names;
}
/** Returns an iterator over this collection's elements, backed by {@code _map}'s key view. */
public Iterator<E> iterator() {
    return _map.keySet().iterator();
}
/** Iterates the elements by delegating to the key iterator of {@code _map}. */
public Iterator<E> iterator() {
    final Iterator<E> it = _map.keySet().iterator();
    return it;
}
/** Returns an iterator over the keys of the backing {@code _map}. */
public Iterator<E> iterator() {
    return _map.keySet().iterator();
}
private void writeObject(java.io.ObjectOutputStream s) throws IOException { s.defaultWriteObject(); // Nothing to write for( Object K : keySet() ) { final Object V = get(K); // Do an official 'get' s.writeObject(K); // Write the <TypeK,TypeV> pair s.writeObject(V); } s.writeObject(null); // Sentinel to indicate end-of-data s.writeObject(null); }
private void writeObject(java.io.ObjectOutputStream s) throws IOException { s.defaultWriteObject(); // Nothing to write for (Object K : keySet()) { final Object V = get(K); // Do an official 'get' s.writeObject(K); // Write the <TypeK,TypeV> pair s.writeObject(V); } s.writeObject(null); // Sentinel to indicate end-of-data s.writeObject(null); }
private void writeObject(java.io.ObjectOutputStream s) throws IOException { s.defaultWriteObject(); // Nothing to write for( Object K : keySet() ) { final Object V = get(K); // Do an official 'get' s.writeObject(K); // Write the <TypeK,TypeV> pair s.writeObject(V); } s.writeObject(null); // Sentinel to indicate end-of-data s.writeObject(null); }
private void writeObject(java.io.ObjectOutputStream s) throws IOException { s.defaultWriteObject(); // Nothing to write for( Object K : keySet() ) { final Object V = get(K); // Do an official 'get' s.writeObject(K); // Write the <TypeK,TypeV> pair s.writeObject(V); } s.writeObject(null); // Sentinel to indicate end-of-data s.writeObject(null); }
/**
 * @return a collection of dirty CFIDs for this segment file.
 */
public synchronized Collection<UUID> getDirtyCFIDs() {
    // Fast path: with no clean markers at all (or nothing dirty), every
    // dirty id qualifies as-is.
    if (cfClean.isEmpty() || cfDirty.isEmpty())
        return cfDirty.keySet();

    List<UUID> result = new ArrayList<>(cfDirty.size());
    for (Map.Entry<UUID, AtomicInteger> entry : cfDirty.entrySet()) {
        UUID cfId = entry.getKey();
        AtomicInteger cleanPos = cfClean.get(cfId);
        // Still dirty unless a clean marker exists at or past the dirty position.
        if (cleanPos == null || cleanPos.intValue() < entry.getValue().intValue())
            result.add(cfId);
    }
    return result;
}
/**
 * @return a collection of dirty CFIDs for this segment file.
 */
public synchronized Collection<UUID> getDirtyCFIDs() {
    // Fast path: no clean intervals recorded (or nothing dirty) means all
    // dirty ids stand.
    if (cfClean.isEmpty() || cfDirty.isEmpty())
        return cfDirty.keySet();

    List<UUID> result = new ArrayList<>(cfDirty.size());
    for (Map.Entry<UUID, IntegerInterval> entry : cfDirty.entrySet()) {
        UUID cfId = entry.getKey();
        IntegerInterval.Set cleanSet = cfClean.get(cfId);
        // Dirty unless the clean set fully covers the dirty interval.
        if (cleanSet == null || !cleanSet.covers(entry.getValue()))
            result.add(cfId);
    }
    return result;
}
/**
 * @return a collection of dirty CFIDs for this segment file.
 */
public synchronized Collection<UUID> getDirtyCFIDs() {
    // If there are no clean intervals (or no dirty entries), every dirty
    // key is reported unchanged.
    if (cfClean.isEmpty() || cfDirty.isEmpty())
        return cfDirty.keySet();

    List<UUID> dirtyIds = new ArrayList<>(cfDirty.size());
    for (Map.Entry<UUID, IntegerInterval> dirty : cfDirty.entrySet()) {
        IntegerInterval.Set clean = cfClean.get(dirty.getKey());
        // An id stays dirty when no clean set exists or coverage is partial.
        if (clean == null || !clean.covers(dirty.getValue()))
            dirtyIds.add(dirty.getKey());
    }
    return dirtyIds;
}
/**
 * @return a collection of dirty CFIDs for this segment file.
 */
public synchronized Collection<UUID> getDirtyCFIDs() {
    // Shortcut: absent any clean bookkeeping (or with nothing dirty),
    // return the dirty key set directly.
    if (cfClean.isEmpty() || cfDirty.isEmpty())
        return cfDirty.keySet();

    List<UUID> out = new ArrayList<>(cfDirty.size());
    for (Map.Entry<UUID, IntegerInterval> e : cfDirty.entrySet()) {
        UUID id = e.getKey();
        IntegerInterval dirtyInterval = e.getValue();
        IntegerInterval.Set cleanSet = cfClean.get(id);
        // Report the id unless its dirty interval is entirely covered clean.
        if (cleanSet == null || !cleanSet.covers(dirtyInterval))
            out.add(id);
    }
    return out;
}
/** * Creates a shallow copy of this hashtable. All the structure of the * hashtable itself is copied, but the keys and values are not cloned. * This is a relatively expensive operation. * * @return a clone of the hashtable. */ @Override public Object clone() { try { // Must clone, to get the class right; NBHM might have been // extended so it would be wrong to just make a new NBHM. NonBlockingHashMap<TypeK,TypeV> t = (NonBlockingHashMap<TypeK,TypeV>) super.clone(); // But I don't have an atomic clone operation - the underlying _kvs // structure is undergoing rapid change. If I just clone the _kvs // field, the CHM in _kvs[0] won't be in sync. // // Wipe out the cloned array (it was shallow anyways). t.clear(); // Now copy sanely for( TypeK K : keySet() ) { final TypeV V = get(K); // Do an official 'get' t.put(K,V); } return t; } catch (CloneNotSupportedException e) { // this shouldn't happen, since we are Cloneable throw new InternalError(); } }
/** * Creates a shallow copy of this hashtable. All the structure of the * hashtable itself is copied, but the keys and values are not cloned. * This is a relatively expensive operation. * * @return a clone of the hashtable. */ @Override public Object clone() { try { // Must clone, to get the class right; NBHM might have been // extended so it would be wrong to just make a new NBHM. NonBlockingHashMap<TypeK,TypeV> t = (NonBlockingHashMap<TypeK,TypeV>) super.clone(); // But I don't have an atomic clone operation - the underlying _kvs // structure is undergoing rapid change. If I just clone the _kvs // field, the CHM in _kvs[0] won't be in sync. // // Wipe out the cloned array (it was shallow anyways). t.clear(); // Now copy sanely for( TypeK K : keySet() ) { final TypeV V = get(K); // Do an official 'get' t.put(K,V); } return t; } catch (CloneNotSupportedException e) { // this shouldn't happen, since we are Cloneable throw new InternalError(); } }
/** * @param ring */ @Override public synchronized void setHosts(Collection<Host> ring) { // Temporary list of hosts to remove. Any host not in the new ring will // be removed Set<Host> hostsToRemove = Sets.newHashSet(hosts.keySet()); // Add new hosts. boolean changed = false; for (Host host : ring) { if (addHost(host, false)) changed = true; hostsToRemove.remove(host); } // Remove any hosts that are no longer in the ring for (Host host : hostsToRemove) { removeHost(host, false); changed = true; } if (changed) { topology.setPools(hosts.values()); rebuildPartitions(); } }
/** * @param ring */ @Override public synchronized void setHosts(Collection<Host> ring) { // Temporary list of hosts to remove. Any host not in the new ring will // be removed Set<Host> hostsToRemove = Sets.newHashSet(hosts.keySet()); // Add new hosts. boolean changed = false; for (Host host : ring) { if (addHost(host, false)) changed = true; hostsToRemove.remove(host); } // Remove any hosts that are no longer in the ring for (Host host : hostsToRemove) { removeHost(host, false); changed = true; } if (changed) { topology.setPools(hosts.values()); rebuildPartitions(); } }
/** * Creates a shallow copy of this hashtable. All the structure of the * hashtable itself is copied, but the keys and values are not cloned. * This is a relatively expensive operation. * * @return a clone of the hashtable. */ @Override public Object clone() { try { // Must clone, to get the class right; NBHM might have been // extended so it would be wrong to just make a new NBHM. NonBlockingHashMap<TypeK,TypeV> t = (NonBlockingHashMap<TypeK,TypeV>) super.clone(); // But I don't have an atomic clone operation - the underlying _kvs // structure is undergoing rapid change. If I just clone the _kvs // field, the CHM in _kvs[0] won't be in sync. // // Wipe out the cloned array (it was shallow anyways). t.clear(); // Now copy sanely for( TypeK K : keySet() ) { final TypeV V = get(K); // Do an official 'get' t.put(K,V); } return t; } catch (CloneNotSupportedException e) { // this shouldn't happen, since we are Cloneable throw new InternalError(); } }
/** * Creates a shallow copy of this hashtable. All the structure of the hashtable itself is copied, but the keys and * values are not cloned. This is a relatively expensive operation. * * @return a clone of the hashtable. */ @Override public Object clone() { try { // Must clone, to get the class right; NBHM might have been // extended so it would be wrong to just make a new NBHM. NonBlockingHashMap<TypeK, TypeV> t = (NonBlockingHashMap<TypeK, TypeV>) super.clone(); // But I don't have an atomic clone operation - the underlying _kvs // structure is undergoing rapid change. If I just clone the _kvs // field, the CHM in _kvs[0] won't be in sync. // // Wipe out the cloned array (it was shallow anyways). t.clear(); // Now copy sanely for (TypeK K : keySet()) { final TypeV V = get(K); // Do an official 'get' t.put(K, V); } return t; } catch (CloneNotSupportedException e) { // this shouldn't happen, since we are Cloneable throw new InternalError(); } }