/** Tests if the key is in the table, using the <tt>equals</tt> method.
 * @return <tt>true</tt> if the key is in the table, as determined by the <tt>equals</tt> method
 * @throws NullPointerException if the specified key is null */
@Override
public boolean containsKey( Object key ) { return get(key) != null; }
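// Hedged usage sketch (not part of the original source): because containsKey is
// implemented as get(key) != null, a mapping to a null value would be
// indistinguishable from an absent key; NonBlockingHashMap rejects null values,
// so the test is sound. The method and map names below are hypothetical.
private static void containsKeyExample() {
  NonBlockingHashMap<String,Integer> nbhm = new NonBlockingHashMap<String,Integer>();
  nbhm.put("x", 1);
  assert  nbhm.containsKey("x"); // present: get("x") returns 1, not null
  assert !nbhm.containsKey("y"); // absent:  get("y") returns null
}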
/** @return the matching element for {@code o} if {@code o} is in the set, or {@code null} if it is not */
public E get( final E o ) { return (E)_map.get(o); }
public static Value get    ( Key key ) { return STORE.get (key); }
public static Value raw_get( Key key ) { return STORE.get (key); }
public static Key   getk   ( Key key ) { return STORE.getk(key); }
public int getTokenId(ValueString str) {
  Integer I = _map.get(str);
  assert I != null : "missing value! " + str.toString();
  return I;
}
private void writeObject(java.io.ObjectOutputStream s) throws IOException {
  s.defaultWriteObject();    // Nothing to write
  for( Object K : keySet() ) {
    final Object V = get(K); // Do an official 'get'
    s.writeObject(K);        // Write the <TypeK,TypeV> pair
    s.writeObject(V);
  }
  s.writeObject(null);       // Sentinel to indicate end-of-data
  s.writeObject(null);
}
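// Hedged sketch of the matching deserializer (an assumed reconstruction, not
// copied from the source): it must read <key,value> pairs until it hits the
// null/null sentinel pair written above, assuming the class's internal
// initialize(MIN_SIZE) helper rebuilds an empty table first.
private void readObject(java.io.ObjectInputStream s) throws IOException, ClassNotFoundException {
  s.defaultReadObject();        // Read nothing
  initialize(MIN_SIZE);         // Assumed internal setup of the empty _kvs array
  for( ;; ) {
    final TypeK K = (TypeK) s.readObject();
    final TypeV V = (TypeV) s.readObject();
    if( K == null ) break;      // Hit the end-of-data sentinel
    put(K,V);                   // Official 'put' repopulates the table
  }
}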
private int nextInstanceCounter(String algoName, NonBlockingHashMap<String, Integer> instanceCounters) {
  synchronized (instanceCounters) {
    int instanceNum = 1;
    if (instanceCounters.containsKey(algoName))
      instanceNum = instanceCounters.get(algoName) + 1;
    instanceCounters.put(algoName, instanceNum);
    return instanceNum;
  }
}
public static final H2ONode intern( H2Okey key ) {
  H2ONode h2o = INTERN.get(key);
  if( h2o != null ) return h2o;
  final int idx = UNIQUE.getAndIncrement();
  h2o = new H2ONode(key,idx);
  H2ONode old = INTERN.putIfAbsent(key,h2o);
  if( old != null ) return old; // Lost the install race; adopt the winner
  synchronized(H2O.class) {     // Grow the index array under lock if needed
    while( idx >= IDX.length ) IDX = Arrays.copyOf(IDX,IDX.length<<1);
    IDX[idx] = h2o;
  }
  return h2o;
}
public static final H2ONode intern( InetAddress ip, int port ) { return intern(new H2Okey(ip,port)); }
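// Hedged illustration (not from the source) of the interning idiom above:
// check-then-putIfAbsent means two threads may both construct a candidate,
// but exactly one wins the install; the loser discards its copy and adopts
// the winner's. All names below are hypothetical.
static <K,V> V internLike( NonBlockingHashMap<K,V> cache, K key, V fresh ) {
  V hit = cache.get(key);           // Fast path: already interned
  if( hit != null ) return hit;
  V old = cache.putIfAbsent(key, fresh);
  return old != null ? old : fresh; // Lost the race: keep the first-installed value
}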
@Override public void reduce( GroupTask gt ) {
  for (Group g : gt._grp2val.keySet()) {
    Double val = gt._grp2val.get(g);
    if (g != null && val != null) _grp2val.putIfAbsent(g, val);
  }
}
private static Enum [] enums(Key eKey, int ncols) {
  if( !_enums.containsKey(eKey) ) {
    Enum [] enums = new Enum[ncols];
    for( int i = 0; i < enums.length; ++i ) enums[i] = new Enum();
    _enums.putIfAbsent(eKey, enums);
  }
  return _enums.get(eKey);
}
@Override public void map(Chunk[] cs) {
  Chunk c = cs[col_id];
  int rows = cs[0].len();
  for (int r = 0; r < rows; ++r) {
    if (c.isNA0(r) || (c._vec.isEnum() && c._vec.domain()[(int) c.at0(r)].equals("NA"))) {
      Group g = new Group(_cols.length);
      g.fill(r, cs, _cols);
      Double rv = grp2val._grp2val.get(g); // One lookup instead of two
      if (rv == null) continue;            // No replacement value for this group
      c.set0(r, rv);
    }
  }
}
}.doAll(source);
static public int onIce(String className) {
  Integer I = MAP.get(className);
  if( I != null ) return I;
  // Need to install a new cloud-wide type ID for className
  assert H2O.CLOUD.size() > 0 : "No cloud when getting type id for "+className;
  int id = -1;
  if( H2O.CLOUD.leader() != H2O.SELF ) // Not leader?
    id = FetchId.fetchId(className);
  return install(className,id);
}
/**
 * Creates a shallow copy of this hashtable. All the structure of the
 * hashtable itself is copied, but the keys and values are not cloned.
 * This is a relatively expensive operation.
 *
 * @return a clone of the hashtable.
 */
@Override
public Object clone() {
  try {
    // Must clone, to get the class right; NBHM might have been
    // extended so it would be wrong to just make a new NBHM.
    NonBlockingHashMap<TypeK,TypeV> t = (NonBlockingHashMap<TypeK,TypeV>) super.clone();
    // But I don't have an atomic clone operation - the underlying _kvs
    // structure is undergoing rapid change. If I just clone the _kvs
    // field, the CHM in _kvs[0] won't be in sync.
    //
    // Wipe out the cloned array (it was shallow anyways).
    t.clear();
    // Now copy sanely
    for( TypeK K : keySet() ) {
      final TypeV V = get(K); // Do an official 'get'
      t.put(K,V);
    }
    return t;
  } catch (CloneNotSupportedException e) {
    // this shouldn't happen, since we are Cloneable
    throw new InternalError();
  }
}
/**
 * Add a key to this map (treated as a hash set in this case).
 * Each new key is assigned the next value from an auto-incrementing counter.
 * @param str the key to add
 * @return the id assigned to the key, or Integer.MAX_VALUE if the map was killed
 */
public int addKey(ValueString str) {
  // _map is shared and may be set to null (if the enum is killed) -> grab a local copy
  NonBlockingHashMap<ValueString, Integer> m = _map;
  if( m == null ) return Integer.MAX_VALUE; // Nuked already
  Integer res = m.get(str);
  if( res != null ) return res; // Recorded already
  assert str.get_length() < 65535; // Length limit so 65535 can be used as a sentinel
  Integer newVal = new Integer(_id.incrementAndGet());
  res = m.putIfAbsent(new ValueString(str.toString()), newVal);
  if( res != null ) return res;
  if( m.size() > H2O.DATA_MAX_FACTOR_LEVELS ) {
    kill();
    return Integer.MAX_VALUE;
  }
  return newVal;
}
public final boolean containsKey(Object key) { return _map.containsKey(key); }
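// Hedged caller sketch (not from the source): addKey's Integer.MAX_VALUE return
// doubles as a "too many levels / enum killed" sentinel, so callers are expected
// to check it rather than treat it as a real id. Names below are hypothetical.
void recordLevel( ValueString token ) {
  int id = addKey(token);
  if( id == Integer.MAX_VALUE ) {
    // Column blew past H2O.DATA_MAX_FACTOR_LEVELS (or was killed concurrently);
    // a caller would fall back to treating the column as non-categorical.
    return;
  }
  // ... use id as the compact categorical level number ...
}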
@Override public void reduce( ddplyPass1 p1 ) {
  assert _groups != p1._groups;
  // Fold 2 hash tables together.
  // Get the larger hash table in m0, smaller in m1
  NonBlockingHashMap<Group,NewChunk> m0 =    _groups;
  NonBlockingHashMap<Group,NewChunk> m1 = p1._groups;
  if( m0.size() < m1.size() ) { NonBlockingHashMap<Group,NewChunk> tmp=m0; m0=m1; m1=tmp; }
  // Iterate over the smaller table, folding into the larger table.
  for( Group g : m1.keySet() ) {
    NewChunk nc0 = m0.get(g);
    NewChunk nc1 = m1.get(g);
    if( nc0 == null || nc0._len == 0 ) m0.put(g,nc1);
    // unimplemented: expected to blow out on large row counts, where we
    // actually need a collection of chunks, not 1 uber-chunk
    else if( _gatherRows ) {
      // All longs are monotonically in-order. Not sure if this is needed
      // but it's an easy invariant to keep and it makes reading row#s easier.
      if( nc0._len > 0 && nc1._len > 0 && // len==0 for reduces from remotes (since no rows sent)
          nc0.at8_impl(nc0._len-1) >= nc1.at8_impl(0) ) nc0.addr(nc1);
      else if( nc1._len != 0 ) nc0.add(nc1);
    }
  }
  _groups = m0;
  p1._groups = null;
}
@Override public String toString() { return _groups==null ? null : _groups.toString(); }
@Override public AutoBuffer write( AutoBuffer ab ) {
  super.write(ab);
  if( _grp2val == null ) return ab.put4(0);
  ab.put4(_grp2val.size());
  for( Group g : _grp2val.keySet() ) {
    ab.put(g);
    ab.put8d(_grp2val.get(g));
  }
  return ab;
}
@Override public void map(Key key) {
  _lEnums = new Enum[H2O.CLOUD.size()][];
  if( MultiFileParseTask._enums.containsKey(_k) ) {
    _lEnums[H2O.SELF.index()] = _gEnums = MultiFileParseTask._enums.get(_k);
    // If we are the original node (i.e. there will be no sending over the
    // wire), we have to clone the enums so as not to share the same object
    // (sharing causes problems when computing column domains and renumbering maps).
    if( H2O.SELF.index() == _homeNode ) {
      _gEnums = _gEnums.clone();
      for( int i = 0; i < _gEnums.length; ++i ) _gEnums[i] = _gEnums[i].clone();
    }
    MultiFileParseTask._enums.remove(_k);
  }
}
public static Value get( H2ONode target, Key key, int priority ) {
  RPC<TaskGetKey> rpc, old;
  while( true ) {            // Repeat until we get a unique TGK installed per key
    // Do we have an old TaskGetKey in-progress?
    rpc = TGKS.get(key);
    if( rpc != null && rpc._dt._priority >= priority ) break;
    old = rpc;
    // Make a new TGK.
    rpc = new RPC(target,new TaskGetKey(key,priority),1.0f);
    if( TGKS.putIfMatchUnlocked(key,rpc,old) == old ) {
      rpc.setTaskNum().call(); // Start the op
      break;                   // Successful install of a fresh RPC
    }
  }
  Value val = rpc.get()._val;  // Block for, then fetch out the result
  TGKS.putIfMatchUnlocked(key,null,rpc); // Clear from cache
  return val;
}
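// Hedged analogy (not from the source): putIfMatchUnlocked behaves like an
// atomic compare-and-swap on a single map slot. With a plain ConcurrentMap the
// same request-deduplication idiom would look roughly like this; all names
// below are hypothetical.
static <K,V> V installOnce( java.util.concurrent.ConcurrentMap<K,V> cache, K key, V expected, V fresh ) {
  // Install 'fresh' only if the slot still holds 'expected' (null == absent).
  boolean won = (expected == null)
    ? cache.putIfAbsent(key, fresh) == null
    : cache.replace(key, expected, fresh);
  return won ? fresh : cache.get(key); // Loser re-reads the current occupant
}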
@Override public AutoBuffer write( AutoBuffer ab ) {
  if( _map == null ) return ab.put1(1); // Killed-map marker
  ab.put1(0);                           // Not killed
  ab.put4(maxId());
  for( ValueString key : _map.keySet() )
    ab.put2((char)key.get_length()).putA1(key.get_buf(),key.get_length()).put4(_map.get(key));
  return ab.put2((char)65535); // End-of-map marker
}
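// Hedged sketch of the matching reader (not copied from the source): it must
// mirror the format above -- a killed-map byte, then maxId, then
// <len,bytes,id> triples until the 65535 length sentinel. The readSketch name,
// ValueString(byte[]) constructor, and AutoBuffer getters are assumptions
// inferred from the put* calls used above.
public static AutoBuffer readSketch( AutoBuffer ab, NonBlockingHashMap<ValueString,Integer> map ) {
  if( ab.get1() == 1 ) return ab;   // Map was killed; nothing follows
  int maxId = ab.get4();
  while( true ) {
    int len = ab.get2();
    if( len == 65535 ) break;       // End-of-map sentinel
    byte[] buf = ab.getA1(len);
    map.put(new ValueString(buf), ab.get4());
  }
  return ab;
}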
@Override public void lcompute() {
  ddplyPass1 p1 = ddplyPass1.PASS1TMP.remove(_p1key);
  Futures fs = new Futures();
  int cidx = H2O.SELF.index();
  for( int i=0; i<_dss.length; i++ ) { // For all possible groups
    // Get the NewChunk of local rows for a group
    Group g = new Group(_dss[i]);
    NewChunk nc = p1._groups == null ? null : p1._groups.get(g);
    if( nc != null && nc._len > 0 ) {
      // Fill in fields we punted on during construction
      nc._vec = _avs[i]; // Assign a proper vector
      nc.close(cidx,fs); // Close & compress chunk
    } else {
      // All nodes have a chunk, even if it's empty
      DKV.put(_avs[i].chunkKey(cidx), new C0LChunk(0,0),fs);
    }
  }
  fs.blockForPending();
  _p1key = null; // No need to return these
  _dss = null;
  tryComplete();
}
@Override public void reduce( ddplyPass2 p2 ) {