/** Returns the next 1-based instance number for {@code algoName}, bumping the
 *  shared per-algorithm counter.
 *  <p>Lock-free: uses a putIfAbsent/replace CAS loop instead of synchronizing
 *  on the map.  Locking a NonBlockingHashMap defeats its purpose and only
 *  excludes writers that take the same lock; the CAS loop is correct against
 *  any concurrent updater.
 *  @param algoName          algorithm name being counted
 *  @param instanceCounters  shared map of algorithm name to highest instance number
 *  @return the freshly claimed instance number (1 for the first instance) */
private int nextInstanceCounter(String algoName, NonBlockingHashMap<String, Integer> instanceCounters) {
  while( true ) {
    Integer cur = instanceCounters.get(algoName);
    if( cur == null ) {
      // First instance: try to install 1; null return means we won the race.
      if( instanceCounters.putIfAbsent(algoName, 1) == null ) return 1;
    } else {
      int next = cur + 1;
      // replace() succeeds only if nobody else bumped the counter meanwhile.
      if( instanceCounters.replace(algoName, cur, next) ) return next;
    }
  }
}
/** Returns the shared Enum[] for the given key, lazily creating it on first use.
 *  <p>Single map lookup on the hot path.  On a racing insert the loser's
 *  freshly-built array is discarded and every caller sees the winner's array.
 *  Unlike the containsKey/get/putIfAbsent/get sequence, this can never return
 *  null even if the entry is concurrently removed after installation (the
 *  installer keeps its own reference).
 *  @param eKey   key identifying this column set in the shared {@code _enums} map
 *  @param ncols  number of columns; array length used only on first creation */
private static Enum [] enums(Key eKey, int ncols){
  Enum[] enums = _enums.get(eKey);
  if( enums == null ) {
    Enum[] fresh = new Enum[ncols];
    for( int i = 0; i < fresh.length; ++i ) fresh[i] = new Enum();
    enums = _enums.putIfAbsent(eKey, fresh); // null => our array was installed
    if( enums == null ) enums = fresh;
  }
  return enums;
}
/** Removes all of the mappings from this map.
 *  Builds a brand-new minimum-size key/value table and CAS-installs it over
 *  the live {@code _kvs}, retrying until the CAS wins against any concurrent
 *  insert or resize that swapped {@code _kvs} underneath us. */
@Override public void clear() {
  // Smack a new empty table down
  Object[] newkvs = new NonBlockingHashMap(MIN_SIZE)._kvs;
  while( !CAS_kvs(_kvs,newkvs) ) // Spin until the clear works
    ;
}
/** Folds another GroupTask's group-to-value map into this one; the first
 *  value recorded for a group wins ({@code putIfAbsent}).
 *  Iterates the entry set to avoid a second hash lookup per group that the
 *  keySet+get pattern would incur. */
@Override public void reduce( GroupTask gt) {
  for( java.util.Map.Entry<Group, Double> e : gt._grp2val.entrySet() ) {
    Group g = e.getKey();
    Double val = e.getValue();
    // Defensive null checks preserved from the original keySet/get version.
    if( g != null && val != null ) _grp2val.putIfAbsent(g, val);
  }
}
/** Merges another pass-1 result into this one by folding the two group
 *  hash tables together.  Row numbers within a group's NewChunk stay
 *  monotonically increasing, which lets remote chunks be appended cheaply. */
@Override public void reduce( ddplyPass1 p1 ) {
  assert _groups != p1._groups;
  // Fold 2 hash tables together.
  // Get the larger hash table in m0, smaller in m1
  NonBlockingHashMap<Group,NewChunk> m0 = _groups;
  NonBlockingHashMap<Group,NewChunk> m1 = p1._groups;
  if( m0.size() < m1.size() ) { NonBlockingHashMap<Group,NewChunk> tmp=m0; m0=m1; m1=tmp; }
  // Iterate over smaller table, folding into larger table.
  for( Group g : m1.keySet() ) {
    NewChunk nc0 = m0.get(g);
    NewChunk nc1 = m1.get(g);
    // Missing or empty group on the big side: just adopt the small side's chunk.
    if( nc0 == null || nc0._len == 0) m0.put(g,nc1);
    // unimplemented: expected to blow out on large row counts, where we
    // actually need a collection of chunks, not 1 uber-chunk
    else if( _gatherRows ) {
      // All longs are monotonically in-order.  Not sure if this is needed
      // but it's an easy invariant to keep and it makes reading row#s easier.
      if( nc0._len > 0 && nc1._len > 0 && // len==0 for reduces from remotes (since no rows sent)
          nc0.at8_impl(nc0._len-1) >= nc1.at8_impl(0) ) nc0.addr(nc1);
      else if (nc1._len != 0) nc0.add (nc1);
    }
  }
  _groups = m0;
  p1._groups = null; // Drop the folded-away table so it can be GC'd
}
/** @return string form of the group table, or null if reduced away. */
@Override public String toString() { return _groups==null ? null : _groups.toString(); }
/** * Creates a shallow copy of this hashtable. All the structure of the * hashtable itself is copied, but the keys and values are not cloned. * This is a relatively expensive operation. * * @return a clone of the hashtable. */ @Override public Object clone() { try { // Must clone, to get the class right; NBHM might have been // extended so it would be wrong to just make a new NBHM. NonBlockingHashMap<TypeK,TypeV> t = (NonBlockingHashMap<TypeK,TypeV>) super.clone(); // But I don't have an atomic clone operation - the underlying _kvs // structure is undergoing rapid change. If I just clone the _kvs // field, the CHM in _kvs[0] won't be in sync. // // Wipe out the cloned array (it was shallow anyways). t.clear(); // Now copy sanely for( TypeK K : keySet() ) { final TypeV V = get(K); // Do an official 'get' t.put(K,V); } return t; } catch (CloneNotSupportedException e) { // this shouldn't happen, since we are Cloneable throw new InternalError(); } }
/** Tests whether the specified key is present in the table, compared via
 *  the <tt>equals</tt> method.
 *  @return <tt>true</tt> if the key maps to a value in this table
 *  @throws NullPointerException if the specified key is null */
@Override public boolean containsKey( Object key ) {
  final Object v = get(key);
  return v != null;
}
/** * Add key to this map (treated as hash set in this case). * All keys are added with value = 1. * @param str */ public int addKey(ValueString str) { // _map is shared and be cast to null (if enum is killed) -> grab local copy NonBlockingHashMap<ValueString, Integer> m = _map; if( m == null ) return Integer.MAX_VALUE; // Nuked already Integer res = m.get(str); if(res != null ) return res; // Recorded already assert str.get_length() < 65535; // Length limit so 65535 can be used as a sentinel Integer newVal = new Integer(_id.incrementAndGet()); res = m.putIfAbsent(new ValueString(str.toString()), newVal); if(res != null)return res; if(m.size() > H2O.DATA_MAX_FACTOR_LEVELS){ kill(); return Integer.MAX_VALUE; } return newVal; } public final boolean containsKey(Object key){return _map.containsKey(key);}
/** Collects this node's parse enums into the per-node slot of {@code _lEnums},
 *  deep-cloning them on the home node so later domain computation does not
 *  mutate the shared objects.  The entry is removed from the shared map once
 *  consumed. */
@Override public void map(Key key) {
  _lEnums = new Enum[H2O.CLOUD.size()][];
  if(MultiFileParseTask._enums.containsKey(_k)){
    _lEnums[H2O.SELF.index()] = _gEnums = MultiFileParseTask._enums.get(_k);
    // if we are the original node (i.e. there will be no sending over
    // wire), we have to clone the enums not to share the same object
    // (causes problems when computing column domain and renumbering maps).
    if( H2O.SELF.index() == _homeNode ) {
      _gEnums = _gEnums.clone();                 // shallow copy of the array itself
      for(int i = 0; i < _gEnums.length; ++i)
        _gEnums[i] = _gEnums[i].clone();         // deep-clone each Enum element
    }
    MultiFileParseTask._enums.remove(_k);        // consumed: drop from the shared map
  }
}
/** Interns an H2ONode for the given key: at most one H2ONode object ever
 *  exists per H2Okey.  On a lost putIfAbsent race the freshly allocated node
 *  (and its UNIQUE index) is discarded and the winner is returned.
 *  NOTE(review): a lost race leaves a permanent hole at IDX[idx] — appears
 *  intentional (indices are cheap), but worth confirming. */
public static final H2ONode intern( H2Okey key ) {
  H2ONode h2o = INTERN.get(key);
  if( h2o != null ) return h2o;                 // Fast path: already interned
  final int idx = UNIQUE.getAndIncrement();     // Claim a unique index up front
  h2o = new H2ONode(key,idx);
  H2ONode old = INTERN.putIfAbsent(key,h2o);
  if( old != null ) return old;                 // Lost the race; return the winner
  // We won: publish into the index table, growing it under the global lock.
  synchronized(H2O.class) {
    while( idx >= IDX.length )
      IDX = Arrays.copyOf(IDX,IDX.length<<1);   // Double until idx fits
    IDX[idx] = h2o;
  }
  return h2o;
}
/** Interns an H2ONode for the given address/port pair. */
public static final H2ONode intern( InetAddress ip, int port ) { return intern(new H2Okey(ip,port)); }
/** Java serialization: writes all live key/value pairs, each via an official
 *  'get', terminated by a null/null pair that the matching readObject uses as
 *  the end-of-data sentinel.  The wire format must not change. */
private void writeObject(java.io.ObjectOutputStream s) throws IOException  {
  s.defaultWriteObject();     // Nothing to write
  for( Object K : keySet() ) {
    final Object V = get(K);  // Do an official 'get'
    s.writeObject(K);         // Write the <TypeK,TypeV> pair
    s.writeObject(V);
  }
  s.writeObject(null);        // Sentinel to indicate end-of-data
  s.writeObject(null);        // ...written as a null K *and* null V pair
}
/** Pass 2, local work: closes out this node's per-group NewChunks into real
 *  vector chunks.  Every node writes a chunk for every group — an empty
 *  constant-0 chunk when it holds no rows for that group — so the resulting
 *  vectors have a chunk at every index. */
@Override public void lcompute() {
  ddplyPass1 p1 = ddplyPass1.PASS1TMP.remove(_p1key); // Claim (and drop) this node's pass-1 temp
  Futures fs = new Futures();
  int cidx = H2O.SELF.index();
  for( int i=0; i<_dss.length; i++ ) { // For all possible groups
    // Get the newchunk of local rows for a group
    Group g = new Group(_dss[i]);
    NewChunk nc = p1._groups == null ? null : p1._groups.get(g);
    if( nc != null && nc._len > 0 ) {
      // Fill in fields we punted on during construction
      nc._vec = _avs[i];        // Assign a proper vector
      nc.close(cidx,fs);        // Close & compress chunk
    } else {                    // All nodes have a chunk, even if its empty
      DKV.put(_avs[i].chunkKey(cidx), new C0LChunk(0,0),fs);
    }
  }
  fs.blockForPending();
  _p1key = null;                // No need to return these
  _dss = null;
  tryComplete();
}
@Override public void reduce( ddplyPass2 p2 ) {
/** Deserializes this Enum from the buffer.  Wire format: 1 byte killed-flag,
 *  4-byte max id, then repeated (2-byte length, length bytes of key, 4-byte
 *  value) records terminated by a 65535 length sentinel (hence the 65535
 *  key-length limit at write time). */
@Override public Enum read( AutoBuffer ab ) {
  assert _map == null || _map.size()==0;  // Must not clobber an already-populated map
  _map = null;
  if( ab.get1() == 1 ) return this;       // Killed?  Leave _map null
  _maxId = ab.get4();
  _map = new NonBlockingHashMap<ValueString, Integer>();
  int len = 0;
  while( (len = ab.get2()) != 65535 )     // Read until end-of-map marker
    _map.put(new ValueString(ab.getA1(len)),ab.get4());
  return this;
} }
/** Deserializes the group-to-value map.  Wire format: 4-byte count, then
 *  that many (Group, 8-byte double) pairs.  A zero count means no map was
 *  sent and {@code _grp2val} stays unset. */
@Override public GroupTask read( AutoBuffer ab ) {
  super.read(ab);
  int len = ab.get4();
  if( len == 0 ) return this;   // Empty: leave _grp2val as-is
  _grp2val= new NonBlockingHashMap<Group,Double>();
  for( int i=0; i<len; i++ )
    _grp2val.put(ab.get(Group.class),ab.get8d());
  return this;
}
@Override public void copyOver( Freezable dt ) {
/** Per-chunk pass: records the value of the last column for every distinct
 *  group formed from the remaining columns.  The first value seen for a
 *  group wins ({@code putIfAbsent}). */
@Override public void map(Chunk[] cs) {
  // Lazily allocate the shared result map and the default column list
  // (all columns except the trailing value column).
  if (_grp2val == null) _grp2val = new NonBlockingHashMap<Group, Double>();
  if (_cols == null) {
    _cols = new int[cs.length-1];
    for (int c = 0; c < _cols.length; ++c) _cols[c] = c;
  }
  final Chunk vals = cs[cs.length-1];  // Trailing column holds the values
  final int nrows = cs[0].len();
  for (int r = 0; r < nrows; ++r) {
    Group grp = new Group(_cols.length);
    grp.fill(r, cs, _cols);            // Build the group key from this row
    _grp2val.putIfAbsent(grp, vals.at0(r));
  }
}
/** ddply with no per-group function: produces one output row per distinct
 *  group over all columns of the frame on top of the environment stack.
 *  (Removed an unused {@code Thread cThr = Thread.currentThread()} local.) */
@Override void apply(Env env, int argcnt, ASTApply apply) {
  Frame fr = env.peekAry();
  // Group over every column of the frame
  int cols[] = new int[fr.numCols()];
  for( int i=0; i<cols.length; i++ ) cols[i]=i;
  ddplyPass1 p1 = new ddplyPass1( false, cols ).doAll(fr);
  // One output row per distinct group, carrying the group's column values
  double dss[][] = new double[p1._groups.size()][];
  int i=0;
  for( Group g : p1._groups.keySet() ) dss[i++] = g._ds;
  Frame res = FrameUtils.frame(fr._names,dss);
  env.poppush(2,res,null);  // Pop args, push result
} }
/** Add {@code o} to the set.
 *  Every element maps to the single shared sentinel {@code V}, so the set
 *  reduces to a map lookup: putIfAbsent returns null when {@code o} was
 *  absent (added now) and the sentinel {@code V} when already present —
 *  hence the reference comparison {@code != V}.
 *  @return <tt>true</tt> if {@code o} was added to the set, <tt>false</tt>
 *  if {@code o} was already in the set. */
public boolean add ( final E o ) { return _map.putIfAbsent(o,V) != V; }
/** Fetches a Value for {@code key} from {@code target}, coalescing concurrent
 *  fetches of the same key: at most one TaskGetKey RPC is in flight per key.
 *  A cached in-flight RPC is reused only if its priority is at least
 *  {@code priority}; otherwise a fresh, higher-priority RPC replaces it. */
public static Value get( H2ONode target, Key key, int priority ) {
  RPC<TaskGetKey> rpc, old;
  while( true ) {               // Repeat until we get a unique TGK installed per key
    // Do we have an old TaskGetKey in-progress?
    rpc = TGKS.get(key);
    if( rpc != null && rpc._dt._priority >= priority ) break; // Reuse the in-flight fetch
    old = rpc;
    // Make a new TGK.
    rpc = new RPC(target,new TaskGetKey(key,priority),1.0f);
    if( TGKS.putIfMatchUnlocked(key,rpc,old) == old ) {
      rpc.setTaskNum().call();  // Start the op
      break;                    // Successful install of a fresh RPC
    }
    // Lost the install race: loop and re-examine the cache.
  }
  Value val = rpc.get()._val;   // Block for, then fetch out the result
  TGKS.putIfMatchUnlocked(key,null,rpc); // Clear from cache
  return val;
}