/** Returns the cluster node that homes this key's value in the current cloud. */
public H2ONode home_node( ) {
  H2O cloud = H2O.CLOUD;
  int idx = home(cloud);      // index of the home node within the cloud member array
  return cloud._memary[idx];
}
public final RPC<Atomic<T>> fork(Key key) { _key = key; if( key.home() ) { // Key is home? compute2(); // Also, run it blocking/now return null; } else { // Else run it remotely return RPC.call(key.home_node(),this); } }
/** Return true if blocking is unnecessary.
 *  Alas, used in TWO places and the blocking API forces them to share here. */
@Override public boolean isReleasable() {
  int r = _rwlock.get();        // single read; decision is made on this snapshot
  if( _key.home() ) {           // Called from lock_and_invalidate
    // Home-key blocking: wait for active-GET count to fall to zero
    return r == 0;
  } else {                      // Called from start_put
    // Remote-key blocking: wait for active-PUT lock to hit -1
    assert r == 1 || r == -1;   // Either waiting (1) or done (-1) but not started(0)
    return r == -1;             // done!
  }
}
/** Possibly blocks the current thread. Returns true if isReleasable would
/** Resolve the producer key for {@code key}: run the lookup in place when the
 *  key is homed here, otherwise round-trip to the home node and block for the
 *  answer. */
public static Key fetch(Key key) {
  FetchProducer fp = new FetchProducer(key);
  if (!key.home())
    fp = RPC.call(key.home_node(), fp).get();  // remote: blocking RPC to home
  else
    fp.compute2();                             // local: execute directly
  return fp._producer;
}
/** Instances are created only through {@link #fetch}. */
private FetchProducer(Key k) { _key = k; }
@Override protected void setupLocal() { _model_mem_size = 0; for (int i=0; i< trees_so_far; ++i) { Key<CompressedTree>[] per_class = _treeKeys[i]; for (int j=0; j<per_class.length; ++j) { if (per_class[j] == null) continue; if (!per_class[j].home()) continue; // only look at homed tree keys _model_mem_size += DKV.get(per_class[j])._max; } } }
/** Run the map over the local Value when the key is homed here; otherwise
 *  forward this whole task over RPC to the home node, chaining its completion
 *  back to this task. */
@Override public final void compute2(){
  if(_key.home()){
    Value val = H2O.get(_key);  // may be null if the mapping is gone
    if(val != null) {
      V v = val.get();
      map(v);
    }
    tryComplete();
  } else
    new RPC(_key.home_node(),this).addCompleter(this).call();
} // onCompletion must be empty here, may be invoked twice (on remote and local)
/** Initialize the _replicas field for a PUT.  On the Home node (for remote
 *  PUTs), it is initialized to the one replica we know about, and not
 *  read-locked.  Used on a new Value about to be PUT on the Home node. */
void initReplicaHome( H2ONode h2o, Key key ) {
  assert key.home();
  assert _key == null; // This is THE initializing key write for serialized Values
  assert h2o != H2O.SELF; // Do not track self as a replica
  _key = key;
  // Set the replica bit for the one node we know about, and leave the
  // rest clear.
  _replicas.add(h2o._unique_idx);
  _rwlock.set(0); // No GETs are in-flight at this time.
  //System.out.println(key+", init "+_rwlock.get());
}
@Override public void store(Value v) { if( !v._key.home() ) return; throw H2O.unimpl(); // VA only }
/** Index of this key's home node within the given cloud's member array. */
public int home ( H2O cloud ) { return home (cloud_info(cloud)); }
/** Replica index of this key for the given cloud; delegates to the
 *  cached cloud-info overload. */
public int replica( H2O cloud ) { return replica(cloud_info(cloud)); }
/** Block this thread until all prior remote PUTs complete - to force
 *  remote-PUT ordering on the home node. */
void startRemotePut() {
  assert !_key.home();
  int x = 0;
  // assert I am waiting on threads with higher priority?
  // Spin until rwlock==-1 (PUT complete).  If somebody is already waiting
  // (rwlock==1) or we win the 0->1 CAS, park via the ManagedBlocker protocol.
  while( (x=_rwlock.get()) != -1 ) // Spin until rwlock==-1
    if( x == 1 || RW_CAS(0,1,"remote_need_notify") )
      try { ForkJoinPool.managedBlock(this); } catch( InterruptedException e ) { }
  // NOTE(review): InterruptedException is deliberately swallowed -- the outer
  // loop retries until rwlock hits -1; the interrupt status is not restored.
}
/** The PUT for this Value has completed.  Wakeup any blocked later PUTs. */
void completeRemotePut() {
  assert !_key.home();
  // Attempt an eager blind attempt, assuming no blocked pending notifies
  if( RW_CAS(0, -1,"remote_complete") ) return;
  // Somebody raised the lock to 1 (a blocked waiter): flip to done under the
  // monitor and wake everyone parked in startRemotePut.
  synchronized(this) {
    boolean res = RW_CAS(1, -1,"remote_do_notify");
    assert res; // Must succeed
    notifyAll(); // Wake up pending blocked PUTs
  }
}
/** Home-node side of a remote GET: look up the Value for the shipped key and,
 *  when returning a result, record the sender as a replica so later
 *  invalidates reach it. */
@Override public void dinvoke( H2ONode sender ) {
  _h2o = sender;
  Key k = _key;
  _key = null; // Not part of the return result
  assert k.home(); // Gets are always from home (less we do replication)
  // Shipping a result?  Track replicas so we can invalidate.  There's a
  // narrow race on a moving K/V mapping tracking this Value just as it gets
  // deleted - in which case, simply retry for another Value.
  do _val = H2O.get(k); // The return result
  while( _val != null && !_val.setReplica(sender) );
  tryComplete();
}
/** Never run locally; this task executes only via dinvoke on the home node. */
@Override public void compute2() { throw H2O.unimpl(); }
/** After the distributed work finishes, publish every locally-homed Vec back
 *  into the DKV and block until all writes land. */
@Override public final void lonCompletion( CountedCompleter caller ) {
  Futures pending = new Futures();
  for( Vec vec : vs ) {
    if( !vec._key.home() ) continue; // each node publishes only its own Vecs
    DKV.put(vec._key, vec, pending);
  }
  pending.blockForPending();
}
/** No per-node partial results to combine. */
@Override public void reduce(DRemoteTask drt){}
public Value(Key k, Freezable pojo) { _key = k; _pojo = pojo; _type = (short)pojo.frozenType(); _mem = pojo.write(new AutoBuffer()).buf(); _max = _mem.length; _persist = ICE; _rwlock = new AtomicInteger(0); _replicas = k.home() ? new NonBlockingSetInt() : null; } // Nullary constructor for weaving
@Override public void lcompute(){ long row=0; // Start row Key k; for( int i=0; i<nchunks; i++ ) { long nrow = chunk2StartElem(i+1); // Next row if((k = v0.chunkKey(i)).home()) DKV.put(k,new C0LChunk(l,(int)(nrow-row)),_fs); row = nrow; } tryComplete(); } @Override public void reduce(DRemoteTask drt){}
/** Build a Value for {@code pojo} keyed by {@code k}, tagged with the
 *  backend/persist byte {@code be}.  Chunks hand back their raw bytes
 *  directly; everything else is serialized through an AutoBuffer. */
public Value(Key k, Iced pojo, byte be ) {
  _key = k;
  _pojo = pojo;
  _type = (short)pojo.frozenType();
  _mem = (pojo instanceof Chunk)?((Chunk)pojo).getBytes():pojo.write(new AutoBuffer()).buf();
  _max = _mem.length;
  // For the ICE backend, assume new values are not-yet-written.
  // For HDFS & NFS backends, assume the data comes from global storage and
  // preserve the passed-in persist bits.
  byte p = (byte)(be&BACKEND_MASK);
  _persist = (p==ICE) ? p : be;
  _rwlock = new AtomicInteger(0);
  // Replica tracking exists only on the home node.
  _replicas = k.home() ? new NonBlockingSetInt() : null;
}
public Value(Key k, Freezable pojo) {
/** Non-home side of a key broadcast: install a bare, data-less Value stub for
 *  the key if nothing is cached yet; the real bytes can be fetched lazily. */
@Override public void dinvoke( H2ONode sender ) {
  assert !_key.home(); // No point in sending Keys to home
  // Update ONLY if there is not something there already.
  // Update only a bare Value, with no backing data.
  // Real data can be fetched on demand.
  Value val = new Value(_key,_max,null,_type,_be);
  Value old = H2O.raw_get(_key);
  // Retry while the slot looks empty but our putIfMatch lost a race.
  while( old == null && H2O.putIfMatch(_key,val,null) != null )
    old = H2O.raw_get(_key);
  _key = null; // No return result
  tryComplete();
}
/** Never run locally; this task executes only via dinvoke. */
@Override public void compute2() { throw H2O.unimpl(); }
/** Return chunk index of the first chunk on this node. Used to identify the trees built here.*/ private long getChunkId(final Frame fr) { Key[] keys = new Key[fr.anyVec().nChunks()]; for(int i = 0; i < fr.anyVec().nChunks(); ++i) { keys[i] = fr.anyVec().chunkKey(i); } for(int i = 0; i < keys.length; ++i) { if (keys[i].home()) return i; } return -99999; //throw new Error("No key on this node"); }
@Override public void lcompute(){ getFutures(); long row=0; // Start row Key k; for( int i=0; i<nchunks; i++ ) { long nrow = chunk2StartElem(i+1); // Next row if((k = v0.chunkKey(i)).home()) DKV.put(k,new C0DChunk(d,(int)(nrow-row)),_fs); row = nrow; } tryComplete(); } @Override public void reduce(DRemoteTask drt){}
/** One descent pass: while passes remain (_count < 0 means unlimited) and the
 *  owning job is absent or still running, fan out one DescentChunk task per
 *  local chunk set and resubmit this task for the next pass; otherwise, on the
 *  home node only, mark the trainer done. */
@Override public void compute2() {
  if( (_count < 0 || --_count >= 0) && (_node._job == null || Job.isRunning(_node._job)) ) {
    for( Chunk[] cs : _node._chunks ) {
      DescentChunk task = new DescentChunk();
      task._node = _node;
      task._cs = cs;
      H2O.submitTask(task);
    }
    reinitialize();
    H2O.submitTask(this); // requeue self for the next pass
  } else {
    if( _node._key.home() )
      _node._trainer.done();
  }
}
}