static public Value put( Key key, Value val, Futures fs, boolean dontCache ) {
  assert key != null;
  assert val == null || val._key == key : "non-matching keys " + ((Object)key).toString() + " != " + ((Object)val._key).toString();
  while( true ) {
    Value old = H2O.raw_get(key); // Raw-get: do not lazy-manifest if overwriting
    Value res = DputIfMatch(key,val,old,fs,dontCache);
    if( res == old ) return old;  // PUT is globally visible now?
    if( val != null && val._key != key ) key = val._key;
  }
}
static public Value put( Key key, Iced v ) { return put(key,v,null); }
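// --- Usage sketch (illustrative only; not part of the original file) ---
// A minimal round-trip through the store.  `MyPojo` is a hypothetical Iced
// subclass, and Value.get() deserializing the stored POJO is an assumption
// based on the classic H2O API.  put() above spins on DputIfMatch until the
// write wins, so the mapping is globally visible once it returns.
static Iced putGetExample() {
  Key k = Key.make("example-key");   // hypothetical key name
  DKV.put(k, new MyPojo());          // globally visible once put() returns
  Value v = DKV.get(k);              // may be served from the local cache
  return v.get();                    // deserialize the stored POJO (assumed API)
}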
static public Value DputIfMatch( Key key, Value val, Value old, Futures fs, boolean dontCache ) {
  // First: I must block repeated remote PUTs to the same Key until all prior
  // ones complete - the home node needs to see these PUTs in order.
  // Repeated PUTs on the home node are already ordered.
  if( old != null && !key.home() ) old.startRemotePut();

  // Local update first, since this is a weak update
  Value res = H2O.putIfMatch(key,val,old);
  if( res != old )              // Failed?
    return res;                 // Return fail value

  // Check for trivial success: no need to invalidate remotes if the new
  // value equals the old.
  if( old != null && old == val ) return old; // Trivial success?
  if( old != null && val != null && val.equals(old) )
    return old;                 // Less trivial success, but no network i/o

  // Before we start doing distributed writes... block until the cloud
  // stabilizes.  After we start doing distributed writes, it is an error to
  // change cloud shape - the distributed writes will be in the wrong place.
  Paxos.lockCloud();

  // The 'D' part of DputIfMatch: do Distribution.
  // If PUT is on     HOME, invalidate remote caches.
  // If PUT is on non-HOME, replicate/push to HOME.
  if( key.home() ) {            // On HOME?
    if( old != null ) old.lockAndInvalidate(H2O.SELF,fs);
  } else {                      // On non-HOME?
    // Start a write, but do not block for it
    TaskPutKey.put(key.home_node(),key,val,fs,dontCache);
  }
  return old;
}
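// --- Usage sketch (illustrative only; `casRetryExample` is a hypothetical
// helper) ---
// DputIfMatch is a distributed compare-and-swap: on failure it returns the
// Value that won the race, so callers loop until their write sticks, exactly
// as put() above does.  This shows the bare retry idiom, using only the
// 4-argument overload already used elsewhere in this file.
static Value casRetryExample( Key key, Value newVal, Futures fs ) {
  Value old = DKV.get(key);            // read the current mapping
  while( true ) {
    Value res = DKV.DputIfMatch(key,newVal,old,fs);
    if( res == old ) return old;       // CAS succeeded; old mapping returned
    old = res;                         // lost a race; retry against the winner
  }
}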
@Override public final void compute2( ) {
  assert _key.home() : "Atomic on wrong node; SELF="+H2O.SELF+", key_home="+_key.home_node()+", key_is_home="+_key.home()+", class="+getClass();
  Futures fs = new Futures();   // Must block on all invalidates eventually
  Value val1 = DKV.get(_key);
  while( true ) {
    // Run the user's function.  This is supposed to read-only from val1 and
    // return a new val2 to atomically install.
    Value val2 = atomic(val1);
    if( val2 == null ) break;   // ABORT: they gave up
    assert val1 != val2;        // No returning the same Value
    // Attempt atomic update
    Value res = DKV.DputIfMatch(_key,val2,val1,fs);
    if( res == val1 ) {         // Success?
      onSuccess(val1);          // Call user's post-XTN function
      fs.blockForPending();     // Block for any pending invalidates on the atomic update
      break;
    }
    val1 = res;                 // Otherwise try again with the current value
  }                             // ...and retry
  _key = null;                  // No need for the key anymore; don't send it back
  tryComplete();                // Tell F/J this task is done
}
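// --- Usage sketch (illustrative only; the Value(Key,byte[]) constructor and
// invoke() shapes are assumptions based on the classic H2O API) ---
// A transaction subclasses Atomic and overrides atomic(): treat the old
// Value as read-only and return a brand-new Value to install, or null to
// abort.  compute2() above supplies the retry loop and the home-node assert.
static class AppendByte extends Atomic {
  @Override public Value atomic( Value old ) {
    byte[] bits = old == null ? new byte[0] : old.memOrLoad();   // never mutate old
    byte[] bits2 = java.util.Arrays.copyOf(bits, bits.length+1); // fresh copy, one byte longer
    return new Value(_key, bits2);  // new Value for DputIfMatch to install
  }
}
// Typical call site: new AppendByte().invoke(key);  // runs on key's home node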
@Override public Value chunkIdx( int cidx ) {
  final long nchk = nChunks();
  assert 0 <= cidx && cidx < nchk;
  Key dkey = chunkKey(cidx);
  Value val1 = DKV.get(dkey);     // Check for an existing one... will fetch data as needed
  if( val1 != null ) return val1; // Found an existing one?
  // Lazily create a DVec for this chunk
  int len = (int)(cidx < nchk-1 ? CHUNK_SZ : (_len-chunk2StartElem(cidx)));
  // DVec is just the raw file data with a null-compression scheme
  Value val2 = new Value(dkey,len,null,TypeMap.C1NCHUNK,_be);
  val2.setdsk();                  // It is already on disk.
  // If not-home, then block till the Key is everywhere.  Most calls here are
  // from the parser loading a text file, and the parser splits the work such
  // that most puts here are on home - so this is a simple speed optimization:
  // do not make a Futures nor block on it on home.
  Futures fs = dkey.home() ? null : new Futures();
  // Atomically insert: fails on a race, but then return the old version
  Value val3 = DKV.DputIfMatch(dkey,val2,null,fs);
  if( !dkey.home() && fs != null ) fs.blockForPending();
  return val3 == null ? val2 : val3;
}
}
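// --- Usage sketch (illustrative only; `FileVec` stands in for whatever class
// declares chunkIdx() above, and memOrLoad() pulling the bytes from disk is
// an assumption) ---
// Walking every chunk forces each one to manifest via chunkIdx(); all chunks
// are CHUNK_SZ bytes except possibly the last, per the `len` computation above.
static long totalBytesExample( FileVec v ) {
  long sum = 0;
  for( int cidx = 0; cidx < v.nChunks(); cidx++ )
    sum += v.chunkIdx(cidx).memOrLoad().length; // manifests + loads each chunk
  return sum;
}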