// Completion hook for the domain-collection task: attach the computed
// domain to the vector and publish the updated vector to the K/V store.
// (The trailing extra brace closes an enclosing scope not visible here.)
@Override public void onCompletion(CountedCompleter cc) {
  _v._domain = _dom;   // install the domain gathered by this task
  UKV.put(_v._key,_v); // persist the updated vector
} }
/** Store the given value under the given key in the distributed K/V store.
 *  Both arguments are narrowed to the store's concrete types. */
@Override public void put( Object key, Object val ) {
  final Key k = (Key) key;
  final Value v = (Value) val;
  UKV.put(k, v);
}

/** Look up the Value currently mapped to the given key (null if absent). */
@Override public Value getValue( Object key ) {
  final Key k = (Key) key;
  return UKV.getValue(k);
}
/** Save this tree into the DKV store under the given Key.
 *  @param k destination key
 *  @return the same key {@code k}, for call chaining */
public Key save(Key k) {
  final CompressedTree compressed = compress();
  UKV.put(k, compressed);
  return k;
}
/** String-keyed convenience overload: store {@code v} under {@code Key.make(s)}. */
static public void put(String s, Value v) {
  put(Key.make(s), v);
}

/** String-keyed convenience overload: drop the mapping for {@code Key.make(s)}. */
static public void remove(String s) {
  remove(Key.make(s));
}
/** Wrap {@code val} in a Value and do the DKV put; completion is tracked by
 *  {@code fs}.  DISALLOW this interface for Lockables (original comment is
 *  truncated in this fragment — Lockables must use their own update path;
 *  TODO confirm against full source). */
static public void put( Key key, Iced val, Futures fs ) {
  final Value boxed = new Value(key, val);
  put(key, boxed, fs);
}
static public void put( Key key, Value val ) { Futures fs = new Futures(); put(key,val,fs); fs.blockForPending(); // Block for remote-put to complete } static public void put( Key key, Iced val, Futures fs ) { put(key,new Value(key, val),fs); }
public static void stall_till_cloudsize(int x, long ms) { H2O.waitForCloudSize(x, ms); UKV.put(Job.LIST, new Job.List()); // Jobs.LIST must be part of initial keys } }
/** Construct a parse job targeting {@code dest} over the input file keys.
 *  FIX: in the collapsed source the "// Job progress" comment swallowed the
 *  rest of the body; reconstructed with {@code _progress} as a field
 *  assignment (NOTE(review): "Key" read as part of the comment phrase
 *  "Job progress Key" — confirm against the original formatted source). */
private ParseDataset2(Key dest, Key[] fkeys) {
  destination_key = dest;
  // Job progress Key
  _progress = Key.make((byte) 0, Key.JOB);
  UKV.put(_progress, ParseProgress.make(fkeys));
}
/** Put an Iced under {@code key}; a null value means "remove the mapping". */
static public void put( Key key, Iced fr ) {
  if( fr != null ) UKV.put(key, new Value(key, fr));
  else             UKV.remove(key);
}
/** Put a Freezable under {@code key}; a null value means "remove the mapping". */
static public void put( Key key, Freezable fr ) {
  if( fr != null ) UKV.put(key, new Value(key, fr));
  else             UKV.remove(key);
}
/** Start this task based on given top-level fork-join task representing job computation. * @param fjtask top-level job computation task. * @return this job in {@link JobState#RUNNING} state * * @see JobState * @see H2OCountedCompleter */ public /** FIXME: should be final or at least protected */ Job start(final H2OCountedCompleter fjtask) { assert state == JobState.CREATED : "Trying to run job which was already run?"; assert fjtask != null : "Starting a job with null working task is not permitted! Fix you API"; _fjtask = fjtask; start_time = System.currentTimeMillis(); state = JobState.RUNNING; // Save the full state of the job UKV.put(self(), this); // Update job list new TAtomic<List>() { @Override public List atomic(List old) { if( old == null ) old = new List(); Key[] jobs = old._jobs; old._jobs = Arrays.copyOf(jobs, jobs.length + 1); old._jobs[jobs.length] = job_key; return old; } }.invoke(LIST); return this; }
/** Compress this tree and publish the result under a freshly made internal
 *  key.
 *  FIX: removed the unused local {@code AutoBuffer bs} — it was allocated and
 *  never referenced.
 *  @return the key under which the compressed tree was stored */
public Key toCompressedKey() {
  TreeModel.CompressedTree compressedTree = compress();
  Key key = Key.make((byte)1, Key.DFJ_INTERNAL_USER, H2O.SELF);
  UKV.put(key, new Value(key, compressedTree));
  return key;
}
public ChunkProgressJob(long chunksTotal, Key destinationKey) { destination_key = destinationKey; _progress = Key.make(Key.make()._kb, (byte) 0, Key.DFJ_INTERNAL_USER, destinationKey.home_node()); UKV.put(_progress, new ChunkProgress(chunksTotal)); }
/** Run all grid jobs with bounded parallelism: keep at most
 *  {@code gridParallelism()} jobs in flight, forking new ones while capacity
 *  remains and otherwise blocking on the oldest outstanding job.  Stops early
 *  if this job is no longer running. */
@Override protected void execImpl() {
  UKV.put(destination_key, this);
  final int maxInFlight = jobs[0].gridParallelism();
  int completed = 0, launched = 0;
  while( completed < jobs.length && isRunning(self()) ) {
    final boolean haveCapacity = launched - completed < maxInFlight && launched < jobs.length;
    if( haveCapacity ) {
      jobs[launched++].fork();
    } else {
      try {
        jobs[completed++].get(); // block on the oldest in-flight job
      } catch( Exception e ) {
        throw new RuntimeException(e);
      }
    }
  }
}
/** * Creates a new transformation from given values to given indexes of * given domain. * @param values values being mapped from * @param indexes values being mapped to * @param domain domain of new vector * @return always return a new vector which maps given values into a new domain */ public Vec makeTransf(final int[] values, final int[] indexes, final String[] domain) { if( _espc == null ) throw H2O.unimpl(); Vec v0 = new TransfVec(values, indexes, domain, this._key, group().addVecs(1)[0],_espc); UKV.put(v0._key,v0); return v0; } /**
/** Re-install each tree's compressed form into the K/V store after a model is
 *  deserialized; blocks until all puts complete.
 *  FIX: corrected the assert-message typo "Incosistency" -> "Inconsistency". */
@Override protected AutoBuffer postLoad(Model m, AutoBuffer ab) {
  int ntrees = ab.get4();
  Futures fs = new Futures();
  for( int i = 0; i < ntrees; i++ ) {
    CompressedTree[] ts = ab.getA(CompressedTree.class);
    for( int j = 0; j < ts.length; j++ ) {
      Key k = ((TreeModel) m).treeKeys[i][j];
      // Key and serialized tree must be both null or both non-null.
      assert k == null && ts[j] == null || k != null && ts[j] != null
        : "Inconsistency in model serialization: key is null but model is not null, OR vice versa!";
      if( k != null ) UKV.put(k, ts[j], fs);
    }
  }
  fs.blockForPending();
  return ab;
} };
public static void stall_till_cloudsize(int x, long ms) { H2O.waitForCloudSize(x, ms); UKV.put(Job.LIST, new Job.List()); // Jobs.LIST must be part of initial keys }
/** Store the request's value under a key with the requested replication
 *  factor and report the resulting key, replication, and size as JSON. */
@Override public Response serve() {
  JsonObject response = new JsonObject();
  // Re-make the key with the caller-supplied replication factor.
  Key k = Key.make(_key.value()._kb, (byte) (int) _rf.value());
  // NOTE(review): getBytes() uses the platform default charset — confirm UTF-8 is intended.
  Value v = new Value(k, _value.value().getBytes());
  UKV.put(k, v);
  response.addProperty(KEY, k.toString());
  response.addProperty(REPLICATION_FACTOR, k.desired());
  response.addProperty(VALUE_SIZE, v._max);
  return Response.done(response);
}
/** After deserialization, republish each tree's raw bytes and every
 *  per-class compressed DTree into the K/V store; blocks until all puts
 *  complete. */
@Override protected AutoBuffer postLoad(Model m, AutoBuffer ab) {
  int ntrees = ab.get4();
  Futures fs = new Futures();
  for( int t = 0; t < ntrees; ++t ) {
    // Raw serialized tree bytes go straight to DKV.
    DKV.put(t_keys[t], new Value(t_keys[t], ab.getA1()), fs);
    for( int c = 0; c < nclasses(); ++c ) {
      final Key dk = dtreeKeys[t][c];
      if( dk == null ) continue;
      UKV.put(dk, new Value(dk, ab.get(DTree.TreeModel.CompressedTree.class)), fs);
    }
  }
  fs.blockForPending();
  return ab;
} };
// Fragment (enclosing method not visible here): package (d, r) into a Result
// and publish it under resultKey, blocking until the distributed put lands.
Result rez = new Result(d, r);
Futures fs = new Futures();
UKV.put(resultKey, rez, fs);
fs.blockForPending();