/** Merge pending-task lists as part of doing a 'reduce' step */ public void add( Futures fs ) { if( fs == null ) return; assert fs != this; // No recursive death, please for( int i=0; i<fs._pending_cnt; i++ ) add(fs._pending[i]); // NPE here if using a dead Future fs._pending = null; // You are dead, should never be inserted into again }
/** Record a single Future this task must also block on; null is ignored. */
public void alsoBlockFor( Future f ) {
  if( f != null )
    getFutures().add(f);
}
/** Merge a whole Futures collection into this task's blocking set; null is ignored. */
public void alsoBlockFor( Futures fs ) {
  if( fs != null )
    getFutures().add(fs);
}
/**
 * Ship {key,val} to the given node via a TaskPutKey RPC.  The resulting
 * Future is tracked in 'fs' (when non-null) so the caller can block later.
 * @param dontCache hint telling the remote side not to cache the value
 */
static void put( H2ONode h2o, Key key, Value val, Futures fs, boolean dontCache) {
  Future f = RPC.call(h2o, new TaskPutKey(key, val, dontCache));
  if( fs != null )
    fs.add(f);
}
/**
 * Tell the given node to drop its cached copy of 'key' via a
 * TaskInvalidateKey RPC; the Future is tracked in 'fs' when non-null.
 */
static void invalidate( H2ONode h2o, Key key, Futures fs ) {
  Future f = RPC.call(h2o, new TaskInvalidateKey(key));
  if( fs != null )
    fs.add(f);
}
public boolean Next(Futures fs) throws IOException { if (_start_index < _rows_lst.size()) { if (_start_index + _batch_size > _rows_lst.size()) _start_index = _rows_lst.size() - _batch_size; // Multi-Threaded data preparation for (int i = 0; i < _batch_size; i++) fs.add(H2O.submitTask(new FrameDataConverter(i, _rows_lst.get(_start_index+i), _dinfo, _label_lst==null?-1:_label_lst.get(_start_index + i), _data[which()], _label[which()], _cache))); fs.blockForPending(); flip(); _start_index += _batch_size; return true; } else { return false; } }
public boolean Next(Futures fs) throws IOException { if (_start_index < _num_obs) { if (_start_index + _batch_size > _num_obs) _start_index = _num_obs - _batch_size; // Multi-Threaded data preparation for (int i = 0; i < _batch_size; i++) fs.add(H2O.submitTask(new TextConverter(i, _start_index + i, _txt_list.get(_start_index +i), _label_lst == null?Float.NaN : _label_lst.get(_start_index +i),_data[which()], _wordsPerLine, _label[which()], _cache))); fs.blockForPending(); flip(); _start_index = _start_index + _batch_size; return true; } else { return false; } }
private Vec[] vecs_impl() { // Load all Vec headers; load them all in parallel by spawning F/J tasks. final Vec [] vecs = new Vec[_keys.length]; Futures fs = new Futures(); for( int i=0; i<_keys.length; i++ ) { final int ii = i; final Key k = _keys[i]; H2OCountedCompleter t = new H2OCountedCompleter() { // We need higher priority here as there is a danger of deadlock in // case of many calls from MRTask2 at once (e.g. frame with many // vectors invokes rollup tasks for all vectors in parallel). Should // probably be done in CPS style in the future @Override public byte priority(){return H2O.MIN_HI_PRIORITY;} @Override public void compute2() { Value v = DKV.get(k); if( v==null ) Log.err("Missing vector #" + ii + " (" + _names[ii] + ") during Frame fetch: "+k); vecs[ii] = v.get(); tryComplete(); } }; H2O.submitTask(t); fs.add(t); } fs.blockForPending(); return vecs; } // Force a cache-flush & reload, assuming vec mappings were altered remotely
public boolean Next(Futures fs) throws IOException { if (_start_index < _num_obs) { if (_start_index + _batch_size > _num_obs) _start_index = _num_obs - _batch_size; // Multi-Threaded data preparation Conversion conv = new Conversion(); conv._dim._height=this._height; conv._dim._width=this._width; conv._dim._channels=this._channels; for (int i = 0; i < _batch_size; i++) fs.add(H2O.submitTask(new ImageConverter(i, _img_lst.get(_start_index +i), _label_lst ==null?Float.NaN: _label_lst.get(_start_index +i),conv, _data[which()], _meanData, _label[which()], _cache))); fs.blockForPending(); flip(); _start_index = _start_index + _batch_size; return true; } else { return false; } }
public Vec rollupStats(Futures fs) { Vec vthis = DKV.get(_key).get(); if( vthis._naCnt==-2 ) throw new IllegalArgumentException("Cannot ask for roll-up stats while the vector is being actively written."); if( vthis._naCnt>= 0 ) // KV store has a better answer return vthis == this ? this : setRollupStats(vthis); // KV store reports we need to recompute RollupStats rs = new RollupStats().dfork(this); if(fs != null) fs.add(rs); else setRollupStats(rs.getResult()); return this; }
// Fragment — the enclosing method and the loop's closing braces are outside this view.
// First loop: shrink the partition count p until each block of rpb rows carries at
// least Gram.MIN_TSKSZ work (or p bottoms out at 1).
// Second loop: fork one BlockTask per full block, tracking each in fs; the last
// (possibly larger) block is presumably handled after this fragment — TODO confirm.
while ( tjR*(rpb=(N - tjR)/p)<Gram.MIN_TSKSZ && p>1) --p; while (p-- > 1) { fs.add(new BlockTask(xx,i,i+rpb,j,tjR).fork()); i += rpb;
// Fragment — the enclosing method and the loop's closing braces are outside this view.
// First loop: shrink the partition count p until each block of rpb rows carries at
// least Gram.MIN_TSKSZ work (or p bottoms out at 1).
// Second loop: fork one BlockTask per full block, tracking each in fs; the last
// (possibly larger) block is presumably handled after this fragment — TODO confirm.
while ( tjR*(rpb=(N - tjR)/p)<Gram.MIN_TSKSZ && p>1) --p; while (p-- > 1) { fs.add(new BlockTask(xx,i,i+rpb,j,tjR).fork()); i += rpb;
// Fragment: submit the training task 'ntt' and track it in fs, then bump the
// locally-processed counter by one batch.  NOTE(review): the counter is advanced
// before the task completes — presumably intentional bookkeeping; confirm.
fs.add(H2O.submitTask(ntt)); _localmodel.add_processed_local(iter._batch_size);
// Fragment: fire a RemoteExec RPC at cloud node 'nidx' (stashing the task in 're')
// and track the returned Future in fs so a later blockForPending() waits on it.
fs.add(RPC.call(H2O.CLOUD._memary[nidx],(re=new RemoteExec((grpnum-1),p2._nlocals[nidx],g._ds,fg,envkey))));
// Fragment: track the per-variable task in fs so a later blockForPending() waits on it.
fs.add(task4var);
public boolean Next(Futures fs) throws IOException { if (_start_index < _rows_lst.size()) { if (_start_index + _batch_size > _rows_lst.size()) _start_index = _rows_lst.size() - _batch_size; // Multi-Threaded data preparation for (int i = 0; i < _batch_size; i++) fs.add(H2O.submitTask(new FrameDataConverter(i, _rows_lst.get(_start_index+i), _dinfo, _label_lst==null?-1:_label_lst.get(_start_index + i), _data[which()], _label[which()], _cache))); fs.blockForPending(); flip(); _start_index += _batch_size; return true; } else { return false; } }
public boolean Next(Futures fs) throws IOException { if (_start_index < _num_obs) { if (_start_index + _batch_size > _num_obs) _start_index = _num_obs - _batch_size; // Multi-Threaded data preparation for (int i = 0; i < _batch_size; i++) fs.add(H2O.submitTask(new TextConverter(i, _start_index + i, _txt_list.get(_start_index +i), _label_lst == null?Float.NaN : _label_lst.get(_start_index +i),_data[which()], _wordsPerLine, _label[which()], _cache))); fs.blockForPending(); flip(); _start_index = _start_index + _batch_size; return true; } else { return false; } }
public boolean Next(Futures fs) throws IOException { if (_start_index < _num_obs) { if (_start_index + _batch_size > _num_obs) _start_index = _num_obs - _batch_size; // Multi-Threaded data preparation Conversion conv = new Conversion(); conv._dim._height=this._height; conv._dim._width=this._width; conv._dim._channels=this._channels; for (int i = 0; i < _batch_size; i++) fs.add(H2O.submitTask(new ImageConverter(i, _img_lst.get(_start_index +i), _label_lst ==null?Float.NaN: _label_lst.get(_start_index +i),conv, _data[which()], _meanData, _label[which()], _cache))); fs.blockForPending(); flip(); _start_index = _start_index + _batch_size; return true; } else { return false; } }
// Fragment — the enclosing method and the loop's closing braces are outside this view.
// First loop: shrink the partition count p until each block of rpb rows carries at
// least Gram.MIN_TSKSZ work (or p bottoms out at 1).
// Second loop: fork one BlockTask per full block, tracking each in fs; the last
// (possibly larger) block is presumably handled after this fragment — TODO confirm.
while ( tjR*(rpb=(N - tjR)/p)<Gram.MIN_TSKSZ && p>1) --p; while (p-- > 1) { fs.add(new BlockTask(xx,i,i+rpb,j,tjR).fork()); i += rpb;