/**
 * Asynchronously ships {@code dtask} to {@code target} and returns the in-flight RPC handle.
 * @param target node that will execute the task
 * @param dtask  task to execute remotely
 * @return the started (not yet completed) RPC; callers block via {@code get()} if needed
 */
public static <DT extends DTask> RPC<DT> call(H2ONode target, DT dtask) {
  // Diamond operator instead of raw RPC: preserves <DT> and removes the unchecked assignment.
  return new RPC<>(target, dtask).call();
}
private final void doTest(){ _droppedPackets = new int[_N]; Arrays.fill(_droppedPackets,-1); _pings = new RPC[_msgSzs.length][_N]; // addToPendingCount(_msgSzs.length*_N - 1); for(int i = 0; i < _msgSzs.length; ++i) for(int j = 0; j < _N; ++j) // instead of synchronization, just wait for predetermined amount of time _pings[i][j] = new RPC(H2O.CLOUD._memary[_tgtId],new UDPPing(_msgSzs[i]))/*.addCompleter(this)*/.call(); try { Thread.sleep(5000); } catch (InterruptedException e) {} // if not done yet, finish no matter what (racy but we don't care here - only a debug tool, does not have to be precise) // setPendingCount(0); }
private final void doTest() { _droppedPackets = new int[_N]; Arrays.fill(_droppedPackets, -1); _pings = new RPC[_msgSzs.length][_N]; // addToPendingCount(_msgSzs.length*_N - 1); for (int i = 0; i < _msgSzs.length; ++i) for (int j = 0; j < _N; ++j) // instead of synchronization, just wait for predetermined amount of time _pings[i][j] = new RPC(H2O.CLOUD._memary[_tgtId], new UDPPing(_msgSzs[i]))/*.addCompleter(this)*/.call(); try { Thread.sleep(5000); } catch (InterruptedException e) { } // if not done yet, finish no matter what (racy but we don't care here - only a debug tool, does not have to be precise) // setPendingCount(0); }
private final RPC<T> remote_compute( ArrayList<Key> keys ) { if( keys.size() == 0 ) return null; DRemoteTask rpc = clone(); rpc.setCompleter(null); rpc._keys = keys.toArray(new Key[keys.size()]); addToPendingCount(1); // Block until the RPC returns // Set self up as needing completion by this RPC: when the ACK comes back // we'll get a wakeup. return new RPC(keys.get(0).home_node(), rpc).addCompleter(this).call(); }
// Builds the i-th GLM task and launches it: submitted to the local FJ pool when the target
// is this node, otherwise shipped to node n via a fire-and-forget RPC (result ignored here;
// presumably delivered through the Callback completer — confirm against GLMT).
private void forkDTask(final int i, H2ONode n){
  _tasks[i] = new GLMT(new Callback(n,i),_glms[i],_lambda);
  // Invariant: once a concrete lambda is set, the previous result must carry a full gradient.
  assert Double.isNaN(_lambda) || _tasks[i]._glm._lastResult._fullGrad != null;
  if(n == H2O.SELF) H2O.submitTask(_tasks[i]);
  else new RPC(n,_tasks[i]).call();
}
// NOTE(review): Callback's body continues beyond this chunk and is not visible here.
class Callback extends H2OCallback<H2OCountedCompleter> {
/** Triggers a JVM garbage collection on every node of the cloud, one node at a time. */
@Override public RequestBuilders.Response serve(){
  for (H2ONode member : H2O.CLOUD._memary) {
    GCTask gcTask = new GCTask();
    RPC<GCTask> rpc = new RPC<GCTask>(member, gcTask);
    rpc.call().get(); // block until this node's GC finishes before moving to the next
  }
  return RequestBuilders.Response.doneEmpty();
}
}
private final RPC<T> remote_compute( int nlo, int nhi ) { // No remote work? if( !(nlo < nhi) ) return null; int node = addShift(nlo); assert node != H2O.SELF.index(); T rpc = clone(); rpc.setCompleter(null); rpc._nhi = (short)nhi; addToPendingCount(1); // Not complete until the RPC returns // Set self up as needing completion by this RPC: when the ACK comes back // we'll get a wakeup. return new RPC(H2O.CLOUD._memary[node], rpc).addCompleter(this).call(); }
/** * Send a message from this node to all nodes in serial (including self), and receive it back * @param msg_size message size in bytes * @return Time in nanoseconds that it took to send and receive the message (one per node) */ private static double[] send_recv_all(int msg_size, int repeats) { byte[] payload = new byte[msg_size]; new Random().nextBytes(payload); final int siz = H2O.CLOUD.size(); double[] times = new double[siz]; for (int i = 0; i < siz; ++i) { //loop over compute nodes Log.debug("NetworkTest send_recv_all starting PingPong to node " + i + "..."); H2ONode node = H2O.CLOUD._memary[i]; Timer t = new Timer(); for (int l = 0; l < repeats; ++l) { Log.debug("NetworkTest send_recv_all starting msg_size " + msg_size + " bytes, iteration "+ l +" of "+ repeats + " ..."); PingPongTask ppt = new PingPongTask(payload); //same payload for all nodes new RPC<PingPongTask>(node, ppt).call().get(); //blocking send Log.debug("NetworkTest send_recv_all completed iteration "+ l +" of "+ repeats); } times[i] = (double) t.nanos() / repeats; Log.debug("NetworkTest send_recv_all completed PingPong to node " + i); } return times; }
// Runs the drop test on the source node: executes directly when this node is the source,
// otherwise ships a clone of this tester to the source node and copies the results back
// into this instance when the remote task completes.
@Override public void compute2() {
  if(_srcId == H2O.SELF.index()) {
    doTest();
    tryComplete();
  } else {
    _done = true;
    final UDPDropTester t = (UDPDropTester) clone();
    new RPC(H2O.CLOUD._memary[_srcId], t).addCompleter(new H2OCountedCompleter(this) {
      @Override public void compute2() { } // completion-only hook; no local work
      // Pull the remote clone's results into this local instance once the RPC finishes.
      @Override public void onCompletion(CountedCompleter cc) { copyOver(t); }
    }).call();
  }
}
}
// Executes map() on the key's home node: runs locally when this node is home, otherwise
// forwards this whole task to the home node via RPC with itself registered as completer.
@Override public final void compute2(){
  if(_key.home()){
    Value val = H2O.get(_key);
    if(val != null) { // key may be absent; map() is applied only to an existing value
      V v = val.get();
      map(v);
    }
    tryComplete();
  } else
    new RPC(_key.home_node(),this).addCompleter(this).call();
}
// onCompletion must be empty here, may be invoked twice (on remote and local)
// Fetches the Value for key from the target node while deduplicating concurrent fetches:
// at most one TaskGetKey per key lives in the TGKS cache, and an in-flight fetch is reused
// unless its priority is below the requested one.
public static Value get( H2ONode target, Key key, int priority ) {
  RPC<TaskGetKey> rpc, old;
  while( true ) {               // Repeat until we get a unique TGK installed per key
    // Do we have an old TaskGetKey in-progress?
    rpc = TGKS.get(key);
    if( rpc != null && rpc._dt._priority >= priority ) break; // reuse in-flight fetch of sufficient priority
    old = rpc;
    // Make a new TGK.
    rpc = new RPC(target,new TaskGetKey(key,priority),1.0f);
    // CAS-style install: only the thread whose putIfMatch succeeds starts the op.
    if( TGKS.putIfMatchUnlocked(key,rpc,old) == old ) {
      rpc.setTaskNum().call();  // Start the op
      break;                    // Successful install of a fresh RPC
    }
  }
  Value val = rpc.get()._val;              // Block for, then fetch out the result
  TGKS.putIfMatchUnlocked(key,null,rpc);   // Clear from cache
  return val;
}
@Override protected Response serve() { if ((node_idx.value() < 0) || (node_idx.value() >= H2O.CLOUD.size())) { throw new IllegalArgumentException("Illegal node_idx for this H2O cluster (must be from 0 to " + H2O.CLOUD.size() + ")"); } H2ONode node = H2O.CLOUD._memary[node_idx.value()]; GetTicksTask ppt = new GetTicksTask(); Log.trace("GetTicksTask starting to node " + node_idx.value() + "..."); // Synchronous RPC call to get ticks from remote (possibly this) node. new RPC<GetTicksTask>(node, ppt).call().get(); Log.trace("GetTicksTask completed to node " + node_idx.value()); long[][] cpuTicks = ppt._cpuTicks; // Stuff tick information into json response. JsonArray j = new JsonArray(); for (long[] arr : cpuTicks) { JsonArray j2 = new JsonArray(); j2.add(new JsonPrimitive(arr[0])); j2.add(new JsonPrimitive(arr[1])); j2.add(new JsonPrimitive(arr[2])); j2.add(new JsonPrimitive(arr[3])); j.add(j2); } JsonObject o = new JsonObject(); o.add("cpuTicks", j); return Response.done(o); } }
/**
 * Reports whether the GPU with the given id is available on the given node, checking
 * locally when possible and otherwise probing the node with a blocking RPC.
 */
static boolean hasGPU(H2ONode node, int gpu_id) {
  final boolean available;
  if (!H2O.SELF.equals(node)) {
    HasGPUTask probe = new HasGPUTask(gpu_id);
    new RPC<>(node, probe).call().get(); // blocking remote check
    available = probe._hasGPU;
  } else {
    available = hasGPU(gpu_id); // local single-argument overload
  }
  Log.debug("Availability of GPU (id=" + gpu_id + ") on node " + node + ": " + available);
  return available;
}
/**
 * Retrieves the serialized booster: directly from this node's updater when the booster
 * lives here, otherwise via a blocking RPC to the owning node.
 */
public byte[] getBoosterBytes() {
  final H2ONode boosterNode = getBoosterNode();
  if (H2O.SELF.equals(boosterNode)) {
    return XGBoost.getRawArray(XGBoostUpdater.getUpdater(_modelKey).getBooster());
  }
  Log.debug("Booster will be retrieved from a remote node, node=" + boosterNode);
  FetchBoosterTask fetch = new FetchBoosterTask(_modelKey);
  return new RPC<>(boosterNode, fetch).call().get()._boosterBytes;
}