@Override
protected void setupLocal() {
  _model = _modelKey.get();
  assert _model != null;
}
public final Frame blending() {
  return _blending == null ? null : _blending.get();
}
public CompressedTree ctree(int tnum, int knum) {
  return _treeKeys[tnum][knum].get();
}

public String toStringTree(int tnum, int knum) {
  return ctree(tnum, knum).toString(this);
}
public Leaderboard leaderboard() {
  return leaderboard == null ? null : leaderboard._key.get();
}

public Model leader() {
  return leaderboard() == null ? null : leaderboard().getLeader();
}
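// A minimal usage sketch, assuming `aml` is an instance of the enclosing class after a
// finished run; the variable names are hypothetical. Both accessors above are null-safe,
// so a single null check covers runs that produced no models.
Model best = aml.leader();
if (best != null)
  System.out.println("Leader: " + best._key);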
@Override
public void setupLocal() {
  if (_jobKey != null)
    _job = _jobKey.get();
}
/**
 * Converts a given tree of the ensemble to a user-understandable representation.
 * @param tidx tree index
 * @param cls tree class
 * @return instance of SharedTreeSubgraph
 */
public SharedTreeSubgraph getSharedTreeSubgraph(final int tidx, final int cls) {
  if (tidx < 0 || tidx >= _output._ntrees) {
    throw new IllegalArgumentException("Invalid tree index: " + tidx +
        ". Tree index must be in range [0, " + (_output._ntrees - 1) + "].");
  }
  final CompressedTree auxCompressedTree = _output._treeKeysAux[tidx][cls].get();
  return _output._treeKeys[tidx][cls].get()
      .toSharedTreeSubgraph(auxCompressedTree, _output._names, _output._domains);
}
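// A minimal usage sketch, assuming `gbm` is a trained SharedTreeModel subclass (e.g. a
// GBMModel) and that class index 0 is valid for its response; names are hypothetical.
SharedTreeSubgraph sg = gbm.getSharedTreeSubgraph(0 /*first tree*/, 0 /*class*/);
System.out.println("Tree 0 has " + sg.nodesArray.size() + " nodes");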
public void deleteBaseModelPredictions() {
  if (_output._base_model_predictions_keys != null) {
    for (Key<Frame> key : _output._base_model_predictions_keys) {
      if (_output._levelone_frame_id != null && key.get() != null)
        Frame.deleteTempFrameAndItsNonSharedVecs(key.get(), _output._levelone_frame_id);
      else
        key.remove();
    }
    _output._base_model_predictions_keys = null;
  }
}
Model getLeader() {
  Key<Model>[] modelKeys = getModelKeys();
  if (modelKeys == null || modelKeys.length == 0)
    return null;
  return modelKeys[0].get();
}
public void stop() {
  _jobKey.get().stop();
}
@Override
public int nfeatures() {
  return _output_frame.get().numCols() - 1; // exclude the counts column
}

@Override
public ModelCategory getModelCategory() {
  return ModelCategory.Clustering;
}
void updateTiming(Key<Job> job_key) {
  final long now = System.currentTimeMillis();
  long start_time_current_model = job_key.get().start_time();
  total_training_time_ms = total_checkpointed_run_time_ms + (now - start_time_current_model);
  checkTimingConsistency();
}

private void updateTiming(Key<Job> job_key) {
  final long now = System.currentTimeMillis();
  long start_time_current_model = job_key.get().start_time();
  total_training_time_ms = total_checkpointed_run_time_ms + (now - start_time_current_model);
  checkTimingConsistency();
}
protected Futures remove_impl(Futures fs) {
  Frame varCumhaz2 = _output._var_cumhaz_2 != null ? _output._var_cumhaz_2.get() : null;
  if (varCumhaz2 != null)
    varCumhaz2.remove(fs);
  super.remove_impl(fs);
  return fs;
}
private static Frame getAdaptedTrainFrame(GBMModel m) {
  Frame f = m._parms._train.get();
  String[] warns = m.adaptTestForTrain(f, false, false);
  assert warns == null || warns.length == 0; // adapting the training frame itself must not warn
  return f;
}
@Override
protected void assignNode(int tidx, int cls, CompressedTree tree, double[] input, NewChunk out) {
  CompressedTree auxTree = _auxTreeKeys[tidx][cls].get();
  assert auxTree != null;
  // With the leaf-assignment flag set, scoreTree returns a value encoding the decision
  // path; the auxiliary tree bits then translate that path into the leaf node id.
  final double d = SharedTreeMojoModel.scoreTree(tree._bits, input, true, _domains);
  final int nodeId = SharedTreeMojoModel.getLeafNodeId(d, auxTree._bits);
  out.addNum(nodeId, 0);
}
/**
 * Do the local computation: perform one DeepWaterTask (with run_local=true) iteration.
 * Pass over all the data (replicated in dfork() here) and use _sync_fraction random rows.
 * This calls DeepWaterTask's reduce() between worker threads that update the same local
 * model_info via Hogwild!
 * Once the computation is done, reduce() will be called.
 */
@Override
public void setupLocal() {
  super.setupLocal();
  _res = new DeepWaterTask(_sharedmodel, _sync_fraction, (Job) _jobKey.get());
  addToPendingCount(1);
  _res.dfork(null, _fr, true /*run_local*/);
}
@Override
public long progressUnits() {
  return isPreTrained()
      ? _pre_trained.get().anyVec().nChunks()
      : train().vec(0).nChunks() * _epochs;
}

static final int MAX_VEC_SIZE = 10000;
@Override
public void computeImpl() {
  Word2VecModel model = null;
  try {
    init(!_parms.isPreTrained()); // expensive == true IFF the model is not pre-trained
    // The model to be built
    model = new Word2VecModel(_job._result, _parms, new Word2VecOutput(Word2Vec.this));
    model.delete_and_lock(_job);
    if (_parms.isPreTrained())
      convertToModel(_parms._pre_trained.get(), model);
    else
      trainModel(model);
  } finally {
    if (model != null)
      model.unlock(_job);
  }
}

private void trainModel(Word2VecModel model) {
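// A minimal training sketch following the same builder pattern as the tests below,
// assuming `words` is a single-column string Frame already in the DKV; the parameter
// values are illustrative only.
Word2VecModel.Word2VecParameters p = new Word2VecModel.Word2VecParameters();
p._train = words._key;
p._epochs = 5;
Word2VecModel w2v = new Word2Vec(p).trainModel().get();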
@Ignore
@Test
public void testAirlines() {
  Frame frame = parse_test_file("smalldata/airlines/allyears2k_headers.zip");
  AggregatorModel.AggregatorParameters parms = new AggregatorModel.AggregatorParameters();
  parms._train = frame._key;
  parms._target_num_exemplars = 500;
  parms._rel_tol_num_exemplars = 0.05;
  long start = System.currentTimeMillis();
  AggregatorModel agg = new Aggregator(parms).trainModel().get(); // 0.179
  System.out.println("AggregatorModel finished in: " + (System.currentTimeMillis() - start) / 1000. + " seconds");
  agg.checkConsistency();
  frame.delete();
  Frame output = agg._output._output_frame.get();
  output.remove();
  checkNumExemplars(agg);
  agg.remove();
}
@Test
public void testCovtype() {
  Frame frame = parse_test_file("smalldata/covtype/covtype.20k.data");
  AggregatorModel.AggregatorParameters parms = new AggregatorModel.AggregatorParameters();
  parms._train = frame._key;
  parms._target_num_exemplars = 500;
  parms._rel_tol_num_exemplars = 0.05;
  long start = System.currentTimeMillis();
  AggregatorModel agg = new Aggregator(parms).trainModel().get(); // 0.179
  System.out.println("AggregatorModel finished in: " + (System.currentTimeMillis() - start) / 1000. + " seconds");
  agg.checkConsistency();
  frame.delete();
  Frame output = agg._output._output_frame.get();
  Log.info("Exemplars: " + output.toString());
  output.remove();
  checkNumExemplars(agg);
  agg.remove();
}