/**
 * EvaluateModel - Build model 'h' on 'D_train', test it on 'D_test', threshold it
 * according to 'top', using the default verbosity option.
 *
 * @param h       a multi-dim. classifier
 * @param D_train training data
 * @param D_test  test data
 * @param top     Threshold OPtion (pertains to multi-label data only)
 * @return Result raw prediction data with evaluation statistics included.
 * @throws Exception if building or evaluating the model fails
 */
public static Result evaluateModel(MultiXClassifier h, Instances D_train, Instances D_test, String top) throws Exception {
	// Delegate to the full-signature overload with the default verbosity option ("1").
	final String defaultVerbosity = "1";
	return Evaluation.evaluateModel(h, D_train, D_test, top, defaultVerbosity);
}
/**
 * CVModel - Split D into train/test folds, and then train and evaluate on each one.
 *
 * @param h        a multi-output classifier
 * @param D        test data Instances
 * @param numFolds number of folds of CV
 * @param top      Threshold OPtion (pertains to multi-label data only)
 * @return Result raw prediction data with evaluation statistics included.
 * @throws Exception if cross-validation fails
 */
public static Result cvModel(MultiLabelClassifier h, Instances D, int numFolds, String top) throws Exception {
	// Run the full-signature overload using the default verbosity option ("1").
	final String defaultVerbosity = "1";
	return cvModel(h, D, numFolds, top, defaultVerbosity);
}
/**
 * loadDataset - load a dataset, given command line option '-t' specifying an arff file.
 *
 * @param options command line options, specifying dataset filename
 * @return the dataset
 * @throws Exception if the dataset cannot be loaded
 */
public static Instances loadDataset(String options[]) throws Exception {
	// 't' is the command-line flag that carries the (training) dataset filename.
	final char trainingFlag = 't';
	return loadDataset(options, trainingFlag);
}
Evaluation.printOptions(h.listOptions()); return; Instances D_train = loadDataset(options); r = Evaluation.cvModel(h,D_train,numFolds,top,voption); System.out.println(r.toString()); D_test = loadDataset(options,'T'); MLUtils.prepareData(D_test); } catch(Exception e) { r = testClassifier(h, D_test); r = evaluateModel(h, D_test, t, voption); if (doEval) { if (Threaded) { r = evaluateModelM(h, D_train, D_test, top, voption); r = evaluateModel(h, D_train, D_test, top, voption); Evaluation.printOptions(h.listOptions()); System.exit(1);
/** * EvaluateModel - Build model 'h' on 'D_train', test it on 'D_test', threshold it according to 'top', verbosity 'vop'. * @param h a multi-dim. classifier * @param D_train training data * @param D_test test data * @param top Threshold OPtion (pertains to multi-label data only) * @param vop Verbosity OPtion (which measures do we want to calculate/output) * @return Result raw prediction data with evaluation statistics included. */ public static Result evaluateModel(MultiXClassifier h, Instances D_train, Instances D_test, String top, String vop) throws Exception { Result r = evaluateModel(h,D_train,D_test); if (h instanceof MultiTargetClassifier || isMT(D_test)) { r.setInfo("Type","MT"); } else if (h instanceof MultiLabelClassifier) { r.setInfo("Type","ML"); r.setInfo("Threshold",MLEvalUtils.getThreshold(r.predictions,D_train,top)); // <-- only relevant to ML (for now), but we'll put it in here in any case } r.setInfo("Verbosity",vop); r.output = Result.getStats(r, vop); return r; }
/**
 * Runs a command-line evaluation experiment with a HARAMNetwork classifier.
 *
 * @param argv command-line options passed through to the experiment runner
 */
public static void main(String [] argv) {
	try {
		Evaluation.runExperiment((MultiLabelClassifier) new HARAMNetwork(), argv);
	}
	catch (Exception e) {
		// Print both the full trace and the short message for command-line users.
		e.printStackTrace();
		System.err.println(e.getMessage());
	}
}
if (m_Test == null) { if (m_Folds >= 2) { eval = Evaluation.cvModel(classifier, m_Train, m_Folds, m_TOP, m_VOP); eval = Evaluation.evaluateModel(classifier, m_Train, m_TOP, m_VOP); eval = Evaluation.evaluateModel(classifier, m_Test, m_TOP, m_VOP);
test = Evaluation.loadDataset(options, 'T'); MLUtils.prepareData(test); needPrebuiltModel = true; train = Evaluation.loadDataset(options, 't'); MLUtils.prepareData(train); needPrebuiltModel = false; // we can build a model with training data if (h.getDebug()) System.out.println("Non-incremental evaluation on provided test set"); result = Evaluation.evaluateModel(h, test, Top, Vop);
/**
 * EvaluateModel - Assume 'h' is already built, test it on 'D_test', threshold it
 * according to 'top', verbosity 'vop'.
 *
 * @param h      a multi-dim. classifier
 * @param D_test test data
 * @param tal    Threshold VALUES (not option)
 * @param vop    Verbosity OPtion (which measures do we want to calculate/output)
 * @return Result raw prediction data with evaluation statistics included.
 * @throws Exception if testing the classifier fails
 */
public static Result evaluateModel(MultiXClassifier h, Instances D_test, String tal, String vop) throws Exception {
	Result result = testClassifier(h, D_test);
	// Tag the result with the problem type: multi-target takes precedence over multi-label.
	boolean multiTarget = (h instanceof MultiTargetClassifier) || isMT(D_test);
	if (multiTarget) {
		result.setInfo("Type", "MT");
	}
	else if (h instanceof MultiLabelClassifier) {
		result.setInfo("Type", "ML");
	}
	result.setInfo("Threshold", tal);
	result.setInfo("Verbosity", vop);
	result.output = Result.getStats(result, vop);
	return result;
}
/** * Called by classifier's main() method upon initialisation from the command line. * @param h A classifier * @param args Command-line options. */ public static void runClassifier(MultiLabelClassifier h, String args[]) { if (h instanceof UpdateableClassifier) { try { IncrementalEvaluation.runExperiment(h,args); } catch(Exception e) { System.err.println("\n"+e); //e.printStackTrace(); IncrementalEvaluation.printOptions(h.listOptions()); } } else { try { Evaluation.runExperiment(h,args); } catch(Exception e) { System.err.println("\n"+e); //e.printStackTrace(); Evaluation.printOptions(h.listOptions()); } } }
Result result = testClassifierM(h,D_test); long after_test = System.currentTimeMillis(); if (h instanceof MultiTargetClassifier || isMT(D_test)) { result.setInfo("Type","MT");
/** * Payoff - Return a default score of h evaluated on D. * @param h a classifier * @param D a dataset */ public double payoff(CC h, Instances D) throws Exception { Result r = Evaluation.testClassifier(h,D); // assume multi-label for now r.setInfo("Type","ML"); r.setInfo("Threshold","0.5"); r.setInfo("Verbosity","7"); r.output = Result.getStats(r, "7"); return (Double)r.getMeasurement(m_Payoff); }
result.setInfo("Dataset",MLUtils.getDatasetName(D)); result.setInfo("Verbosity",Vop); if (h instanceof MultiTargetClassifier || Evaluation.isMT(D)) { result.setInfo("Type","MT");
Evaluation.printOptions(h.listOptions()); return; Instances D_train = loadDataset(options); r = Evaluation.cvModel(h,D_train,numFolds,top,voption); System.out.println(r.toString()); D_test = loadDataset(options,'T'); MLUtils.prepareData(D_test); } catch(Exception e) { r = testClassifier(h, D_test); r = evaluateModel(h, D_test, t, voption); if (doEval) { if (Threaded) { r = evaluateModelM(h, D_train, D_test, top, voption); r = evaluateModel(h, D_train, D_test, top, voption); Evaluation.printOptions(h.listOptions()); System.exit(1);
/** * EvaluateModel - Build model 'h' on 'D_train', test it on 'D_test', threshold it according to 'top', verbosity 'vop'. * @param h a multi-dim. classifier * @param D_train training data * @param D_test test data * @param top Threshold OPtion (pertains to multi-label data only) * @param vop Verbosity OPtion (which measures do we want to calculate/output) * @return Result raw prediction data with evaluation statistics included. */ public static Result evaluateModel(MultiXClassifier h, Instances D_train, Instances D_test, String top, String vop) throws Exception { Result r = evaluateModel(h,D_train,D_test); if (h instanceof MultiTargetClassifier || isMT(D_test)) { r.setInfo("Type","MT"); } else if (h instanceof MultiLabelClassifier) { r.setInfo("Type","ML"); r.setInfo("Threshold",MLEvalUtils.getThreshold(r.predictions,D_train,top)); // <-- only relevant to ML (for now), but we'll put it in here in any case } r.setInfo("Verbosity",vop); r.output = Result.getStats(r, vop); return r; }
public static void main(String [] argv) { try { Evaluation.runExperiment(((MultiLabelClassifier) new WvARAM()), argv); } catch (Exception e) { e.printStackTrace(); System.err.println(e.getMessage()); } } public double[][] distributionForInstanceM(Instances i) throws Exception {
if (m_Test == null) { if (m_Folds >= 2) { eval = Evaluation.cvModel(classifier, m_Train, m_Folds, m_TOP, m_VOP); eval = Evaluation.evaluateModel(classifier, m_Train, m_TOP, m_VOP); eval = Evaluation.evaluateModel(classifier, m_Test, m_TOP, m_VOP);
test = Evaluation.loadDataset(options, 'T'); MLUtils.prepareData(test); needPrebuiltModel = true; train = Evaluation.loadDataset(options, 't'); MLUtils.prepareData(train); needPrebuiltModel = false; // we can build a model with training data if (h.getDebug()) System.out.println("Non-incremental evaluation on provided test set"); result = Evaluation.evaluateModel(h, test, Top, Vop);
/**
 * EvaluateModel - Assume 'h' is already built, test it on 'D_test', threshold it
 * according to 'top', verbosity 'vop'.
 *
 * @param h      a multi-dim. classifier
 * @param D_test test data
 * @param tal    Threshold VALUES (not option)
 * @param vop    Verbosity OPtion (which measures do we want to calculate/output)
 * @return Result raw prediction data with evaluation statistics included.
 * @throws Exception if testing the classifier fails
 */
public static Result evaluateModel(MultiXClassifier h, Instances D_test, String tal, String vop) throws Exception {
	Result r = testClassifier(h, D_test);
	if (h instanceof MultiTargetClassifier || isMT(D_test)) {
		r.setInfo("Type", "MT");
	}
	else if (h instanceof MultiLabelClassifier) {
		r.setInfo("Type", "ML");
	}
	// The caller supplies concrete threshold values here (not a threshold option).
	r.setInfo("Threshold", tal);
	r.setInfo("Verbosity", vop);
	r.output = Result.getStats(r, vop);
	return r;
}
/** * Called by classifier's main() method upon initialisation from the command line. * @param h A classifier * @param args Command-line options. */ public static void runClassifier(MultiLabelClassifier h, String args[]) { if (h instanceof UpdateableClassifier) { try { IncrementalEvaluation.runExperiment(h,args); } catch(Exception e) { System.err.println("\n"+e); //e.printStackTrace(); IncrementalEvaluation.printOptions(h.listOptions()); } } else { try { Evaluation.runExperiment(h,args); } catch(Exception e) { System.err.println("\n"+e); //e.printStackTrace(); Evaluation.printOptions(h.listOptions()); } } }