/**
 * Computes the misclassification error rate of the current model on the
 * given instances.
 *
 * @param data the instances to evaluate the model on
 * @return the error rate measured on {@code data}
 * @throws Exception if evaluation fails
 */
protected double getErrorRate(Instances data) throws Exception {

  final Evaluation evaluation = new Evaluation(data);
  evaluation.evaluateModel(this, data);
  return evaluation.errorRate();
}
/**
 * Computes the misclassification error rate of the current model on the
 * given instances.
 *
 * @param data the instances to evaluate the model on
 * @return the error rate measured on {@code data}
 * @throws Exception if evaluation fails
 */
protected double getErrorRate(Instances data) throws Exception {

  final Evaluation evaluation = new Evaluation(data);
  evaluation.evaluateModel(this, data);
  return evaluation.errorRate();
}
/** * Determine whether the scheme performs worse than ZeroR during testing * * @param classifier the pre-trained classifier * @param evaluation the classifier evaluation object * @param train the training data * @param test the test data * @return index 0 is true if the scheme performs better than ZeroR * @throws Exception if there was a problem during the scheme's testing */ protected boolean[] testWRTZeroR(Classifier classifier, Evaluation evaluation, Instances train, Instances test) throws Exception { boolean[] result = new boolean[2]; evaluation.evaluateModel(classifier, test); try { // Tested OK, compare with ZeroR Classifier zeroR = new weka.classifiers.rules.ZeroR(); zeroR.buildClassifier(train); Evaluation zeroREval = new Evaluation(train); zeroREval.evaluateModel(zeroR, test); result[0] = Utils.grOrEq(zeroREval.errorRate(), evaluation.errorRate()); } catch (Exception ex) { throw new Error("Problem determining ZeroR performance: " + ex.getMessage()); } return result; }
/** * Determine whether the scheme performs worse than ZeroR during testing * * @param classifier the pre-trained classifier * @param evaluation the classifier evaluation object * @param train the training data * @param test the test data * @return index 0 is true if the scheme performs better than ZeroR * @throws Exception if there was a problem during the scheme's testing */ protected boolean[] testWRTZeroR(Classifier classifier, Evaluation evaluation, Instances train, Instances test) throws Exception { boolean[] result = new boolean[2]; evaluation.evaluateModel(classifier, test); try { // Tested OK, compare with ZeroR Classifier zeroR = new weka.classifiers.rules.ZeroR(); zeroR.buildClassifier(train); Evaluation zeroREval = new Evaluation(train); zeroREval.evaluateModel(zeroR, test); result[0] = Utils.grOrEq(zeroREval.errorRate(), evaluation.errorRate()); } catch (Exception ex) { throw new Error("Problem determining ZeroR performance: " + ex.getMessage()); } return result; }
/**
 * Get training error (from loaded data).
 *
 * @param verbose option to display evaluation information in the log window
 * @return classifier error rate on the training data set, or -1 if no
 *         training data is loaded or evaluation fails
 */
public double getTrainingError(boolean verbose)
{
  if (null == this.trainHeader)
    return -1;

  double error = -1;
  try {
    final Evaluation evaluation = new Evaluation(this.loadedTrainingData);
    evaluation.evaluateModel(classifier, this.loadedTrainingData);
    if (verbose)
      IJ.log(evaluation.toSummaryString("\n=== Training set evaluation ===\n", false));
    error = evaluation.errorRate();
  } catch (Exception e) {
    // Report the failure in the log window as well: previously the stack
    // trace went only to stderr and the caller silently received -1.
    IJ.log("Error while evaluating training error: " + e.getMessage());
    e.printStackTrace();
  }
  return error;
}
/**
 * Get training error (from loaded data).
 *
 * @param verbose option to display evaluation information in the log window
 * @return classifier error rate on the training data set, or -1 if no
 *         training data is loaded or evaluation fails
 */
public double getTrainingError(boolean verbose)
{
  if (null == this.trainHeader)
    return -1;

  double error = -1;
  try {
    final Evaluation evaluation = new Evaluation(this.loadedTrainingData);
    evaluation.evaluateModel(classifier, this.loadedTrainingData);
    if (verbose)
      IJ.log(evaluation.toSummaryString("\n=== Training set evaluation ===\n", false));
    error = evaluation.errorRate();
  } catch (Exception e) {
    // Report the failure in the log window as well: previously the stack
    // trace went only to stderr and the caller silently received -1.
    IJ.log("Error while evaluating training error: " + e.getMessage());
    e.printStackTrace();
  }
  return error;
}
evaluation.evaluateModel(copiedClassifier, test); double error = evaluation.errorRate(); if (m_Debug) { System.err.println("Cross-validated error rate: "
evaluation.evaluateModel(copiedClassifier, test); double error = evaluation.errorRate(); if (m_Debug) { System.err.println("Cross-validated error rate: "
double error = evaluation.errorRate(); if (m_Debug) { System.err.println("Error rate: " + Utils.doubleToString(error, 6, 4)
double error = evaluation.errorRate(); if (m_Debug) { System.err.println("Error rate: " + Utils.doubleToString(error, 6, 4)
m_randomSeed)); errorRate = o_Evaluation.errorRate(); return (1 - errorRate) * 100.0;
m_randomSeed)); errorRate = o_Evaluation.errorRate(); return (1 - errorRate) * 100.0;
IJ.log(evaluation.toMatrixString()); error = evaluation.errorRate(); } catch (Exception e) {
IJ.log(evaluation.toMatrixString()); error = evaluation.errorRate(); } catch (Exception e) {
IJ.log(evaluation.toMatrixString()); error = evaluation.errorRate(); } catch (Exception e) {
IJ.log(evaluation.toMatrixString()); error = evaluation.errorRate(); } catch (Exception e) {