private void saveEvaluation(boolean train, IEvaluation[] evaluations) throws Exception { String evalPath = FilenameUtils.concat(outputPath, ("evaluation_" + (train ? "train" : "test"))); //Write evaluations to disk for( int i=0; i<evaluations.length; i++ ){ String path = FilenameUtils.concat(evalPath, "evaluation_" + i + ".txt"); FileUtils.writeStringToFile(new File(path), evaluations[i].stats(), Charset.forName("UTF-8")); } }
private static void evaluatePerformance(MultiLayerNetwork net, int testStartIdx, int nExamples, String outputDirectory) throws Exception { //Assuming here that the full test data set doesn't fit in memory -> load 10 examples at a time Map<Integer, String> labelMap = new HashMap<>(); labelMap.put(0, "circle"); labelMap.put(1, "square"); labelMap.put(2, "arc"); labelMap.put(3, "line"); Evaluation evaluation = new Evaluation(labelMap); DataSetIterator testData = getDataSetIterator(outputDirectory, testStartIdx, nExamples, 1000); while(testData.hasNext()) { DataSet dsTest = testData.next(); INDArray predicted = net.output(dsTest.getFeatures(), false); INDArray actual = dsTest.getLabels(); evaluation.evalTimeSeries(actual, predicted); } System.out.println(evaluation.stats()); }
// Evaluate accuracy/F1 (Evaluation) and per-class AUC/AUPRC (ROCMultiClass) in one pass.
IEvaluation[] evals = net.doEvaluation(test, new Evaluation(), new ROCMultiClass());
long endEval = System.currentTimeMillis();
// NOTE(review): the lines below are a fragment of a StringBuilder chain — the receiver
// and several variables (iterAfter/itersBefore, end/start, startEval, e, r) are declared
// outside this excerpt. Also note the inconsistent naming: "iterAfter" vs "itersBefore"
// — presumably both count iterations; confirm and align the names at the declaration site.
        .append(" subsetMiniBatches ").append(iterAfter - itersBefore) //Note: "end of epoch" effect - may be smaller than subset size
        .append(" trainMS ").append(end - start).append(" evalMS ").append(endEval - startEval)
        .append(" accuracy ").append(e.accuracy()).append(" f1 ").append(e.f1())
        .append(" AvgAUC ").append(r.calculateAverageAUC()).append(" AvgAUPRC ").append(r.calculateAverageAUCPR()).append("\n");
// NOTE(review): this excerpt is truncated — the while-loop's closing brace is not visible,
// so it is unclear whether stats() logging and reset() happen inside or after the loop.
// Logging stats() on every batch would be noisy; confirm against the full file.
Evaluation eval = new Evaluation(outputNum);
while (mnistTest.hasNext()) {
    DataSet ds = mnistTest.next();
    INDArray output = model.output(ds.getFeatures(), false); // false = inference mode
    eval.eval(ds.getLabels(), output);                       // accumulate confusion-matrix counts
    log.info(eval.stats());
    mnistTest.reset();
Evaluation e = new Evaluation(TinyImageNetDataSetIterator.getLabels(false), 5); //Set up for top 5 accuracy net.doEvaluation(test, e); log.info(e.stats()); FileUtils.writeStringToFile(new File(saveDir, "evaulation.txt"), e.stats(), StandardCharsets.UTF_8);
// Log the accumulated evaluation statistics; stats(true) passes a flag whose meaning
// (suppress warnings vs. extra detail) is not visible here — TODO confirm against the
// Evaluation.stats(boolean) javadoc for the DL4J version in use.
log.info(eval.stats(true));
/**
 * Calculate the Matthews correlation coefficient (MCC) for the specified output,
 * derived from that output's TP/FP/FN/TN counts.
 *
 * @param outputNum Output number
 * @return Matthews correlation coefficient
 */
public double matthewsCorrelation(int outputNum) {
    assertIndex(outputNum); // validate the output index before reading counts
    final long tp = truePositives(outputNum);
    final long fp = falsePositives(outputNum);
    final long fn = falseNegatives(outputNum);
    final long tn = trueNegatives(outputNum);
    return EvaluationUtils.matthewsCorrelation(tp, fp, fn, tn);
}
/**
 * Get the actual positive count (accounting for any masking) for the specified output/column.
 *
 * @param outputNum Index of the output (0 to {@link #numLabels()}-1)
 * @return Count of actual positive examples for that output
 */
public long getCountActualPositive(int outputNum) {
    assertIndex(outputNum); // validate the output index before dereferencing
    // Delegate to the per-output evaluation instance backing this index.
    return underlying[outputNum].getCountActualPositive();
}
/**
 * Get the ROC curve for the specified output.
 *
 * @param outputNum Number of the output to get the ROC curve for
 * @return ROC curve for that output
 */
public RocCurve getRocCurve(int outputNum) {
    assertIndex(outputNum); // validate the output index before dereferencing
    // Delegate to the per-output ROC instance backing this index.
    return underlying[outputNum].getRocCurve();
}
/**
 * Returns the false positive rate for a given label.
 *
 * @param classLabel the label
 * @param edgeCase   What to output in case of 0/0
 * @return fpr as a double
 */
public double falsePositiveRate(int classLabel, double edgeCase) {
    // Counts are truncated to long to match EvaluationUtils' signature.
    long fp = (long) falsePositives(classLabel);
    long tn = (long) trueNegatives(classLabel);
    return EvaluationUtils.falsePositiveRate(fp, tn, edgeCase);
}
/**
 * Returns the false negative rate for a given label.
 *
 * @param classLabel the label
 * @param edgeCase   What to output in case of 0/0
 * @return fnr as a double
 */
public double falseNegativeRate(Integer classLabel, double edgeCase) {
    // Counts are truncated to long to match EvaluationUtils' signature.
    // NOTE(review): parameter is Integer here but int in falsePositiveRate — presumably
    // historical; left unchanged since narrowing it could affect callers passing null.
    long fn = (long) falseNegatives(classLabel);
    long tp = (long) truePositives(classLabel);
    return EvaluationUtils.falseNegativeRate(fn, tp, edgeCase);
}
// NOTE(review): this excerpt is truncated — the while-loop's closing brace is not visible,
// so it is unclear whether stats() logging and reset() happen inside or after the loop.
// Logging stats() on every batch would be noisy; confirm against the full file.
Evaluation eval = new Evaluation(outputNum);
while (mnistTest.hasNext()) {
    DataSet ds = mnistTest.next();
    INDArray output = model.output(ds.getFeatures(), false); // false = inference mode
    eval.eval(ds.getLabels(), output);                       // accumulate confusion-matrix counts
    log.info(eval.stats());
    mnistTest.reset();
// Distributed top-5 evaluation via Spark; result is both logged and written to evalPath.
Evaluation evaluation = new Evaluation(TinyImageNetDataSetIterator.getLabels(false), 5); //Set up for top 5 accuracy
// doEvaluation returns an array of evaluations; index 0 is the single Evaluation passed in.
evaluation = (Evaluation) sparkNet.doEvaluation(pathsTest, loader, evaluation)[0];
log.info("Evaluation statistics: {}", evaluation.stats());
SparkUtils.writeStringToFile(evalPath, evaluation.stats(), sc);
// Log the accumulated evaluation statistics (accuracy, precision, recall, F1, etc.).
log.info(eval.stats());
// NOTE(review): this excerpt is truncated — the while-loop's closing brace is not visible,
// so it is unclear whether stats() logging and reset() happen inside or after the loop.
// Logging stats() on every batch would be noisy; confirm against the full file.
Evaluation eval = new Evaluation(outputNum);
while (mnistTest.hasNext()) {
    DataSet ds = mnistTest.next();
    INDArray output = model.output(ds.getFeatures(), false); // false = inference mode
    eval.eval(ds.getLabels(), output);                       // accumulate confusion-matrix counts
    log.info(eval.stats());
    mnistTest.reset();
// Distributed 10-class evaluation via Spark, mini-batch size 64 per executor call.
// Taking element [0] of the returned array is a work-around for a 0.9.1 bug:
// see https://deeplearning4j.org/releasenotes
Evaluation evaluation = sparkNet.doEvaluation(testData, 64, new Evaluation(10))[0];
log.info("***** Evaluation *****");
log.info(evaluation.stats());
// Log the evaluation stats, then rewind the test iterator so it can be reused
// (e.g., for the next epoch's evaluation).
log.info(eval.stats()+"\n");
testIter.reset();
// Distributed 4-class evaluation via Spark, mini-batch size 32 per executor call.
// Taking element [0] of the returned array is a work-around for a 0.9.1 bug:
// see https://deeplearning4j.org/releasenotes
Evaluation evaluation = sparkNetwork.doEvaluation(testData, 32, new Evaluation(4))[0];
log.info(evaluation.stats());
// Print the evaluation statistics to stdout.
// NOTE(review): other snippets use an SLF4J logger; if this file has one in scope,
// prefer log.info(...) for consistency — confirm against the full file.
System.out.println(evaluation.stats());
// Distributed 10-class evaluation via Spark, mini-batch size 64 per executor call.
// Taking element [0] of the returned array is a work-around for a 0.9.1 bug:
// see https://deeplearning4j.org/releasenotes
Evaluation evaluation = sparkNet.doEvaluation(testData, 64, new Evaluation(10))[0];
log.info("***** Evaluation *****");
log.info(evaluation.stats());