/**
 * Evaluates the trained network (classification) on a held-out range of test examples,
 * streaming the data in chunks so the full test set never has to fit in memory at once.
 *
 * @param net             trained network to evaluate
 * @param testStartIdx    index of the first test example to load
 * @param nExamples       number of test examples to evaluate
 * @param outputDirectory directory the serialized test data is read from
 * @throws Exception if the test data iterator cannot be created or read
 */
private static void evaluatePerformance(MultiLayerNetwork net, int testStartIdx, int nExamples, String outputDirectory) throws Exception {
    //Assuming here that the full test data set doesn't fit in memory -> load examples in chunks
    //NOTE(review): an earlier comment said "10 examples at a time" but the iterator is created with 1000 — confirm which is intended
    //Class-index -> label-name mapping, used by Evaluation for readable stats / confusion matrix
    Map<Integer, String> labelMap = new HashMap<>();
    labelMap.put(0, "circle");
    labelMap.put(1, "square");
    labelMap.put(2, "arc");
    labelMap.put(3, "line");
    Evaluation evaluation = new Evaluation(labelMap);
    DataSetIterator testData = getDataSetIterator(outputDirectory, testStartIdx, nExamples, 1000);
    while(testData.hasNext()) {
        DataSet dsTest = testData.next();
        INDArray predicted = net.output(dsTest.getFeatures(), false); //false -> inference mode, not training
        INDArray actual = dsTest.getLabels();
        evaluation.evalTimeSeries(actual, predicted); //Sequence output: evaluate each time step
    }
    System.out.println(evaluation.stats());
}
//Run multiple evaluation metrics in a single pass over the test iterator:
//standard accuracy/precision/recall (Evaluation) plus per-class ROC/AUC (ROCMultiClass)
IEvaluation[] evals = net.doEvaluation(test, new Evaluation(), new ROCMultiClass());
long endEval = System.currentTimeMillis(); //End timestamp for timing the evaluation — presumably paired with a start time captured earlier; verify against surrounding code
//Evaluate on the test iterator, reporting top-5 accuracy in addition to standard (top-1) accuracy
Evaluation e = new Evaluation(TinyImageNetDataSetIterator.getLabels(false), 5); //Set up for top 5 accuracy
net.doEvaluation(test, e);
//Distributed (Spark) evaluation over the test paths, with top-5 accuracy in addition to top-1
Evaluation evaluation = new Evaluation(TinyImageNetDataSetIterator.getLabels(false), 5); //Set up for top 5 accuracy
evaluation = (Evaluation) sparkNet.doEvaluation(pathsTest, loader, evaluation)[0]; //doEvaluation returns an array of IEvaluation; first element is the Evaluation passed in
log.info("Evaluation statistics: {}", evaluation.stats());
//Distributed (Spark) evaluation: batch size 64, Evaluation configured for 10 classes
Evaluation evaluation = sparkNet.doEvaluation(testData, 64, new Evaluation(10))[0]; //Work-around for 0.9.1 bug: see https://deeplearning4j.org/releasenotes
log.info("***** Evaluation *****");
log.info(evaluation.stats());
//Distributed (Spark) evaluation: batch size 32, Evaluation configured for 4 classes
Evaluation evaluation = sparkNetwork.doEvaluation(testData, 32, new Evaluation(4))[0];//Work-around for 0.9.1 bug: see https://deeplearning4j.org/releasenotes
log.info(evaluation.stats());
//Distributed (Spark) evaluation: batch size 64, Evaluation configured for 10 classes
Evaluation evaluation = sparkNet.doEvaluation(testData, 64, new Evaluation(10))[0]; //Work-around for 0.9.1 bug: see https://deeplearning4j.org/releasenotes
log.info("***** Evaluation *****");
log.info(evaluation.stats());
//Iterate over the MNIST test set, accumulating classification statistics
//(snippet is truncated here — the loop body continues beyond this fragment)
Evaluation eval = new Evaluation(outputNum); //outputNum = number of output classes
while(mnistTest.hasNext()){
    DataSet ds = mnistTest.next();
//Iterate over the MNIST test set, accumulating classification statistics
//(snippet is truncated here — the loop body continues beyond this fragment)
Evaluation eval = new Evaluation(outputNum); //outputNum = number of output classes
while(mnistTest.hasNext()){
    DataSet ds = mnistTest.next();
//Iterate over the MNIST test set, accumulating classification statistics
//(snippet is truncated here — the loop body continues beyond this fragment)
Evaluation eval = new Evaluation(outputNum); //outputNum = number of output classes
while(mnistTest.hasNext()){
    DataSet ds = mnistTest.next();
/**
 * Evaluation will be launched after each {@code frequency} iterations
 * (at iteration end), using a default {@link Evaluation} instance.
 *
 * @param iterator  data to evaluate on each time the listener fires
 * @param frequency fire every this many iterations
 */
public EvaluativeListener(@NonNull DataSetIterator iterator, int frequency) {
    this(iterator, frequency, InvocationType.ITERATION_END, new Evaluation());
}
/**
 * Evaluation will be launched after each {@code frequency} iterations
 * (at iteration end), using a default {@link Evaluation} instance.
 *
 * @param iterator  multi-dataset source to evaluate on each time the listener fires
 * @param frequency fire every this many iterations
 */
public EvaluativeListener(@NonNull MultiDataSetIterator iterator, int frequency) {
    this(iterator, frequency, InvocationType.ITERATION_END, new Evaluation());
}
/**
 * Evaluation will be launched every {@code frequency} occurrences of the given
 * invocation type, using a default {@link Evaluation} instance.
 *
 * @param iterator  multi-dataset source to evaluate on
 * @param frequency fire every this many occurrences of {@code type}
 * @param type      training event that triggers evaluation
 */
public EvaluativeListener(@NonNull MultiDataSetIterator iterator, int frequency, @NonNull InvocationType type) {
    this(iterator, frequency, type, new Evaluation());
}
/**
 * Evaluation will be launched every {@code frequency} occurrences of the given
 * invocation type, evaluating on a single fixed {@link MultiDataSet} with a
 * default {@link Evaluation} instance.
 *
 * @param multiDataSet fixed data to evaluate on
 * @param frequency    fire every this many occurrences of {@code type}
 * @param type         training event that triggers evaluation
 */
public EvaluativeListener(@NonNull MultiDataSet multiDataSet, int frequency, @NonNull InvocationType type) {
    this(multiDataSet, frequency, type, new Evaluation());
}
/**
 * Evaluation will be launched every {@code frequency} occurrences of the given
 * invocation type, using a default {@link Evaluation} instance.
 *
 * @param iterator  data to evaluate on
 * @param frequency fire every this many occurrences of {@code type}
 * @param type      training event that triggers evaluation
 */
public EvaluativeListener(@NonNull DataSetIterator iterator, int frequency, @NonNull InvocationType type) {
    this(iterator, frequency, type, new Evaluation());
}
/**
 * Evaluation will be launched every {@code frequency} occurrences of the given
 * invocation type, evaluating on a single fixed {@link DataSet} with a default
 * {@link Evaluation} instance.
 *
 * @param dataSet   fixed data to evaluate on
 * @param frequency fire every this many occurrences of {@code type}
 * @param type      training event that triggers evaluation
 */
public EvaluativeListener(@NonNull DataSet dataSet, int frequency, @NonNull InvocationType type) {
    this(dataSet, frequency, type, new Evaluation());
}
/**
 * Evaluate the network (for classification) on the provided data set, with top N accuracy
 * in addition to standard accuracy. For 'standard' accuracy evaluation only, use topN = 1.
 *
 * @param iterator   Iterator (data) to evaluate on
 * @param labelsList List of labels. May be null.
 * @param topN       N value for top N accuracy evaluation
 * @return Evaluation object, summarizing the results of the evaluation on the provided DataSetIterator
 */
public Evaluation evaluate(MultiDataSetIterator iterator, List<String> labelsList, int topN) {
    Evaluation evaluation = new Evaluation(labelsList, topN);
    Evaluation[] results = doEvaluation(iterator, evaluation);
    return results[0];
}
/**
 * Evaluate the network (for classification) on the provided data set, with top N accuracy
 * in addition to standard accuracy. For 'standard' accuracy evaluation only, use topN = 1.
 *
 * @param iterator   Iterator (data) to evaluate on
 * @param labelsList List of labels. May be null, in which case the iterator's labels are used.
 * @param topN       N value for top N accuracy evaluation
 * @return Evaluation object, summarizing the results of the evaluation on the provided DataSetIterator
 */
public Evaluation evaluate(DataSetIterator iterator, List<String> labelsList, int topN) {
    List<String> labels = (labelsList != null) ? labelsList : iterator.getLabels();
    Evaluation evaluation = new Evaluation(labels, topN);
    return doEvaluation(iterator, evaluation)[0];
}
@Override public String evaluate(FederatedDataSet federatedDataSet) { //evaluate the model on the test set DataSet testData = (DataSet) federatedDataSet.getNativeDataSet(); double score = model.score(testData); Evaluation eval = new Evaluation(numClasses); INDArray output = model.output(testData.getFeatureMatrix()); eval.eval(testData.getLabels(), output); return "Score: " + score; }
@Override public String evaluate(FederatedDataSet federatedDataSet) { DataSet testData = (DataSet) federatedDataSet.getNativeDataSet(); List<DataSet> listDs = testData.asList(); DataSetIterator iterator = new ListDataSetIterator(listDs, BATCH_SIZE); Evaluation eval = new Evaluation(OUTPUT_NUM); //create an evaluation object with 10 possible classes while (iterator.hasNext()) { DataSet next = iterator.next(); INDArray output = model.output(next.getFeatureMatrix()); //get the networks prediction eval.eval(next.getLabels(), output); //check the prediction against the true class } return eval.stats(); }