Usage examples for weka.classifiers.Evaluation.weightedAreaUnderROC()
public double weightedAreaUnderROC()
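A minimal, self-contained sketch (not taken from the indexed source files below): weightedAreaUnderROC() averages the per-class area under the ROC curve, weighting each class by its frequency in the evaluated data, so it requires a nominal class attribute and class probability estimates. The ARFF path and the J48 classifier are placeholders; substitute your own dataset and classifier.

import java.util.Random;
import weka.classifiers.Evaluation;
import weka.classifiers.trees.J48;
import weka.core.Instances;
import weka.core.converters.ConverterUtils.DataSource;

public class WeightedAucDemo {
  public static void main(String[] args) throws Exception {
    // "iris.arff" is a placeholder path; any ARFF file with a nominal class works.
    Instances data = DataSource.read("iris.arff");
    data.setClassIndex(data.numAttributes() - 1); // assume the class attribute is the last one

    // 10-fold cross-validation; the Evaluation object accumulates the per-fold predictions.
    Evaluation eval = new Evaluation(data);
    eval.crossValidateModel(new J48(), data, 10, new Random(1));

    // Weighted (by class frequency) average of the per-class AUC values.
    System.out.println("Weighted AUC: " + eval.weightedAreaUnderROC());
  }
}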
From source file:FlexDMThread.java
License:Open Source License
public void run() {
  try {
    //Get the data from the source
    FlexDM.getMainData.acquire();
    Instances data = dataset.getSource().getDataSet();
    FlexDM.getMainData.release();

    //Set class attribute if undefined
    if (data.classIndex() == -1) {
      data.setClassIndex(data.numAttributes() - 1);
    }

    //Process hyperparameters for classifier
    String temp = "";
    for (int i = 0; i < classifier.getNumParams(); i++) {
      temp += classifier.getParameter(i).getName();
      temp += " ";
      if (classifier.getParameter(i).getValue() != null) {
        temp += classifier.getParameter(i).getValue();
        temp += " ";
      }
    }

    String[] options = weka.core.Utils.splitOptions(temp);

    //Print to console- experiment is starting
    if (temp.equals("")) { //no parameters
      temp = "results_no_parameters";
      try {
        System.out.println("STARTING CLASSIFIER " + cNum + " - " + classifier.getName() + " on dataset "
            + dataset.getName().substring(dataset.getName().lastIndexOf("\\") + 1) + " with no parameters");
      } catch (Exception e) {
        System.out.println("STARTING CLASSIFIER " + cNum + " - " + classifier.getName() + " on dataset "
            + dataset.getName() + " with no parameters");
      }
    } else { //parameters
      try {
        System.out.println("STARTING CLASSIFIER " + cNum + " - " + classifier.getName() + " on dataset "
            + dataset.getName().substring(dataset.getName().lastIndexOf("\\") + 1) + " with parameters " + temp);
      } catch (Exception e) {
        System.out.println("STARTING CLASSIFIER " + cNum + " - " + classifier.getName() + " on dataset "
            + dataset.getName() + " with parameters " + temp);
      }
    }

    //Create classifier, setting parameters
    weka.classifiers.Classifier x = createObject(classifier.getName());
    x.setOptions(options);
    x.buildClassifier(data);

    //Process the test selection
    String[] tempTest = dataset.getTest().split("\\s");

    //Create evaluation object for training and testing classifiers
    Evaluation eval = new Evaluation(data);
    StringBuffer predictions = new StringBuffer();

    //Train and evaluate classifier
    if (tempTest[0].equals("testset")) { //specified test file
      //Build classifier
      x.buildClassifier(data);

      //Open test file, load data
      //DataSource testFile = new DataSource(dataset.getTest().substring(7).trim());
      //Instances testSet = testFile.getDataSet();
      FlexDM.getTestData.acquire();
      Instances testSet = dataset.getTestFile().getDataSet();
      FlexDM.getTestData.release();

      //Set class attribute if undefined
      if (testSet.classIndex() == -1) {
        testSet.setClassIndex(testSet.numAttributes() - 1);
      }

      //Evaluate model
      Object[] array = { predictions, new Range(), new Boolean(true) };
      eval.evaluateModel(x, testSet, array);
    } else if (tempTest[0].equals("xval")) { //Cross validation
      //Build classifier
      x.buildClassifier(data);

      //Cross validate
      eval.crossValidateModel(x, data, Integer.parseInt(tempTest[1]), new Random(1), predictions, new Range(), true);
    } else if (tempTest[0].equals("leavexval")) { //Leave one out cross validation
      //Build classifier
      x.buildClassifier(data);

      //Cross validate
      eval.crossValidateModel(x, data, data.numInstances() - 1, new Random(1), predictions, new Range(), true);
    } else if (tempTest[0].equals("percent")) { //Percentage split of single data set
      //Set training and test sizes from percentage
      int trainSize = (int) Math.round(data.numInstances() * Double.parseDouble(tempTest[1]));
      int testSize = data.numInstances() - trainSize;

      //Load specified data
      Instances train = new Instances(data, 0, trainSize);
      Instances testSet = new Instances(data, trainSize, testSize);

      //Build classifier
      x.buildClassifier(train);

      //Train and evaluate model
      Object[] array = { predictions, new Range(), new Boolean(true) };
      eval.evaluateModel(x, testSet, array);
    } else { //Evaluate on training data
      //Test and evaluate model
      Object[] array = { predictions, new Range(), new Boolean(true) };
      eval.evaluateModel(x, data, array);
    }

    //create datafile for results
    String filename = dataset.getDir() + "/" + classifier.getDirName() + "/" + temp + ".txt";
    PrintWriter writer = new PrintWriter(filename, "UTF-8");

    //Print classifier, dataset, parameters info to file
    try {
      writer.println("CLASSIFIER: " + classifier.getName() + "\n DATASET: " + dataset.getName()
          + "\n PARAMETERS: " + temp);
    } catch (Exception e) {
      writer.println("CLASSIFIER: " + classifier.getName() + "\n DATASET: " + dataset.getName()
          + "\n PARAMETERS: " + temp);
    }

    //Add evaluation string to file
    writer.println(eval.toSummaryString());

    //Process result options
    if (checkResults("stats")) { //Classifier statistics
      writer.println(eval.toClassDetailsString());
    }
    if (checkResults("model")) { //The model
      writer.println(x.toString());
    }
    if (checkResults("matrix")) { //Confusion matrix
      writer.println(eval.toMatrixString());
    }
    if (checkResults("entropy")) { //Entropy statistics
      //Set options req'd to get the entropy stats
      String[] opt = new String[4];
      opt[0] = "-t";
      opt[1] = dataset.getName();
      opt[2] = "-k";
      opt[3] = "-v";

      //Evaluate model
      String entropy = Evaluation.evaluateModel(x, opt);

      //Grab the relevant info from the results, print to file
      entropy = entropy.substring(entropy.indexOf("=== Stratified cross-validation ===") + 35,
          entropy.indexOf("=== Confusion Matrix ==="));
      writer.println("=== Entropy Statistics ===");
      writer.println(entropy);
    }
    if (checkResults("predictions")) { //The models predictions
      writer.println("=== Predictions ===\n");
      if (!dataset.getTest().contains("xval")) { //print header of predictions table if req'd
        writer.println(" inst# actual predicted error distribution ()");
      }
      writer.println(predictions.toString()); //print predictions to file
    }
    writer.close();

    //Summary file is semaphore controlled to ensure quality
    try { //get a permit
      //grab the summary file, write the classifiers details to it
      FlexDM.writeFile.acquire();
      PrintWriter p = new PrintWriter(new FileWriter(summary, true));
      if (temp.equals("results_no_parameters")) { //change output based on parameters
        temp = temp.substring(8);
      }

      //write percent correct, classifier name, dataset name to summary file
      p.write(dataset.getName() + ", " + classifier.getName() + ", " + temp + ", " + eval.correct() + ", "
          + eval.incorrect() + ", " + eval.unclassified() + ", " + eval.pctCorrect() + ", "
          + eval.pctIncorrect() + ", " + eval.pctUnclassified() + ", " + eval.kappa() + ", "
          + eval.meanAbsoluteError() + ", " + eval.rootMeanSquaredError() + ", "
          + eval.relativeAbsoluteError() + ", " + eval.rootRelativeSquaredError() + ", "
          + eval.SFPriorEntropy() + ", " + eval.SFSchemeEntropy() + ", " + eval.SFEntropyGain() + ", "
          + eval.SFMeanPriorEntropy() + ", " + eval.SFMeanSchemeEntropy() + ", " + eval.SFMeanEntropyGain() + ", "
          + eval.KBInformation() + ", " + eval.KBMeanInformation() + ", " + eval.KBRelativeInformation() + ", "
          + eval.weightedTruePositiveRate() + ", " + eval.weightedFalsePositiveRate() + ", "
          + eval.weightedTrueNegativeRate() + ", " + eval.weightedFalseNegativeRate() + ", "
          + eval.weightedPrecision() + ", " + eval.weightedRecall() + ", " + eval.weightedFMeasure() + ", "
          + eval.weightedAreaUnderROC() + "\n");
      p.close();

      //release semaphore
      FlexDM.writeFile.release();
    } catch (InterruptedException e) { //bad things happened
      System.err.println("FATAL ERROR OCCURRED: Classifier: " + cNum + " - " + classifier.getName()
          + " on dataset " + dataset.getName());
    }

    //output we have successfully finished processing classifier
    if (temp.equals("no_parameters")) { //no parameters
      try {
        System.out.println("FINISHED CLASSIFIER " + cNum + " - " + classifier.getName() + " on dataset "
            + dataset.getName().substring(dataset.getName().lastIndexOf("\\") + 1) + " with no parameters");
      } catch (Exception e) {
        System.out.println("FINISHED CLASSIFIER " + cNum + " - " + classifier.getName() + " on dataset "
            + dataset.getName() + " with no parameters");
      }
    } else { //with parameters
      try {
        System.out.println("FINISHED CLASSIFIER " + cNum + " - " + classifier.getName() + " on dataset "
            + dataset.getName().substring(dataset.getName().lastIndexOf("\\") + 1) + " with parameters " + temp);
      } catch (Exception e) {
        System.out.println("FINISHED CLASSIFIER " + cNum + " - " + classifier.getName() + " on dataset "
            + dataset.getName() + " with parameters " + temp);
      }
    }

    try { //get a permit
      //grab the log file, write the classifiers details to it
      FlexDM.writeLog.acquire();
      PrintWriter p = new PrintWriter(new FileWriter(log, true));
      Date date = new Date();
      Format formatter = new SimpleDateFormat("dd/MM/YYYY HH:mm:ss");
      //formatter.format(date)

      if (temp.equals("results_no_parameters")) { //change output based on parameters
        temp = temp.substring(8);
      }

      //write details to log file
      p.write(dataset.getName() + ", " + dataset.getTest() + ", \"" + dataset.getResult_string() + "\", "
          + classifier.getName() + ", " + temp + ", " + formatter.format(date) + "\n");
      p.close();

      //release semaphore
      FlexDM.writeLog.release();
    } catch (InterruptedException e) { //bad things happened
      System.err.println("FATAL ERROR OCCURRED: Classifier: " + cNum + " - " + classifier.getName()
          + " on dataset " + dataset.getName());
    }

    s.release();
  } catch (Exception e) { //an error occurred
    System.err.println("FATAL ERROR OCCURRED: " + e.toString() + "\nClassifier: " + cNum + " - "
        + classifier.getName() + " on dataset " + dataset.getName());
    s.release();
  }
}
From source file:adams.flow.core.EvaluationHelper.java
License:Open Source License
/**
 * Returns a statistical value from the evaluation object.
 *
 * @param eval the evaluation object to get the value from
 * @param statistic the type of value to return
 * @param classIndex the class label index, for statistics like AUC
 * @return the determined value, Double.NaN if not found
 * @throws Exception if evaluation fails
 */
public static double getValue(Evaluation eval, EvaluationStatistic statistic, int classIndex) throws Exception {
  switch (statistic) {
  case NUMBER_CORRECT: return eval.correct();
  case NUMBER_INCORRECT: return eval.incorrect();
  case NUMBER_UNCLASSIFIED: return eval.unclassified();
  case PERCENT_CORRECT: return eval.pctCorrect();
  case PERCENT_INCORRECT: return eval.pctIncorrect();
  case PERCENT_UNCLASSIFIED: return eval.pctUnclassified();
  case KAPPA_STATISTIC: return eval.kappa();
  case MEAN_ABSOLUTE_ERROR: return eval.meanAbsoluteError();
  case ROOT_MEAN_SQUARED_ERROR: return eval.rootMeanSquaredError();
  case RELATIVE_ABSOLUTE_ERROR: return eval.relativeAbsoluteError();
  case ROOT_RELATIVE_SQUARED_ERROR: return eval.rootRelativeSquaredError();
  case CORRELATION_COEFFICIENT: return eval.correlationCoefficient();
  case SF_PRIOR_ENTROPY: return eval.SFPriorEntropy();
  case SF_SCHEME_ENTROPY: return eval.SFSchemeEntropy();
  case SF_ENTROPY_GAIN: return eval.SFEntropyGain();
  case SF_MEAN_PRIOR_ENTROPY: return eval.SFMeanPriorEntropy();
  case SF_MEAN_SCHEME_ENTROPY: return eval.SFMeanSchemeEntropy();
  case SF_MEAN_ENTROPY_GAIN: return eval.SFMeanEntropyGain();
  case KB_INFORMATION: return eval.KBInformation();
  case KB_MEAN_INFORMATION: return eval.KBMeanInformation();
  case KB_RELATIVE_INFORMATION: return eval.KBRelativeInformation();
  case TRUE_POSITIVE_RATE: return eval.truePositiveRate(classIndex);
  case NUM_TRUE_POSITIVES: return eval.numTruePositives(classIndex);
  case FALSE_POSITIVE_RATE: return eval.falsePositiveRate(classIndex);
  case NUM_FALSE_POSITIVES: return eval.numFalsePositives(classIndex);
  case TRUE_NEGATIVE_RATE: return eval.trueNegativeRate(classIndex);
  case NUM_TRUE_NEGATIVES: return eval.numTrueNegatives(classIndex);
  case FALSE_NEGATIVE_RATE: return eval.falseNegativeRate(classIndex);
  case NUM_FALSE_NEGATIVES: return eval.numFalseNegatives(classIndex);
  case IR_PRECISION: return eval.precision(classIndex);
  case IR_RECALL: return eval.recall(classIndex);
  case F_MEASURE: return eval.fMeasure(classIndex);
  case MATTHEWS_CORRELATION_COEFFICIENT: return eval.matthewsCorrelationCoefficient(classIndex);
  case AREA_UNDER_ROC: return eval.areaUnderROC(classIndex);
  case AREA_UNDER_PRC: return eval.areaUnderPRC(classIndex);
  case WEIGHTED_TRUE_POSITIVE_RATE: return eval.weightedTruePositiveRate();
  case WEIGHTED_FALSE_POSITIVE_RATE: return eval.weightedFalsePositiveRate();
  case WEIGHTED_TRUE_NEGATIVE_RATE: return eval.weightedTrueNegativeRate();
  case WEIGHTED_FALSE_NEGATIVE_RATE: return eval.weightedFalseNegativeRate();
  case WEIGHTED_IR_PRECISION: return eval.weightedPrecision();
  case WEIGHTED_IR_RECALL: return eval.weightedRecall();
  case WEIGHTED_F_MEASURE: return eval.weightedFMeasure();
  case WEIGHTED_MATTHEWS_CORRELATION_COEFFICIENT: return eval.weightedMatthewsCorrelation();
  case WEIGHTED_AREA_UNDER_ROC: return eval.weightedAreaUnderROC();
  case WEIGHTED_AREA_UNDER_PRC: return eval.weightedAreaUnderPRC();
  case UNWEIGHTED_MACRO_F_MEASURE: return eval.unweightedMacroFmeasure();
  case UNWEIGHTED_MICRO_F_MEASURE: return eval.unweightedMicroFmeasure();
  case BIAS: return eval.getPluginMetric(Bias.class.getName()).getStatistic(Bias.NAME);
  case RSQUARED: return eval.getPluginMetric(RSquared.class.getName()).getStatistic(RSquared.NAME);
  case SDR: return eval.getPluginMetric(SDR.class.getName()).getStatistic(SDR.NAME);
  case RPD: return eval.getPluginMetric(RPD.class.getName()).getStatistic(RPD.NAME);
  default: throw new IllegalArgumentException("Unhandled statistic field: " + statistic);
  }
}
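A hypothetical call site for the helper above (not taken from the source file); the import path of EvaluationStatistic is assumed to mirror EvaluationHelper's package and may differ in the actual ADAMS code base.

import java.util.Random;
import adams.flow.core.EvaluationHelper;
import adams.flow.core.EvaluationStatistic; // assumed location of the enum matched in the switch above
import weka.classifiers.Classifier;
import weka.classifiers.Evaluation;
import weka.core.Instances;

public class EvaluationHelperDemo {
  public static double weightedAuc(Classifier cls, Instances data) throws Exception {
    Evaluation eval = new Evaluation(data);
    eval.crossValidateModel(cls, data, 10, new Random(1));
    // The classIndex argument only matters for per-class statistics;
    // WEIGHTED_AREA_UNDER_ROC maps straight to eval.weightedAreaUnderROC().
    return EvaluationHelper.getValue(eval, EvaluationStatistic.WEIGHTED_AREA_UNDER_ROC, 0);
  }
}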
From source file:net.sf.jclal.evaluation.measure.SingleLabelEvaluation.java
License:Open Source License
/**
 *
 * @param evaluation The evaluation
 */
public void setEvaluation(Evaluation evaluation) {
  try {
    this.evaluation = evaluation;
    StringBuilder st = new StringBuilder();
    st.append("Iteration: ").append(getIteration()).append("\n");
    st.append("Labeled set size: ").append(getLabeledSetSize()).append("\n");
    st.append("Unlabelled set size: ").append(getUnlabeledSetSize()).append("\n");
    st.append("\t\n");
    st.append("Correctly Classified Instances: ").append(evaluation.pctCorrect()).append("\n");
    st.append("Incorrectly Classified Instances: ").append(evaluation.pctIncorrect()).append("\n");
    st.append("Kappa statistic: ").append(evaluation.kappa()).append("\n");
    st.append("Mean absolute error: ").append(evaluation.meanAbsoluteError()).append("\n");
    st.append("Root mean squared error: ").append(evaluation.rootMeanSquaredError()).append("\n");
    st.append("Relative absolute error: ").append(evaluation.relativeAbsoluteError()).append("\n");
    st.append("Root relative squared error: ").append(evaluation.rootRelativeSquaredError()).append("\n");
    st.append("Coverage of cases: ").append(evaluation.coverageOfTestCasesByPredictedRegions()).append("\n");
    st.append("Mean region size: ").append(evaluation.sizeOfPredictedRegions()).append("\n");
    st.append("Weighted Precision: ").append(evaluation.weightedPrecision()).append("\n");
    st.append("Weighted Recall: ").append(evaluation.weightedRecall()).append("\n");
    st.append("Weighted FMeasure: ").append(evaluation.weightedFMeasure()).append("\n");
    st.append("Weighted TruePositiveRate: ").append(evaluation.weightedTruePositiveRate()).append("\n");
    st.append("Weighted FalsePositiveRate: ").append(evaluation.weightedFalsePositiveRate()).append("\n");
    st.append("Weighted MatthewsCorrelation: ").append(evaluation.weightedMatthewsCorrelation()).append("\n");
    st.append("Weighted AreaUnderROC: ").append(evaluation.weightedAreaUnderROC()).append("\n");
    st.append("Weighted AreaUnderPRC: ").append(evaluation.weightedAreaUnderPRC()).append("\n");
    st.append("\t\t\n");
    loadMetrics(st.toString());
  } catch (Exception e) {
    Logger.getLogger(SingleLabelEvaluation.class.getName()).log(Level.SEVERE, null, e);
  }
}
From source file:org.openml.webapplication.fantail.dc.landmarking.J48BasedLandmarker.java
License:Open Source License
public Map<String, Double> characterize(Instances data) {
  int numFolds = m_NumFolds;
  double score1 = 0.5;
  double score2 = 0.5;
  // double score3 = 0.5;
  double score3 = 0.5;
  double score4 = 0.5;
  // double score3 = 0.5;
  double score5 = 0.5;
  double score6 = 0.5;
  double score7 = 0.5;
  double score8 = 0.5;
  double score9 = 0.5;

  weka.classifiers.trees.J48 cls = new weka.classifiers.trees.J48();
  cls.setConfidenceFactor(0.00001f);
  try {
    weka.classifiers.Evaluation eval = new weka.classifiers.Evaluation(data);
    eval.crossValidateModel(cls, data, numFolds, new java.util.Random(1));
    score1 = eval.pctIncorrect();
    score2 = eval.weightedAreaUnderROC();
    score7 = eval.kappa();
  } catch (Exception e) {
    e.printStackTrace();
  }

  //
  cls = new weka.classifiers.trees.J48();
  cls.setConfidenceFactor(0.0001f);
  try {
    weka.classifiers.Evaluation eval = new weka.classifiers.Evaluation(data);
    eval.crossValidateModel(cls, data, numFolds, new java.util.Random(1));
    score3 = eval.pctIncorrect();
    score4 = eval.weightedAreaUnderROC();
    score8 = eval.kappa();
  } catch (Exception e) {
    e.printStackTrace();
  }

  //
  cls = new weka.classifiers.trees.J48();
  cls.setConfidenceFactor(0.001f);
  try {
    weka.classifiers.Evaluation eval = new weka.classifiers.Evaluation(data);
    eval.crossValidateModel(cls, data, numFolds, new java.util.Random(1));
    score5 = eval.pctIncorrect();
    score6 = eval.weightedAreaUnderROC();
    score9 = eval.kappa();
  } catch (Exception e) {
    e.printStackTrace();
  }

  Map<String, Double> qualities = new HashMap<String, Double>();
  qualities.put(ids[0], score1);
  qualities.put(ids[1], score2);
  qualities.put(ids[2], score3);
  qualities.put(ids[3], score4);
  qualities.put(ids[4], score5);
  qualities.put(ids[5], score6);
  qualities.put(ids[6], score7);
  qualities.put(ids[7], score8);
  qualities.put(ids[8], score9);
  return qualities;
}
From source file:org.openml.webapplication.fantail.dc.landmarking.RandomTreeBasedLandmarker.java
License:Open Source License
public Map<String, Double> characterize(Instances data) {
  int numFolds = m_NumFolds;
  double score1 = 0.5;
  double score2 = 0.5;
  double score3 = 0.5;

  weka.classifiers.trees.RandomTree cls = new weka.classifiers.trees.RandomTree();
  cls.setSeed(m_Seed);
  cls.setMaxDepth(1);
  try {
    // ds.buildClassifier(data);
    weka.classifiers.Evaluation eval = new weka.classifiers.Evaluation(data);
    eval.crossValidateModel(cls, data, numFolds, new java.util.Random(1));
    score1 = eval.weightedAreaUnderROC();
  } catch (Exception e) {
    e.printStackTrace();
  }

  //
  cls = new weka.classifiers.trees.RandomTree();
  cls.setSeed(m_Seed);
  cls.setMaxDepth(2);
  try {
    weka.classifiers.Evaluation eval = new weka.classifiers.Evaluation(data);
    eval.crossValidateModel(cls, data, numFolds, new java.util.Random(1));
    score2 = eval.weightedAreaUnderROC();
  } catch (Exception e) {
    e.printStackTrace();
  }

  //
  cls = new weka.classifiers.trees.RandomTree();
  cls.setSeed(m_Seed);
  cls.setMaxDepth(3);
  try {
    weka.classifiers.Evaluation eval = new weka.classifiers.Evaluation(data);
    eval.crossValidateModel(cls, data, numFolds, new java.util.Random(1));
    score3 = eval.weightedAreaUnderROC();
  } catch (Exception e) {
    e.printStackTrace();
  }

  Map<String, Double> qualities = new HashMap<String, Double>();
  qualities.put(ids[0], score1);
  qualities.put(ids[1], score2);
  qualities.put(ids[2], score3);
  return qualities;
}
From source file:org.openml.webapplication.fantail.dc.landmarking.REPTreeBasedLandmarker.java
License:Open Source License
public Map<String, Double> characterize(Instances data) {
  int numFolds = m_NumFolds;
  double score1 = 0.5;
  double score2 = 0.5;
  // double score3 = 0.5;
  double score3 = 0.5;
  double score4 = 0.5;
  // double score3 = 0.5;
  double score5 = 0.5;
  double score6 = 0.5;
  double score7 = 0.5;
  double score8 = 0.5;
  double score9 = 0.5;

  weka.classifiers.trees.REPTree cls = new weka.classifiers.trees.REPTree();
  cls.setMaxDepth(1);
  try {
    weka.classifiers.Evaluation eval = new weka.classifiers.Evaluation(data);
    eval.crossValidateModel(cls, data, numFolds, new java.util.Random(1));
    score1 = eval.pctIncorrect();
    score2 = eval.weightedAreaUnderROC();
    score7 = eval.kappa();
  } catch (Exception e) {
    e.printStackTrace();
  }

  //
  cls = new weka.classifiers.trees.REPTree();
  cls.setMaxDepth(2);
  try {
    weka.classifiers.Evaluation eval = new weka.classifiers.Evaluation(data);
    eval.crossValidateModel(cls, data, numFolds, new java.util.Random(1));
    score3 = eval.pctIncorrect();
    score4 = eval.weightedAreaUnderROC();
    score8 = eval.kappa();
  } catch (Exception e) {
    e.printStackTrace();
  }

  //
  cls = new weka.classifiers.trees.REPTree();
  cls.setMaxDepth(3);
  try {
    weka.classifiers.Evaluation eval = new weka.classifiers.Evaluation(data);
    eval.crossValidateModel(cls, data, numFolds, new java.util.Random(1));
    score5 = eval.pctIncorrect();
    score6 = eval.weightedAreaUnderROC();
    score9 = eval.kappa();
  } catch (Exception e) {
    e.printStackTrace();
  }

  Map<String, Double> qualities = new HashMap<String, Double>();
  qualities.put(ids[0], score1);
  qualities.put(ids[1], score2);
  qualities.put(ids[2], score3);
  qualities.put(ids[3], score4);
  qualities.put(ids[4], score5);
  qualities.put(ids[5], score6);
  qualities.put(ids[6], score7);
  qualities.put(ids[7], score8);
  qualities.put(ids[8], score9);
  return qualities;
}
From source file:org.openml.webapplication.fantail.dc.landmarking.SimpleLandmarkers.java
License:Open Source License
public Map<String, Double> characterize(Instances data) {
  int numFolds = m_NumFolds;
  double score1 = 0.5;
  double score2 = 0.5;
  double score5 = 0.5;
  double score6 = 0.5;
  double score3 = 0.5;
  double score4 = 0.5;

  weka.classifiers.trees.DecisionStump ds = new weka.classifiers.trees.DecisionStump();
  try {
    weka.classifiers.Evaluation eval = new weka.classifiers.Evaluation(data);
    eval.crossValidateModel(ds, data, numFolds, new java.util.Random(1));
    score1 = eval.pctIncorrect();
    score2 = eval.weightedAreaUnderROC();
    score3 = eval.kappa();
  } catch (Exception e) {
    e.printStackTrace();
  }

  try {
    weka.classifiers.bayes.NaiveBayes nb = new weka.classifiers.bayes.NaiveBayes();
    weka.classifiers.Evaluation eval = new weka.classifiers.Evaluation(data);
    eval.crossValidateModel(nb, data, numFolds, new java.util.Random(1));
    score5 = eval.pctIncorrect();
    score6 = eval.weightedAreaUnderROC();
    score4 = eval.kappa();
  } catch (Exception e) {
    e.printStackTrace();
  }

  Map<String, Double> qualities = new HashMap<String, Double>();
  qualities.put(ids[0], score1);
  qualities.put(ids[1], score2);
  qualities.put(ids[2], score5);
  qualities.put(ids[3], score6);
  qualities.put(ids[4], score3);
  qualities.put(ids[5], score4);
  return qualities;
}
From source file:org.openml.webapplication.io.Output.java
License:Open Source License
public static Map<Metric, MetricScore> evaluatorToMap(Evaluation evaluator, int classes, TaskType task)
    throws Exception {
  Map<Metric, MetricScore> m = new HashMap<Metric, MetricScore>();

  if (task == TaskType.REGRESSION) {
    // here all measures for regression tasks
    m.put(new Metric("mean_absolute_error", "openml.evaluation.mean_absolute_error(1.0)"),
        new MetricScore(evaluator.meanAbsoluteError(), (int) evaluator.numInstances()));
    m.put(new Metric("mean_prior_absolute_error", "openml.evaluation.mean_prior_absolute_error(1.0)"),
        new MetricScore(evaluator.meanPriorAbsoluteError(), (int) evaluator.numInstances()));
    m.put(new Metric("root_mean_squared_error", "openml.evaluation.root_mean_squared_error(1.0)"),
        new MetricScore(evaluator.rootMeanSquaredError(), (int) evaluator.numInstances()));
    m.put(new Metric("root_mean_prior_squared_error", "openml.evaluation.root_mean_prior_squared_error(1.0)"),
        new MetricScore(evaluator.rootMeanPriorSquaredError(), (int) evaluator.numInstances()));
    m.put(new Metric("relative_absolute_error", "openml.evaluation.relative_absolute_error(1.0)"),
        new MetricScore(evaluator.relativeAbsoluteError() / 100, (int) evaluator.numInstances()));
    m.put(new Metric("root_relative_squared_error", "openml.evaluation.root_relative_squared_error(1.0)"),
        new MetricScore(evaluator.rootRelativeSquaredError() / 100, (int) evaluator.numInstances()));
  } else if (task == TaskType.CLASSIFICATION || task == TaskType.LEARNINGCURVE
      || task == TaskType.TESTTHENTRAIN) {
    m.put(new Metric("average_cost", "openml.evaluation.average_cost(1.0)"),
        new MetricScore(evaluator.avgCost(), (int) evaluator.numInstances()));
    m.put(new Metric("total_cost", "openml.evaluation.total_cost(1.0)"),
        new MetricScore(evaluator.totalCost(), (int) evaluator.numInstances()));
    m.put(new Metric("mean_absolute_error", "openml.evaluation.mean_absolute_error(1.0)"),
        new MetricScore(evaluator.meanAbsoluteError(), (int) evaluator.numInstances()));
    m.put(new Metric("mean_prior_absolute_error", "openml.evaluation.mean_prior_absolute_error(1.0)"),
        new MetricScore(evaluator.meanPriorAbsoluteError(), (int) evaluator.numInstances()));
    m.put(new Metric("root_mean_squared_error", "openml.evaluation.root_mean_squared_error(1.0)"),
        new MetricScore(evaluator.rootMeanSquaredError(), (int) evaluator.numInstances()));
    m.put(new Metric("root_mean_prior_squared_error", "openml.evaluation.root_mean_prior_squared_error(1.0)"),
        new MetricScore(evaluator.rootMeanPriorSquaredError(), (int) evaluator.numInstances()));
    m.put(new Metric("relative_absolute_error", "openml.evaluation.relative_absolute_error(1.0)"),
        new MetricScore(evaluator.relativeAbsoluteError() / 100, (int) evaluator.numInstances()));
    m.put(new Metric("root_relative_squared_error", "openml.evaluation.root_relative_squared_error(1.0)"),
        new MetricScore(evaluator.rootRelativeSquaredError() / 100, (int) evaluator.numInstances()));
    m.put(new Metric("prior_entropy", "openml.evaluation.prior_entropy(1.0)"),
        new MetricScore(evaluator.priorEntropy(), (int) evaluator.numInstances()));
    m.put(new Metric("kb_relative_information_score", "openml.evaluation.kb_relative_information_score(1.0)"),
        new MetricScore(evaluator.KBRelativeInformation() / 100, (int) evaluator.numInstances()));

    Double[] precision = new Double[classes];
    Double[] recall = new Double[classes];
    Double[] auroc = new Double[classes];
    Double[] fMeasure = new Double[classes];
    Double[] instancesPerClass = new Double[classes];
    double[][] confussion_matrix = evaluator.confusionMatrix();
    for (int i = 0; i < classes; ++i) {
      precision[i] = evaluator.precision(i);
      recall[i] = evaluator.recall(i);
      auroc[i] = evaluator.areaUnderROC(i);
      fMeasure[i] = evaluator.fMeasure(i);
      instancesPerClass[i] = 0.0;
      for (int j = 0; j < classes; ++j) {
        instancesPerClass[i] += confussion_matrix[i][j];
      }
    }

    m.put(new Metric("predictive_accuracy", "openml.evaluation.predictive_accuracy(1.0)"),
        new MetricScore(evaluator.pctCorrect() / 100, (int) evaluator.numInstances()));
    m.put(new Metric("kappa", "openml.evaluation.kappa(1.0)"),
        new MetricScore(evaluator.kappa(), (int) evaluator.numInstances()));
    m.put(new Metric("number_of_instances", "openml.evaluation.number_of_instances(1.0)"),
        new MetricScore(evaluator.numInstances(), instancesPerClass, (int) evaluator.numInstances()));
    m.put(new Metric("precision", "openml.evaluation.precision(1.0)"),
        new MetricScore(evaluator.weightedPrecision(), precision, (int) evaluator.numInstances()));
    m.put(new Metric("recall", "openml.evaluation.recall(1.0)"),
        new MetricScore(evaluator.weightedRecall(), recall, (int) evaluator.numInstances()));
    m.put(new Metric("f_measure", "openml.evaluation.f_measure(1.0)"),
        new MetricScore(evaluator.weightedFMeasure(), fMeasure, (int) evaluator.numInstances()));
    if (Utils.isMissingValue(evaluator.weightedAreaUnderROC()) == false) {
      m.put(new Metric("area_under_roc_curve", "openml.evaluation.area_under_roc_curve(1.0)"),
          new MetricScore(evaluator.weightedAreaUnderROC(), auroc, (int) evaluator.numInstances()));
    }
    m.put(new Metric("confusion_matrix", "openml.evaluation.confusion_matrix(1.0)"),
        new MetricScore(confussion_matrix));
  }
  return m;
}