List of usage examples for weka.classifiers.Evaluation#weightedFMeasure()
public double weightedFMeasure()
From source file: net.sf.jclal.evaluation.measure.SingleLabelEvaluation.java
License: Open Source License
/** * * @param evaluation The evaluation/* ww w. ja v a 2s. c o m*/ */ public void setEvaluation(Evaluation evaluation) { try { this.evaluation = evaluation; StringBuilder st = new StringBuilder(); st.append("Iteration: ").append(getIteration()).append("\n"); st.append("Labeled set size: ").append(getLabeledSetSize()).append("\n"); st.append("Unlabelled set size: ").append(getUnlabeledSetSize()).append("\n"); st.append("\t\n"); st.append("Correctly Classified Instances: ").append(evaluation.pctCorrect()).append("\n"); st.append("Incorrectly Classified Instances: ").append(evaluation.pctIncorrect()).append("\n"); st.append("Kappa statistic: ").append(evaluation.kappa()).append("\n"); st.append("Mean absolute error: ").append(evaluation.meanAbsoluteError()).append("\n"); st.append("Root mean squared error: ").append(evaluation.rootMeanSquaredError()).append("\n"); st.append("Relative absolute error: ").append(evaluation.relativeAbsoluteError()).append("\n"); st.append("Root relative squared error: ").append(evaluation.rootRelativeSquaredError()).append("\n"); st.append("Coverage of cases: ").append(evaluation.coverageOfTestCasesByPredictedRegions()) .append("\n"); st.append("Mean region size: ").append(evaluation.sizeOfPredictedRegions()).append("\n"); st.append("Weighted Precision: ").append(evaluation.weightedPrecision()).append("\n"); st.append("Weighted Recall: ").append(evaluation.weightedRecall()).append("\n"); st.append("Weighted FMeasure: ").append(evaluation.weightedFMeasure()).append("\n"); st.append("Weighted TruePositiveRate: ").append(evaluation.weightedTruePositiveRate()).append("\n"); st.append("Weighted FalsePositiveRate: ").append(evaluation.weightedFalsePositiveRate()).append("\n"); st.append("Weighted MatthewsCorrelation: ").append(evaluation.weightedMatthewsCorrelation()) .append("\n"); st.append("Weighted AreaUnderROC: ").append(evaluation.weightedAreaUnderROC()).append("\n"); st.append("Weighted AreaUnderPRC: 
").append(evaluation.weightedAreaUnderPRC()).append("\n"); st.append("\t\t\n"); loadMetrics(st.toString()); } catch (Exception e) { Logger.getLogger(SingleLabelEvaluation.class.getName()).log(Level.SEVERE, null, e); } }
From source file: org.openml.webapplication.io.Output.java
License: Open Source License
/**
 * Converts a WEKA {@link Evaluation} into a map of OpenML metrics.
 *
 * <p>Regression tasks get the absolute/squared error family; classification-like
 * tasks (classification, learning curve, test-then-train) additionally get cost,
 * entropy, per-class precision/recall/AUROC/F-measure arrays and the confusion
 * matrix. Percentage-style WEKA values are rescaled to [0, 1] via {@code / 100}.
 *
 * @param evaluator the WEKA evaluation to read measures from
 * @param classes   number of class values (sizes the per-class arrays)
 * @param task      the OpenML task type, which selects the metric set
 * @return map from metric descriptor to its score(s)
 * @throws Exception propagated from the underlying WEKA accessors
 */
public static Map<Metric, MetricScore> evaluatorToMap(Evaluation evaluator, int classes, TaskType task)
        throws Exception {
    Map<Metric, MetricScore> m = new HashMap<Metric, MetricScore>();
    if (task == TaskType.REGRESSION) {
        // here all measures for regression tasks
        m.put(new Metric("mean_absolute_error", "openml.evaluation.mean_absolute_error(1.0)"),
                new MetricScore(evaluator.meanAbsoluteError(), (int) evaluator.numInstances()));
        m.put(new Metric("mean_prior_absolute_error", "openml.evaluation.mean_prior_absolute_error(1.0)"),
                new MetricScore(evaluator.meanPriorAbsoluteError(), (int) evaluator.numInstances()));
        m.put(new Metric("root_mean_squared_error", "openml.evaluation.root_mean_squared_error(1.0)"),
                new MetricScore(evaluator.rootMeanSquaredError(), (int) evaluator.numInstances()));
        m.put(new Metric("root_mean_prior_squared_error", "openml.evaluation.root_mean_prior_squared_error(1.0)"),
                new MetricScore(evaluator.rootMeanPriorSquaredError(), (int) evaluator.numInstances()));
        // WEKA reports relative errors as percentages; normalize to a fraction.
        m.put(new Metric("relative_absolute_error", "openml.evaluation.relative_absolute_error(1.0)"),
                new MetricScore(evaluator.relativeAbsoluteError() / 100, (int) evaluator.numInstances()));
        m.put(new Metric("root_relative_squared_error", "openml.evaluation.root_relative_squared_error(1.0)"),
                new MetricScore(evaluator.rootRelativeSquaredError() / 100, (int) evaluator.numInstances()));
    } else if (task == TaskType.CLASSIFICATION || task == TaskType.LEARNINGCURVE
            || task == TaskType.TESTTHENTRAIN) {
        m.put(new Metric("average_cost", "openml.evaluation.average_cost(1.0)"),
                new MetricScore(evaluator.avgCost(), (int) evaluator.numInstances()));
        m.put(new Metric("total_cost", "openml.evaluation.total_cost(1.0)"),
                new MetricScore(evaluator.totalCost(), (int) evaluator.numInstances()));
        m.put(new Metric("mean_absolute_error", "openml.evaluation.mean_absolute_error(1.0)"),
                new MetricScore(evaluator.meanAbsoluteError(), (int) evaluator.numInstances()));
        m.put(new Metric("mean_prior_absolute_error", "openml.evaluation.mean_prior_absolute_error(1.0)"),
                new MetricScore(evaluator.meanPriorAbsoluteError(), (int) evaluator.numInstances()));
        m.put(new Metric("root_mean_squared_error", "openml.evaluation.root_mean_squared_error(1.0)"),
                new MetricScore(evaluator.rootMeanSquaredError(), (int) evaluator.numInstances()));
        m.put(new Metric("root_mean_prior_squared_error", "openml.evaluation.root_mean_prior_squared_error(1.0)"),
                new MetricScore(evaluator.rootMeanPriorSquaredError(), (int) evaluator.numInstances()));
        // Percentage-valued WEKA measures normalized to [0, 1].
        m.put(new Metric("relative_absolute_error", "openml.evaluation.relative_absolute_error(1.0)"),
                new MetricScore(evaluator.relativeAbsoluteError() / 100, (int) evaluator.numInstances()));
        m.put(new Metric("root_relative_squared_error", "openml.evaluation.root_relative_squared_error(1.0)"),
                new MetricScore(evaluator.rootRelativeSquaredError() / 100, (int) evaluator.numInstances()));
        m.put(new Metric("prior_entropy", "openml.evaluation.prior_entropy(1.0)"),
                new MetricScore(evaluator.priorEntropy(), (int) evaluator.numInstances()));
        m.put(new Metric("kb_relative_information_score", "openml.evaluation.kb_relative_information_score(1.0)"),
                new MetricScore(evaluator.KBRelativeInformation() / 100, (int) evaluator.numInstances()));
        // Per-class measure arrays, one slot per class value.
        Double[] precision = new Double[classes];
        Double[] recall = new Double[classes];
        Double[] auroc = new Double[classes];
        Double[] fMeasure = new Double[classes];
        Double[] instancesPerClass = new Double[classes];
        double[][] confussion_matrix = evaluator.confusionMatrix();
        for (int i = 0; i < classes; ++i) {
            precision[i] = evaluator.precision(i);
            recall[i] = evaluator.recall(i);
            auroc[i] = evaluator.areaUnderROC(i);
            fMeasure[i] = evaluator.fMeasure(i);
            instancesPerClass[i] = 0.0;
            // Row sum of the confusion matrix = number of instances of class i.
            for (int j = 0; j < classes; ++j) {
                instancesPerClass[i] += confussion_matrix[i][j];
            }
        }
        m.put(new Metric("predictive_accuracy", "openml.evaluation.predictive_accuracy(1.0)"),
                new MetricScore(evaluator.pctCorrect() / 100, (int) evaluator.numInstances()));
        m.put(new Metric("kappa", "openml.evaluation.kappa(1.0)"),
                new MetricScore(evaluator.kappa(), (int) evaluator.numInstances()));
        m.put(new Metric("number_of_instances", "openml.evaluation.number_of_instances(1.0)"),
                new MetricScore(evaluator.numInstances(), instancesPerClass, (int) evaluator.numInstances()));
        // Weighted aggregate plus the per-class breakdown for each measure.
        m.put(new Metric("precision", "openml.evaluation.precision(1.0)"),
                new MetricScore(evaluator.weightedPrecision(), precision, (int) evaluator.numInstances()));
        m.put(new Metric("recall", "openml.evaluation.recall(1.0)"),
                new MetricScore(evaluator.weightedRecall(), recall, (int) evaluator.numInstances()));
        m.put(new Metric("f_measure", "openml.evaluation.f_measure(1.0)"),
                new MetricScore(evaluator.weightedFMeasure(), fMeasure, (int) evaluator.numInstances()));
        // AUROC can be undefined (e.g. a class never predicted); skip it then.
        if (Utils.isMissingValue(evaluator.weightedAreaUnderROC()) == false) {
            m.put(new Metric("area_under_roc_curve", "openml.evaluation.area_under_roc_curve(1.0)"),
                    new MetricScore(evaluator.weightedAreaUnderROC(), auroc, (int) evaluator.numInstances()));
        }
        m.put(new Metric("confusion_matrix", "openml.evaluation.confusion_matrix(1.0)"),
                new MetricScore(confussion_matrix));
    }
    return m;
}
From source file: sentinets.Prediction.java
License: Open Source License
/**
 * Incrementally trains the wrapped SGD classifier on the instances loaded from
 * {@code inputFile}, evaluating before, during (per CV fold) and after the
 * update, and serializes the updated model to disk.
 *
 * <p>Fixes over the previous version: the report is accumulated in a
 * {@link StringBuilder} instead of O(n^2) string concatenation, and the local
 * classifier no longer shadows the {@code cls} field (renamed {@code sgd}).
 *
 * @param inputFile path passed to {@code setInstances} to load the unlabeled set
 * @param metrics   out-parameter; receives {fMeasure(0), fMeasure(1),
 *                  weightedFMeasure} triples for each evaluation performed
 * @return the textual evaluation report (or a no-op message if there were no
 *         new instances); exceptions are printed and an empty/partial report
 *         is returned
 */
public String updateModel(String inputFile, ArrayList<Double[]> metrics) {
    StringBuilder output = new StringBuilder();
    this.setInstances(inputFile);
    FilteredClassifier fcls = (FilteredClassifier) this.cls;
    // Renamed from `cls` so it no longer shadows the field of the same name.
    SGD sgd = (SGD) fcls.getClassifier();
    Filter filter = fcls.getFilter();
    Instances insAll;
    try {
        insAll = Filter.useFilter(this.unlabled, filter);
        if (insAll.size() > 0) {
            Random rand = new Random(10); // fixed seed for reproducible folds
            // With fewer than 10 instances, fall back to 2 folds.
            int folds = 10 > insAll.size() ? 2 : 10;
            Instances randData = new Instances(insAll);
            randData.randomize(rand);
            if (randData.classAttribute().isNominal()) {
                randData.stratify(folds);
            }
            Evaluation eval = new Evaluation(randData);
            // Baseline evaluation before any incremental updates.
            eval.evaluateModel(sgd, insAll);
            System.out.println("Initial Evaluation");
            System.out.println(eval.toSummaryString());
            System.out.println(eval.toClassDetailsString());
            metrics.add(new Double[] { eval.fMeasure(0), eval.fMeasure(1), eval.weightedFMeasure() });
            output.append("\n====" + "Initial Evaluation" + "====\n");
            output.append("\n").append(eval.toSummaryString());
            output.append("\n").append(eval.toClassDetailsString());
            System.out.println("Cross Validated Evaluation");
            output.append("\n====" + "Cross Validated Evaluation" + "====\n");
            for (int n = 0; n < folds; n++) {
                Instances train = randData.trainCV(folds, n);
                Instances test = randData.testCV(folds, n);
                // SGD is updateable: feed it one training instance at a time.
                for (int i = 0; i < train.numInstances(); i++) {
                    sgd.updateClassifier(train.instance(i));
                }
                eval.evaluateModel(sgd, test);
                System.out.println("Cross Validated Evaluation fold: " + n);
                output.append("\n====" + "Cross Validated Evaluation fold (" + n + ")====\n");
                System.out.println(eval.toSummaryString());
                System.out.println(eval.toClassDetailsString());
                output.append("\n").append(eval.toSummaryString());
                output.append("\n").append(eval.toClassDetailsString());
                metrics.add(new Double[] { eval.fMeasure(0), eval.fMeasure(1), eval.weightedFMeasure() });
            }
            // Final pass: update on every instance, then evaluate once more.
            for (int i = 0; i < insAll.numInstances(); i++) {
                sgd.updateClassifier(insAll.instance(i));
            }
            eval.evaluateModel(sgd, insAll);
            System.out.println("Final Evaluation");
            System.out.println(eval.toSummaryString());
            System.out.println(eval.toClassDetailsString());
            output.append("\n====" + "Final Evaluation" + "====\n");
            output.append("\n").append(eval.toSummaryString());
            output.append("\n").append(eval.toClassDetailsString());
            metrics.add(new Double[] { eval.fMeasure(0), eval.fMeasure(1), eval.weightedFMeasure() });
            fcls.setClassifier(sgd);
            String modelFilePath = outputDir + "/" + Utils.getOutDir(Utils.OutDirIndex.MODELS)
                    + "/updatedClassifier.model";
            weka.core.SerializationHelper.write(modelFilePath, fcls);
            output.append("\n" + "Updated Model saved at: ").append(modelFilePath);
        } else {
            output.append("No new instances for training the model.");
        }
    } catch (Exception e) {
        // NOTE(review): broad catch kept for compatibility; consider a logger
        // instead of printStackTrace and narrowing the exception type.
        e.printStackTrace();
    }
    return output.toString();
}