Example usage for weka.classifiers Evaluation totalCost

Introduction

This page shows example usage of the totalCost() method of weka.classifiers.Evaluation.

Prototype

public final double totalCost() 

Document

Gets the total cost, that is, the cost of each prediction times the weight of the instance, summed over all instances.
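
For orientation before the full example below, here is a minimal sketch of how totalCost() is typically reached: build the Evaluation with a CostMatrix, run an evaluation, then read the accumulated cost. The file name, classifier choice, and cost values are illustrative assumptions, not part of the usage example that follows.

import java.io.FileReader;
import java.util.Random;

import weka.classifiers.CostMatrix;
import weka.classifiers.Evaluation;
import weka.classifiers.trees.J48;
import weka.core.Instances;

public class TotalCostDemo {
    public static void main(String[] args) throws Exception {
        // "data.arff" is a placeholder for any dataset with a nominal class
        Instances data = new Instances(new FileReader("data.arff"));
        data.setClassIndex(data.numAttributes() - 1);

        // 2x2 cost matrix with asymmetric off-diagonal misclassification
        // costs; correct predictions keep the default cost of 0
        CostMatrix costs = new CostMatrix(2);
        costs.setElement(1, 0, 5.0);
        costs.setElement(0, 1, 1.0);

        // an Evaluation built with a cost matrix accumulates per-prediction costs
        Evaluation eval = new Evaluation(data, costs);
        eval.crossValidateModel(new J48(), data, 10, new Random(1));

        System.out.println("Total cost:   " + eval.totalCost());
        System.out.println("Average cost: " + eval.avgCost());
    }
}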

Usage

From source file: org.openml.webapplication.io.Output.java

License: Open Source License

// Assumed imports: java.util.Map, java.util.HashMap, weka.classifiers.Evaluation,
// weka.core.Utils, plus OpenML's own Metric, MetricScore and TaskType types.
public static Map<Metric, MetricScore> evaluatorToMap(Evaluation evaluator, int classes, TaskType task)
        throws Exception {
    Map<Metric, MetricScore> m = new HashMap<Metric, MetricScore>();

    if (task == TaskType.REGRESSION) {

        // here all measures for regression tasks
        m.put(new Metric("mean_absolute_error", "openml.evaluation.mean_absolute_error(1.0)"),
                new MetricScore(evaluator.meanAbsoluteError(), (int) evaluator.numInstances()));
        m.put(new Metric("mean_prior_absolute_error", "openml.evaluation.mean_prior_absolute_error(1.0)"),
                new MetricScore(evaluator.meanPriorAbsoluteError(), (int) evaluator.numInstances()));
        m.put(new Metric("root_mean_squared_error", "openml.evaluation.root_mean_squared_error(1.0)"),
                new MetricScore(evaluator.rootMeanSquaredError(), (int) evaluator.numInstances()));
        m.put(new Metric("root_mean_prior_squared_error",
                "openml.evaluation.root_mean_prior_squared_error(1.0)"),
                new MetricScore(evaluator.rootMeanPriorSquaredError(), (int) evaluator.numInstances()));
        m.put(new Metric("relative_absolute_error", "openml.evaluation.relative_absolute_error(1.0)"),
                new MetricScore(evaluator.relativeAbsoluteError() / 100, (int) evaluator.numInstances()));
        m.put(new Metric("root_relative_squared_error", "openml.evaluation.root_relative_squared_error(1.0)"),
                new MetricScore(evaluator.rootRelativeSquaredError() / 100, (int) evaluator.numInstances()));

    } else if (task == TaskType.CLASSIFICATION || task == TaskType.LEARNINGCURVE
            || task == TaskType.TESTTHENTRAIN) {

        // here all measures for classification tasks (Weka reports several of
        // these as percentages, hence the divisions by 100 below)
        m.put(new Metric("average_cost", "openml.evaluation.average_cost(1.0)"),
                new MetricScore(evaluator.avgCost(), (int) evaluator.numInstances()));
        m.put(new Metric("total_cost", "openml.evaluation.total_cost(1.0)"),
                new MetricScore(evaluator.totalCost(), (int) evaluator.numInstances()));

        m.put(new Metric("mean_absolute_error", "openml.evaluation.mean_absolute_error(1.0)"),
                new MetricScore(evaluator.meanAbsoluteError(), (int) evaluator.numInstances()));
        m.put(new Metric("mean_prior_absolute_error", "openml.evaluation.mean_prior_absolute_error(1.0)"),
                new MetricScore(evaluator.meanPriorAbsoluteError(), (int) evaluator.numInstances()));
        m.put(new Metric("root_mean_squared_error", "openml.evaluation.root_mean_squared_error(1.0)"),
                new MetricScore(evaluator.rootMeanSquaredError(), (int) evaluator.numInstances()));
        m.put(new Metric("root_mean_prior_squared_error",
                "openml.evaluation.root_mean_prior_squared_error(1.0)"),
                new MetricScore(evaluator.rootMeanPriorSquaredError(), (int) evaluator.numInstances()));
        m.put(new Metric("relative_absolute_error", "openml.evaluation.relative_absolute_error(1.0)"),
                new MetricScore(evaluator.relativeAbsoluteError() / 100, (int) evaluator.numInstances()));
        m.put(new Metric("root_relative_squared_error", "openml.evaluation.root_relative_squared_error(1.0)"),
                new MetricScore(evaluator.rootRelativeSquaredError() / 100, (int) evaluator.numInstances()));

        m.put(new Metric("prior_entropy", "openml.evaluation.prior_entropy(1.0)"),
                new MetricScore(evaluator.priorEntropy(), (int) evaluator.numInstances()));
        m.put(new Metric("kb_relative_information_score",
                "openml.evaluation.kb_relative_information_score(1.0)"),
                new MetricScore(evaluator.KBRelativeInformation() / 100, (int) evaluator.numInstances()));

        // collect per-class precision/recall/AUROC/F-measure and class counts
        Double[] precision = new Double[classes];
        Double[] recall = new Double[classes];
        Double[] auroc = new Double[classes];
        Double[] fMeasure = new Double[classes];
        Double[] instancesPerClass = new Double[classes];
        double[][] confusionMatrix = evaluator.confusionMatrix();
        for (int i = 0; i < classes; ++i) {
            precision[i] = evaluator.precision(i);
            recall[i] = evaluator.recall(i);
            auroc[i] = evaluator.areaUnderROC(i);
            fMeasure[i] = evaluator.fMeasure(i);
            instancesPerClass[i] = 0.0;
            // row sum of the confusion matrix = number of instances of class i
            for (int j = 0; j < classes; ++j) {
                instancesPerClass[i] += confusionMatrix[i][j];
            }
        }

        m.put(new Metric("predictive_accuracy", "openml.evaluation.predictive_accuracy(1.0)"),
                new MetricScore(evaluator.pctCorrect() / 100, (int) evaluator.numInstances()));
        m.put(new Metric("kappa", "openml.evaluation.kappa(1.0)"),
                new MetricScore(evaluator.kappa(), (int) evaluator.numInstances()));

        m.put(new Metric("number_of_instances", "openml.evaluation.number_of_instances(1.0)"),
                new MetricScore(evaluator.numInstances(), instancesPerClass, (int) evaluator.numInstances()));

        m.put(new Metric("precision", "openml.evaluation.precision(1.0)"),
                new MetricScore(evaluator.weightedPrecision(), precision, (int) evaluator.numInstances()));
        m.put(new Metric("recall", "openml.evaluation.recall(1.0)"),
                new MetricScore(evaluator.weightedRecall(), recall, (int) evaluator.numInstances()));
        m.put(new Metric("f_measure", "openml.evaluation.f_measure(1.0)"),
                new MetricScore(evaluator.weightedFMeasure(), fMeasure, (int) evaluator.numInstances()));
        if (!Utils.isMissingValue(evaluator.weightedAreaUnderROC())) {
            m.put(new Metric("area_under_roc_curve", "openml.evaluation.area_under_roc_curve(1.0)"),
                    new MetricScore(evaluator.weightedAreaUnderROC(), auroc, (int) evaluator.numInstances()));
        }
        m.put(new Metric("confusion_matrix", "openml.evaluation.confusion_matrix(1.0)"),
                new MetricScore(confusionMatrix));
    }
    return m;
}
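
The divisions by 100 above reflect that Evaluation returns pctCorrect(), relativeAbsoluteError(), rootRelativeSquaredError() and KBRelativeInformation() on a percentage scale, while the scores are stored as fractions. A hypothetical call site for the helper, assuming already-prepared train/test Instances and a trained classifier (these variables are illustrative, not part of the original file):

Evaluation eval = new Evaluation(train);
eval.evaluateModel(classifier, test);
Map<Metric, MetricScore> scores =
        Output.evaluatorToMap(eval, train.numClasses(), TaskType.CLASSIFICATION);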