Example usage for weka.classifiers Evaluation pctCorrect

List of usage examples for weka.classifiers Evaluation pctCorrect

Introduction

On this page you can find example usage for weka.classifiers Evaluation pctCorrect.

Prototype

public final double pctCorrect() 

Document

Gets the percentage of instances correctly classified (that is, for which a correct prediction was made).
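Before the usage examples, here is a minimal self-contained sketch of how pctCorrect() is typically read off a cross-validated Evaluation. The dataset file name ("iris.arff") and the class-index choice are assumptions for illustration, not taken from the examples below.

import java.util.Random;
import weka.classifiers.Classifier;
import weka.classifiers.Evaluation;
import weka.classifiers.trees.J48;
import weka.core.Instances;
import weka.core.converters.ConverterUtils.DataSource;

public class PctCorrectSketch {
    public static void main(String[] args) throws Exception {
        Instances data = DataSource.read("iris.arff"); // assumed file name
        data.setClassIndex(data.numAttributes() - 1); // assume class attribute is last

        Classifier cls = new J48();
        Evaluation eval = new Evaluation(data);
        eval.crossValidateModel(cls, data, 10, new Random(1));

        // pctCorrect() returns a percentage in the 0..100 range
        System.out.println("Accuracy: " + eval.pctCorrect() + "%");
    }
}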

Usage

From source file:function.PercentageSplit.java

public static double percentageSplitRate(Instances data, Classifier cls) throws Exception {
    // Hold out the last 20% of the instances for testing.
    int trainSize = (int) Math.round(data.numInstances() * 0.8);
    int testSize = data.numInstances() - trainSize;
    Instances train = new Instances(data, 0, trainSize);
    Instances test = new Instances(data, trainSize, testSize);

    cls.buildClassifier(train); // train on the 80% split before evaluating
    Evaluation eval = new Evaluation(train);
    eval.evaluateModel(cls, test);
    return eval.pctCorrect();
}
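Note that this helper splits the instances in their file order, so a dataset sorted by class would give a misleading estimate. A minimal remedy, sketched here as an assumption rather than part of the original source, is to shuffle with a fixed seed before splitting:

// Sketch: randomize instance order before the percentage split.
// 'data' and 'cls' are assumed to be in scope; the seed 42 is arbitrary.
Instances shuffled = new Instances(data);
shuffled.randomize(new java.util.Random(42));
double accuracy = percentageSplitRate(shuffled, cls);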

From source file:hero.unstable.util.classification.wekaClassifier.java

/** Result as:
 * [pctCorrect (as 0..1 fraction),
 *  precision(class 0), precision(class 1), weighted precision,
 *  recall(class 0), recall(class 1), weighted recall,
 *  fMeasure(class 0), fMeasure(class 1), weighted fMeasure]
 * @param result an Evaluation populated for a two-class problem
 * @return 10 metrics
 */
public double[] getMetrics(Evaluation result) {
    double[] metrics = new double[10];

    metrics[0] = result.pctCorrect() / 100; // pctCorrect() is 0..100; store as a 0..1 fraction

    metrics[1] = result.precision(0);
    metrics[2] = result.precision(1);
    metrics[3] = result.weightedPrecision();

    metrics[4] = result.recall(0);
    metrics[5] = result.recall(1);
    metrics[6] = result.weightedRecall();

    metrics[7] = result.fMeasure(0);
    metrics[8] = result.fMeasure(1);
    metrics[9] = result.weightedFMeasure();

    return metrics;
}
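Since getMetrics indexes classes 0 and 1, it presumes a two-class problem. A short usage sketch, with the surrounding setup assumed rather than taken from the source:

// Hypothetical setup: 'data' is a two-class Instances object and
// 'classifier' is an instance of the wekaClassifier class above.
Evaluation result = new Evaluation(data);
result.crossValidateModel(new weka.classifiers.trees.J48(), data, 10, new java.util.Random(1));
double[] metrics = classifier.getMetrics(result);
System.out.println("Accuracy as a 0..1 fraction: " + metrics[0]);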

From source file:mlpoc.MLPOC.java

public static Evaluation crossValidate(String filename) {
    Evaluation eval = null;
    try {
        BufferedReader br = new BufferedReader(new FileReader(filename));
        // loads data and set class index
        Instances data = new Instances(br);
        br.close();
        /*File csv=new File(filename);
        CSVLoader loader = new CSVLoader();
        loader.setSource(csv);
        Instances data = loader.getDataSet();*/
        data.setClassIndex(data.numAttributes() - 1);

        // classifier: parse "classname + options" the standard Weka way
        String[] tmpOptions = Utils.splitOptions("weka.classifiers.trees.J48 -C 0.25");
        String classname = tmpOptions[0];
        tmpOptions[0] = "";
        Classifier cls = (Classifier) Utils.forName(Classifier.class, classname, tmpOptions);

        // other options
        int seed = 2;
        int folds = 10;

        // randomize data
        Random rand = new Random(seed);
        Instances randData = new Instances(data);
        randData.randomize(rand);
        if (randData.classAttribute().isNominal())
            randData.stratify(folds);

        // perform cross-validation
        eval = new Evaluation(randData);
        for (int n = 0; n < folds; n++) {
            Instances train = randData.trainCV(folds, n);
            Instances test = randData.testCV(folds, n);
            // the above code is used by the StratifiedRemoveFolds filter, the
            // code below by the Explorer/Experimenter:
            // Instances train = randData.trainCV(folds, n, rand);

            // build and evaluate classifier
            Classifier clsCopy = Classifier.makeCopy(cls);
            clsCopy.buildClassifier(train);
            eval.evaluateModel(clsCopy, test);
        }

        // output evaluation
        System.out.println();
        System.out.println("=== Setup ===");
        System.out
                .println("Classifier: " + cls.getClass().getName() + " " + Utils.joinOptions(cls.getOptions()));
        System.out.println("Dataset: " + data.relationName());
        System.out.println("Folds: " + folds);
        System.out.println("Seed: " + seed);
        System.out.println();
        System.out.println(eval.toSummaryString("Summary for testing", true));
        System.out.println("Correctly Classified Instances: " + eval.correct());
        System.out.println("Percentage of Correctly Classified Instances: " + eval.pctCorrect());
        System.out.println("InCorrectly Classified Instances: " + eval.incorrect());
        System.out.println("Percentage of InCorrectly Classified Instances: " + eval.pctIncorrect());

    } catch (Exception ex) {
        System.err.println(ex.getMessage());
    }
    return eval;
}
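Because the same Evaluation object accumulates predictions across all ten folds, the pctCorrect() printed at the end is the overall cross-validated accuracy rather than the accuracy of any single fold.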

From source file:net.sf.jclal.evaluation.measure.SingleLabelEvaluation.java

License: Open Source License

/**
 *
 * @param evaluation The evaluation
 */
public void setEvaluation(Evaluation evaluation) {

    try {
        this.evaluation = evaluation;
        StringBuilder st = new StringBuilder();

        st.append("Iteration: ").append(getIteration()).append("\n");
        st.append("Labeled set size: ").append(getLabeledSetSize()).append("\n");
        st.append("Unlabelled set size: ").append(getUnlabeledSetSize()).append("\n");
        st.append("\t\n");

        st.append("Correctly Classified Instances: ").append(evaluation.pctCorrect()).append("\n");
        st.append("Incorrectly Classified Instances: ").append(evaluation.pctIncorrect()).append("\n");
        st.append("Kappa statistic: ").append(evaluation.kappa()).append("\n");
        st.append("Mean absolute error: ").append(evaluation.meanAbsoluteError()).append("\n");
        st.append("Root mean squared error: ").append(evaluation.rootMeanSquaredError()).append("\n");

        st.append("Relative absolute error: ").append(evaluation.relativeAbsoluteError()).append("\n");
        st.append("Root relative squared error: ").append(evaluation.rootRelativeSquaredError()).append("\n");
        st.append("Coverage of cases: ").append(evaluation.coverageOfTestCasesByPredictedRegions())
                .append("\n");
        st.append("Mean region size: ").append(evaluation.sizeOfPredictedRegions()).append("\n");

        st.append("Weighted Precision: ").append(evaluation.weightedPrecision()).append("\n");
        st.append("Weighted Recall: ").append(evaluation.weightedRecall()).append("\n");
        st.append("Weighted FMeasure: ").append(evaluation.weightedFMeasure()).append("\n");
        st.append("Weighted TruePositiveRate: ").append(evaluation.weightedTruePositiveRate()).append("\n");
        st.append("Weighted FalsePositiveRate: ").append(evaluation.weightedFalsePositiveRate()).append("\n");
        st.append("Weighted MatthewsCorrelation: ").append(evaluation.weightedMatthewsCorrelation())
                .append("\n");
        st.append("Weighted AreaUnderROC: ").append(evaluation.weightedAreaUnderROC()).append("\n");
        st.append("Weighted AreaUnderPRC: ").append(evaluation.weightedAreaUnderPRC()).append("\n");

        st.append("\t\t\n");

        loadMetrics(st.toString());

    } catch (Exception e) {
        Logger.getLogger(SingleLabelEvaluation.class.getName()).log(Level.SEVERE, null, e);
    }
}

From source file:org.openml.webapplication.io.Output.java

License: Open Source License

public static Map<Metric, MetricScore> evaluatorToMap(Evaluation evaluator, int classes, TaskType task)
        throws Exception {
    Map<Metric, MetricScore> m = new HashMap<Metric, MetricScore>();

    if (task == TaskType.REGRESSION) {

        // here all measures for regression tasks
        m.put(new Metric("mean_absolute_error", "openml.evaluation.mean_absolute_error(1.0)"),
                new MetricScore(evaluator.meanAbsoluteError(), (int) evaluator.numInstances()));
        m.put(new Metric("mean_prior_absolute_error", "openml.evaluation.mean_prior_absolute_error(1.0)"),
                new MetricScore(evaluator.meanPriorAbsoluteError(), (int) evaluator.numInstances()));
        m.put(new Metric("root_mean_squared_error", "openml.evaluation.root_mean_squared_error(1.0)"),
                new MetricScore(evaluator.rootMeanSquaredError(), (int) evaluator.numInstances()));
        m.put(new Metric("root_mean_prior_squared_error",
                "openml.evaluation.root_mean_prior_squared_error(1.0)"),
                new MetricScore(evaluator.rootMeanPriorSquaredError(), (int) evaluator.numInstances()));
        m.put(new Metric("relative_absolute_error", "openml.evaluation.relative_absolute_error(1.0)"),
                new MetricScore(evaluator.relativeAbsoluteError() / 100, (int) evaluator.numInstances()));
        m.put(new Metric("root_relative_squared_error", "openml.evaluation.root_relative_squared_error(1.0)"),
                new MetricScore(evaluator.rootRelativeSquaredError() / 100, (int) evaluator.numInstances()));

    } else if (task == TaskType.CLASSIFICATION || task == TaskType.LEARNINGCURVE
            || task == TaskType.TESTTHENTRAIN) {

        m.put(new Metric("average_cost", "openml.evaluation.average_cost(1.0)"),
                new MetricScore(evaluator.avgCost(), (int) evaluator.numInstances()));
        m.put(new Metric("total_cost", "openml.evaluation.total_cost(1.0)"),
                new MetricScore(evaluator.totalCost(), (int) evaluator.numInstances()));

        m.put(new Metric("mean_absolute_error", "openml.evaluation.mean_absolute_error(1.0)"),
                new MetricScore(evaluator.meanAbsoluteError(), (int) evaluator.numInstances()));
        m.put(new Metric("mean_prior_absolute_error", "openml.evaluation.mean_prior_absolute_error(1.0)"),
                new MetricScore(evaluator.meanPriorAbsoluteError(), (int) evaluator.numInstances()));
        m.put(new Metric("root_mean_squared_error", "openml.evaluation.root_mean_squared_error(1.0)"),
                new MetricScore(evaluator.rootMeanSquaredError(), (int) evaluator.numInstances()));
        m.put(new Metric("root_mean_prior_squared_error",
                "openml.evaluation.root_mean_prior_squared_error(1.0)"),
                new MetricScore(evaluator.rootMeanPriorSquaredError(), (int) evaluator.numInstances()));
        m.put(new Metric("relative_absolute_error", "openml.evaluation.relative_absolute_error(1.0)"),
                new MetricScore(evaluator.relativeAbsoluteError() / 100, (int) evaluator.numInstances()));
        m.put(new Metric("root_relative_squared_error", "openml.evaluation.root_relative_squared_error(1.0)"),
                new MetricScore(evaluator.rootRelativeSquaredError() / 100, (int) evaluator.numInstances()));

        m.put(new Metric("prior_entropy", "openml.evaluation.prior_entropy(1.0)"),
                new MetricScore(evaluator.priorEntropy(), (int) evaluator.numInstances()));
        m.put(new Metric("kb_relative_information_score",
                "openml.evaluation.kb_relative_information_score(1.0)"),
                new MetricScore(evaluator.KBRelativeInformation() / 100, (int) evaluator.numInstances()));

        Double[] precision = new Double[classes];
        Double[] recall = new Double[classes];
        Double[] auroc = new Double[classes];
        Double[] fMeasure = new Double[classes];
        Double[] instancesPerClass = new Double[classes];
        double[][] confusionMatrix = evaluator.confusionMatrix();
        for (int i = 0; i < classes; ++i) {
            precision[i] = evaluator.precision(i);
            recall[i] = evaluator.recall(i);
            auroc[i] = evaluator.areaUnderROC(i);
            fMeasure[i] = evaluator.fMeasure(i);
            instancesPerClass[i] = 0.0;
            for (int j = 0; j < classes; ++j) {
                instancesPerClass[i] += confusionMatrix[i][j];
            }
        }

        m.put(new Metric("predictive_accuracy", "openml.evaluation.predictive_accuracy(1.0)"),
                new MetricScore(evaluator.pctCorrect() / 100, (int) evaluator.numInstances()));
        m.put(new Metric("kappa", "openml.evaluation.kappa(1.0)"),
                new MetricScore(evaluator.kappa(), (int) evaluator.numInstances()));

        m.put(new Metric("number_of_instances", "openml.evaluation.number_of_instances(1.0)"),
                new MetricScore(evaluator.numInstances(), instancesPerClass, (int) evaluator.numInstances()));

        m.put(new Metric("precision", "openml.evaluation.precision(1.0)"),
                new MetricScore(evaluator.weightedPrecision(), precision, (int) evaluator.numInstances()));
        m.put(new Metric("recall", "openml.evaluation.recall(1.0)"),
                new MetricScore(evaluator.weightedRecall(), recall, (int) evaluator.numInstances()));
        m.put(new Metric("f_measure", "openml.evaluation.f_measure(1.0)"),
                new MetricScore(evaluator.weightedFMeasure(), fMeasure, (int) evaluator.numInstances()));
        if (!Utils.isMissingValue(evaluator.weightedAreaUnderROC())) {
            m.put(new Metric("area_under_roc_curve", "openml.evaluation.area_under_roc_curve(1.0)"),
                    new MetricScore(evaluator.weightedAreaUnderROC(), auroc, (int) evaluator.numInstances()));
        }
        m.put(new Metric("confusion_matrix", "openml.evaluation.confusion_matrix(1.0)"),
                new MetricScore(confussion_matrix));
    }
    return m;
}

From source file:qa.experiment.ProcessFeatureVector.java

public void evaluate(Instances trainingData) throws Exception {
    Classifier c1 = new SMO();
    Evaluation eval = new Evaluation(trainingData);
    // 10-fold cross-validation with a fixed seed for reproducibility
    eval.crossValidateModel(c1, trainingData, 10, new Random(1));
    System.out.println("Estimated Accuracy: " + Double.toString(eval.pctCorrect()));
}
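Since pctCorrect() reports a percentage in the 0..100 range, examples that need a fraction (such as getMetrics and the OpenML predictive_accuracy metric above) divide the result by 100.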

From source file:wekimini.learning.ModelEvaluator.java

public void evaluateAll(final List<Path> paths, final boolean isTraining, final int numFolds,
        PropertyChangeListener listener) {
    final List<Instances> data = new LinkedList<>();
    for (Path p : paths) {
        Instances i = w.getSupervisedLearningManager().getTrainingDataForPath(p, false);
        data.add(i);
    }

    setResults(new String[paths.size()]);
    if (evalWorker != null && evalWorker.getState() != SwingWorker.StateValue.DONE) {
        return;
    }

    evalWorker = new SwingWorker<Integer, Void>() {

        //trainingWorker.
        @Override
        public Integer doInBackground() {
            // train(); //TODO: Add status updates
            int progress = 0;
            //setProgress(progress);
            int numToEvaluate = 0;
            for (Path p : paths) {
                if (p.canBuild()) {
                    numToEvaluate++;
                }
            }

            int numEvaluated = 0;
            int numErr = 0;
            setEvalStatus(new EvaluationStatus(numToEvaluate, numEvaluated, numErr, false));

            for (int i = 0; i < paths.size(); i++) {
                Path p = paths.get(i);
                if (p.canBuild()) {
                    try {
                        System.out.println("Evaluating with " + numFolds);
                        //EVALUATE HERE: TODO 
                        Instances instances = w.getSupervisedLearningManager().getTrainingDataForPath(p, false);
                        Evaluation eval = new Evaluation(instances);
                        Classifier c = ((LearningModelBuilder) p.getModelBuilder()).getClassifier();
                        if (!isTraining) {
                            Random r = new Random();
                            eval.crossValidateModel(c, instances, numFolds, r);
                        } else {
                            Classifier c2 = Classifier.makeCopy(c);
                            c2.buildClassifier(instances);
                            eval.evaluateModel(c2, instances);
                        }
                        String result;
                        if (p.getModelBuilder() instanceof ClassificationModelBuilder) {
                            result = dFormat.format(eval.pctCorrect()) + "%"; // WON'T WORK FOR NN
                        } else {
                            result = dFormat.format(eval.errorRate()) + " (RMS)";
                        }
                        if (!isTraining) {
                            KadenzeLogging.getLogger().crossValidationComputed(w, i, numFolds, result);
                        } else {
                            KadenzeLogging.getLogger().trainingAccuracyComputed(w, i, result);
                        }
                        setResults(i, result);
                        finishedModel(i, result);
                        numEvaluated++;

                        if (isCancelled()) {
                            cancelMe(p);
                            setResults(i, "Cancelled");
                            return 0;
                        }

                    } catch (InterruptedException ex) {
                        cancelMe(p);
                        setResults(i, "Cancelled");
                        return 0; //Not sure this will be called...
                    } catch (Exception ex) {
                        numErr++;
                        Util.showPrettyErrorPane(null, "Error encountered during evaluation "
                                + p.getCurrentModelName() + ": " + ex.getMessage());
                        logger.log(Level.SEVERE, ex.getMessage());
                    }
                    setEvalStatus(new EvaluationStatus(numToEvaluate, numEvaluated, numErr, false));
                } else {
                    logger.log(Level.WARNING, "Could not evaluate path");
                }

            }
            wasCancelled = false;
            hadError = evaluationStatus.numErrorsEncountered > 0;
            return 0;
        }

        @Override
        public void done() {
            if (isCancelled()) {
                EvaluationStatus t = new EvaluationStatus(evaluationStatus.numToEvaluate,
                        evaluationStatus.numEvaluated, evaluationStatus.numErrorsEncountered, true);
                setEvalStatus(t);
            }
            finished();
        }
    };
    evalWorker.addPropertyChangeListener(listener);
    evalWorker.execute();
}