List of usage examples for weka.classifiers.Evaluation.correlationCoefficient()
public final double correlationCoefficient() throws Exception
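Across the examples below, the common pattern is: create an Evaluation on data with a numeric class, run cross-validation or evaluateModel, then read correlationCoefficient() (which throws an Exception if the class attribute is not numeric). The following is a minimal sketch of that pattern; the file name "regression.arff" and the choice of LinearRegression are illustrative assumptions, not taken from the examples on this page.

import java.util.Random;
import weka.classifiers.Evaluation;
import weka.classifiers.functions.LinearRegression;
import weka.core.Instances;
import weka.core.converters.ConverterUtils.DataSource;

public class CorrelationCoefficientSketch {
    public static void main(String[] args) throws Exception {
        // Illustrative path; replace with an ARFF file that has a numeric class attribute.
        Instances data = DataSource.read("regression.arff");
        data.setClassIndex(data.numAttributes() - 1);

        LinearRegression model = new LinearRegression();

        // 10-fold cross-validation; correlationCoefficient() is only defined for
        // numeric classes and throws an Exception for nominal ones.
        Evaluation eval = new Evaluation(data);
        eval.crossValidateModel(model, data, 10, new Random(1));
        System.out.println("Correlation coefficient: " + eval.correlationCoefficient());
    }
}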
From source file:adams.flow.core.EvaluationHelper.java
License:Open Source License
/**
 * Returns a statistical value from the evaluation object.
 *
 * @param eval the evaluation object to get the value from
 * @param statistic the type of value to return
 * @param classIndex the class label index, for statistics like AUC
 * @return the determined value, Double.NaN if not found
 * @throws Exception if evaluation fails
 */
public static double getValue(Evaluation eval, EvaluationStatistic statistic, int classIndex) throws Exception {
    switch (statistic) {
    case NUMBER_CORRECT: return eval.correct();
    case NUMBER_INCORRECT: return eval.incorrect();
    case NUMBER_UNCLASSIFIED: return eval.unclassified();
    case PERCENT_CORRECT: return eval.pctCorrect();
    case PERCENT_INCORRECT: return eval.pctIncorrect();
    case PERCENT_UNCLASSIFIED: return eval.pctUnclassified();
    case KAPPA_STATISTIC: return eval.kappa();
    case MEAN_ABSOLUTE_ERROR: return eval.meanAbsoluteError();
    case ROOT_MEAN_SQUARED_ERROR: return eval.rootMeanSquaredError();
    case RELATIVE_ABSOLUTE_ERROR: return eval.relativeAbsoluteError();
    case ROOT_RELATIVE_SQUARED_ERROR: return eval.rootRelativeSquaredError();
    case CORRELATION_COEFFICIENT: return eval.correlationCoefficient();
    case SF_PRIOR_ENTROPY: return eval.SFPriorEntropy();
    case SF_SCHEME_ENTROPY: return eval.SFSchemeEntropy();
    case SF_ENTROPY_GAIN: return eval.SFEntropyGain();
    case SF_MEAN_PRIOR_ENTROPY: return eval.SFMeanPriorEntropy();
    case SF_MEAN_SCHEME_ENTROPY: return eval.SFMeanSchemeEntropy();
    case SF_MEAN_ENTROPY_GAIN: return eval.SFMeanEntropyGain();
    case KB_INFORMATION: return eval.KBInformation();
    case KB_MEAN_INFORMATION: return eval.KBMeanInformation();
    case KB_RELATIVE_INFORMATION: return eval.KBRelativeInformation();
    case TRUE_POSITIVE_RATE: return eval.truePositiveRate(classIndex);
    case NUM_TRUE_POSITIVES: return eval.numTruePositives(classIndex);
    case FALSE_POSITIVE_RATE: return eval.falsePositiveRate(classIndex);
    case NUM_FALSE_POSITIVES: return eval.numFalsePositives(classIndex);
    case TRUE_NEGATIVE_RATE: return eval.trueNegativeRate(classIndex);
    case NUM_TRUE_NEGATIVES: return eval.numTrueNegatives(classIndex);
    case FALSE_NEGATIVE_RATE: return eval.falseNegativeRate(classIndex);
    case NUM_FALSE_NEGATIVES: return eval.numFalseNegatives(classIndex);
    case IR_PRECISION: return eval.precision(classIndex);
    case IR_RECALL: return eval.recall(classIndex);
    case F_MEASURE: return eval.fMeasure(classIndex);
    case MATTHEWS_CORRELATION_COEFFICIENT: return eval.matthewsCorrelationCoefficient(classIndex);
    case AREA_UNDER_ROC: return eval.areaUnderROC(classIndex);
    case AREA_UNDER_PRC: return eval.areaUnderPRC(classIndex);
    case WEIGHTED_TRUE_POSITIVE_RATE: return eval.weightedTruePositiveRate();
    case WEIGHTED_FALSE_POSITIVE_RATE: return eval.weightedFalsePositiveRate();
    case WEIGHTED_TRUE_NEGATIVE_RATE: return eval.weightedTrueNegativeRate();
    case WEIGHTED_FALSE_NEGATIVE_RATE: return eval.weightedFalseNegativeRate();
    case WEIGHTED_IR_PRECISION: return eval.weightedPrecision();
    case WEIGHTED_IR_RECALL: return eval.weightedRecall();
    case WEIGHTED_F_MEASURE: return eval.weightedFMeasure();
    case WEIGHTED_MATTHEWS_CORRELATION_COEFFICIENT: return eval.weightedMatthewsCorrelation();
    case WEIGHTED_AREA_UNDER_ROC: return eval.weightedAreaUnderROC();
    case WEIGHTED_AREA_UNDER_PRC: return eval.weightedAreaUnderPRC();
    case UNWEIGHTED_MACRO_F_MEASURE: return eval.unweightedMacroFmeasure();
    case UNWEIGHTED_MICRO_F_MEASURE: return eval.unweightedMicroFmeasure();
    case BIAS: return eval.getPluginMetric(Bias.class.getName()).getStatistic(Bias.NAME);
    case RSQUARED: return eval.getPluginMetric(RSquared.class.getName()).getStatistic(RSquared.NAME);
    case SDR: return eval.getPluginMetric(SDR.class.getName()).getStatistic(SDR.NAME);
    case RPD: return eval.getPluginMetric(RPD.class.getName()).getStatistic(RPD.NAME);
    default: throw new IllegalArgumentException("Unhandled statistic field: " + statistic);
    }
}
From source file:adams.opt.cso.Measure.java
License:Open Source License
/**
 * Extracts the measure from the Evaluation object.
 *
 * @param evaluation the evaluation to use
 * @param adjust whether to adjust the measure
 * @return the measure
 * @throws Exception in case the retrieval of the measure fails
 */
public double extract(Evaluation evaluation, boolean adjust) throws Exception {
    switch (this) {
    case ACC:
        if (adjust)
            return 100.0 - evaluation.pctCorrect();
        else
            return evaluation.pctCorrect();
    case CC:
        if (adjust)
            return 1.0 - evaluation.correlationCoefficient();
        else
            return evaluation.correlationCoefficient();
    case MAE:
        return evaluation.meanAbsoluteError();
    case RAE:
        return evaluation.relativeAbsoluteError();
    case RMSE:
        return evaluation.rootMeanSquaredError();
    case RRSE:
        return evaluation.rootRelativeSquaredError();
    default:
        throw new IllegalStateException("Unhandled measure '" + this + "'!");
    }
}
From source file:adams.opt.genetic.Measure.java
License:Open Source License
/**
 * Extracts the measure from the Evaluation object.
 *
 * @param evaluation the evaluation to use
 * @param adjust whether to adjust the measure
 * @return the measure
 * @see #adjust(double)
 * @throws Exception in case the retrieval of the measure fails
 */
public double extract(Evaluation evaluation, boolean adjust) throws Exception {
    double result;

    if (this == Measure.ACC)
        result = evaluation.pctCorrect();
    else if (this == Measure.CC)
        result = evaluation.correlationCoefficient();
    else if (this == Measure.MAE)
        result = evaluation.meanAbsoluteError();
    else if (this == Measure.RAE)
        result = evaluation.relativeAbsoluteError();
    else if (this == Measure.RMSE)
        result = evaluation.rootMeanSquaredError();
    else if (this == Measure.RRSE)
        result = evaluation.rootRelativeSquaredError();
    else
        throw new IllegalStateException("Unhandled measure '" + this + "'!");

    if (adjust)
        result = adjust(result);

    return result;
}
From source file:adams.opt.optimise.genetic.fitnessfunctions.AttributeSelection.java
License:Open Source License
public double evaluate(OptData opd) {
    init();
    int cnt = 0;
    int[] weights = getWeights(opd);
    Instances newInstances = new Instances(getInstances());
    for (int i = 0; i < getInstances().numInstances(); i++) {
        Instance in = newInstances.instance(i);
        cnt = 0;
        for (int a = 0; a < getInstances().numAttributes(); a++) {
            if (a == getInstances().classIndex())
                continue;
            if (weights[cnt++] == 0) {
                in.setValue(a, 0);
            } else {
                in.setValue(a, in.value(a));
            }
        }
    }
    Classifier newClassifier = null;
    try {
        newClassifier = (Classifier) OptionUtils.shallowCopy(getClassifier());
        // evaluate classifier on data
        Evaluation evaluation = new Evaluation(newInstances);
        evaluation.crossValidateModel(newClassifier, newInstances, getFolds(),
                new Random(getCrossValidationSeed()));
        // obtain measure
        double measure = 0;
        if (getMeasure() == Measure.ACC)
            measure = evaluation.pctCorrect();
        else if (getMeasure() == Measure.CC)
            measure = evaluation.correlationCoefficient();
        else if (getMeasure() == Measure.MAE)
            measure = evaluation.meanAbsoluteError();
        else if (getMeasure() == Measure.RAE)
            measure = evaluation.relativeAbsoluteError();
        else if (getMeasure() == Measure.RMSE)
            measure = evaluation.rootMeanSquaredError();
        else if (getMeasure() == Measure.RRSE)
            measure = evaluation.rootRelativeSquaredError();
        else
            throw new IllegalStateException("Unhandled measure '" + getMeasure() + "'!");
        measure = getMeasure().adjust(measure);
        return (measure);
        // process fitness
    } catch (Exception e) {
        getLogger().log(Level.SEVERE, "Error evaluating", e);
    }
    return 0;
}
From source file:asap.NLPSystem.java
private String _buildClassifier() {
    Evaluation eval;
    try {
        eval = new Evaluation(trainingSet);
    } catch (Exception ex) {
        Logger.getLogger(NLPSystem.class.getName()).log(Level.SEVERE, null, ex);
        return "Error creating evaluation instance for given data!";
    }
    try {
        classifier.buildClassifier(trainingSet);
    } catch (Exception ex) {
        Logger.getLogger(NLPSystem.class.getName()).log(Level.SEVERE, null, ex);
    }
    try {
        trainingPredictions = eval.evaluateModel(classifier, trainingSet);
        trainingPearsonsCorrelation = eval.correlationCoefficient();
    } catch (Exception ex) {
        Logger.getLogger(NLPSystem.class.getName()).log(Level.SEVERE, null, ex);
    }
    classifierBuilt = true;
    return "Classifier built (" + trainingPearsonsCorrelation + ").";
}
From source file:asap.NLPSystem.java
private String crossValidate(int seed, int folds, String modelOutputFile) {
    PerformanceCounters.startTimer("cross-validation");
    PerformanceCounters.startTimer("cross-validation init");
    AbstractClassifier abstractClassifier = (AbstractClassifier) classifier;
    // randomize data
    Random rand = new Random(seed);
    Instances randData = new Instances(trainingSet);
    randData.randomize(rand);
    if (randData.classAttribute().isNominal()) {
        randData.stratify(folds);
    }
    // perform cross-validation and add predictions
    Evaluation eval;
    try {
        eval = new Evaluation(randData);
    } catch (Exception ex) {
        Logger.getLogger(NLPSystem.class.getName()).log(Level.SEVERE, null, ex);
        return "Error creating evaluation instance for given data!";
    }
    List<Thread> foldThreads = (List<Thread>) Collections.synchronizedList(new LinkedList<Thread>());
    List<FoldSet> foldSets = (List<FoldSet>) Collections.synchronizedList(new LinkedList<FoldSet>());
    for (int n = 0; n < folds; n++) {
        try {
            foldSets.add(new FoldSet(randData.trainCV(folds, n), randData.testCV(folds, n),
                    AbstractClassifier.makeCopy(abstractClassifier)));
        } catch (Exception ex) {
            Logger.getLogger(NLPSystem.class.getName()).log(Level.SEVERE, null, ex);
        }
        if (n < Config.getNumThreads() - 1) {
            Thread foldThread = new Thread(new CrossValidationFoldThread(n, foldSets, eval));
            foldThreads.add(foldThread);
        }
    }
    PerformanceCounters.stopTimer("cross-validation init");
    PerformanceCounters.startTimer("cross-validation folds+train");
    if (Config.getNumThreads() > 1) {
        for (Thread foldThread : foldThreads) {
            foldThread.start();
        }
    } else {
        new CrossValidationFoldThread(0, foldSets, eval).run();
    }
    for (Thread foldThread : foldThreads) {
        while (foldThread.isAlive()) {
            try {
                foldThread.join();
            } catch (InterruptedException ex) {
                Logger.getLogger(NLPSystem.class.getName()).log(Level.SEVERE, null, ex);
            }
        }
    }
    PerformanceCounters.stopTimer("cross-validation folds+train");
    PerformanceCounters.startTimer("cross-validation post");
    // evaluation for output:
    String out = String.format(
            "\n=== Setup ===\nClassifier: %s %s\n" + "Dataset: %s\nFolds: %s\nSeed: %s\n\n%s\n",
            abstractClassifier.getClass().getName(), Utils.joinOptions(abstractClassifier.getOptions()),
            trainingSet.relationName(), folds, seed,
            eval.toSummaryString(String.format("=== %s-fold Cross-validation ===", folds), false));
    try {
        crossValidationPearsonsCorrelation = eval.correlationCoefficient();
    } catch (Exception ex) {
        Logger.getLogger(NLPSystem.class.getName()).log(Level.SEVERE, null, ex);
    }
    if (modelOutputFile != null) {
        if (!modelOutputFile.isEmpty()) {
            try {
                SerializationHelper.write(modelOutputFile, abstractClassifier);
            } catch (Exception ex) {
                Logger.getLogger(NLPSystem.class.getName()).log(Level.SEVERE, null, ex);
            }
        }
    }
    classifierBuiltWithCrossValidation = true;
    PerformanceCounters.stopTimer("cross-validation post");
    PerformanceCounters.stopTimer("cross-validation");
    return out;
}
From source file:asap.NLPSystem.java
private void evaluateModel(boolean printEvaluation) {
    // checkInstancesFeatures(evaluationSet);
    PerformanceCounters.startTimer("evaluateModel");
    System.out.println("Evaluating model...");
    AbstractClassifier abstractClassifier = (AbstractClassifier) classifier;
    try {
        // evaluate classifier and print some statistics
        Evaluation eval = new Evaluation(evaluationSet);
        evaluationPredictions = eval.evaluateModel(abstractClassifier, evaluationSet);
        if (printEvaluation) {
            System.out.println("\tstats for model:" + abstractClassifier.getClass().getName() + " "
                    + Utils.joinOptions(abstractClassifier.getOptions()));
            System.out.println(eval.toSummaryString());
        }
        evaluationPearsonsCorrelation = eval.correlationCoefficient();
        evaluated = true;
    } catch (Exception ex) {
        Logger.getLogger(PostProcess.class.getName()).log(Level.SEVERE, null, ex);
    }
    System.out.println("\tevaluation done.");
    PerformanceCounters.stopTimer("evaluateModel");
}
From source file:cezeri.feature.selection.FeatureSelectionRanker.java
private static TFeatureRank[] computeCombinationPairs(String[] lstComb, Instances data, Classifier model,
        int nFolds, boolean show_text, boolean show_plot) {
    TFeatureRank[] ret = new TFeatureRank[lstComb.length];
    int m = lstComb.length;
    double q = m * 1.0 / 100;
    int n = 0;
    for (int i = 0; i < m; i++) {
        if (n != (int) Math.round(i / q)) {
            n = (int) Math.round(i / q);
            System.out.println("progress:" + n + "%");
        }
        TFeatureRank obj = new TFeatureRank();
        obj.featureName = lstComb[i];
        obj.index = i + "";
        Instances subsetData = FactoryInstance.getSubsetData(data, lstComb[i].split(","));
        Evaluation eval = FactoryEvaluation.performCrossValidate(model, subsetData, nFolds, show_text, show_plot);
        try {
            if (data.classAttribute().isNominal()) {
                obj.value = eval.pctCorrect();
            } else {
                obj.value = eval.correlationCoefficient();
            }
        } catch (Exception ex) {
            Logger.getLogger(FeatureSelectionRanker.class.getName()).log(Level.SEVERE, null, ex);
        }
        ret[i] = obj;
    }
    ArrayList<TFeatureRank> lst = toArrayList(ret);
    Collections.sort(lst, new CustomComparatorForFeatureRank());
    ret = toArray(lst);
    return ret;
}
From source file:cezeri.feature.selection.FeatureSelectionRanker.java
private static double computeCombinationFeature(String lstComb, Instances data, int folds, Classifier model,
        boolean show_text, boolean show_plot) {
    TFeatureRank obj = new TFeatureRank();
    obj.featureName = lstComb;
    obj.index = "";
    Instances subsetData = FactoryInstance.getSubsetData(data, lstComb.split(","));
    Evaluation eval = FactoryEvaluation.performCrossValidate(model, subsetData, folds, show_text, show_plot);
    try {
        if (data.classAttribute().isNominal()) {
            obj.value = eval.pctCorrect();
        } else {
            obj.value = eval.correlationCoefficient();
        }
    } catch (Exception ex) {
        Logger.getLogger(FeatureSelectionRanker.class.getName()).log(Level.SEVERE, null, ex);
    }
    return obj.value;
}
From source file:epsi.i5.datamining.Weka.java
public void excutionAlgo() throws FileNotFoundException, IOException, Exception {
    BufferedReader reader = new BufferedReader(new FileReader("src/epsi/i5/data/" + fileOne + ".arff"));
    Instances data = new Instances(reader);
    reader.close();
    //System.out.println(data.attribute(0));
    data.setClass(data.attribute(0));

    NaiveBayes NB = new NaiveBayes();
    NB.buildClassifier(data);
    Evaluation naiveBayes = new Evaluation(data);
    naiveBayes.crossValidateModel(NB, data, 10, new Random(1));
    naiveBayes.evaluateModel(NB, data);
    //System.out.println(test.confusionMatrix() + "1");
    //System.out.println(test.correct() + "2");
    System.out.println("*****************************");
    System.out.println("******** Naive Bayes ********");
    System.out.println(naiveBayes.toMatrixString());
    System.out.println("*****************************");
    System.out.println("**** Pourcentage Correct ****");
    System.out.println(naiveBayes.pctCorrect());
    System.out.println("");

    J48 j = new J48();
    j.buildClassifier(data);
    Evaluation jeval = new Evaluation(data);
    jeval.crossValidateModel(j, data, 10, new Random(1));
    jeval.evaluateModel(j, data);
    System.out.println("*****************************");
    System.out.println("************ J48 ************");
    System.out.println(jeval.toMatrixString());
    System.out.println("*****************************");
    System.out.println("**** Pourcentage Correct ****");
    System.out.println(jeval.pctCorrect());
    System.out.println("");

    DecisionTable DT = new DecisionTable();
    DT.buildClassifier(data);
    Evaluation decisionTable = new Evaluation(data);
    decisionTable.crossValidateModel(DT, data, 10, new Random(1));
    decisionTable.evaluateModel(DT, data);
    System.out.println("*****************************");
    System.out.println("******* DecisionTable *******");
    System.out.println(decisionTable.toMatrixString());
    System.out.println("*****************************");
    System.out.println("**** Pourcentage Correct ****");
    System.out.println(decisionTable.pctCorrect());
    System.out.println("");

    OneR OR = new OneR();
    OR.buildClassifier(data);
    Evaluation oneR = new Evaluation(data);
    oneR.crossValidateModel(OR, data, 10, new Random(1));
    oneR.evaluateModel(OR, data);
    System.out.println("*****************************");
    System.out.println("************ OneR ***********");
    System.out.println(oneR.toMatrixString());
    System.out.println("*****************************");
    System.out.println("**** Pourcentage Correct ****");
    System.out.println(oneR.pctCorrect());

    // Polarity: switch the class attribute for the regression schemes below
    data.setClass(data.attribute(1));
    System.out.println("");

    M5Rules MR = new M5Rules();
    MR.buildClassifier(data);
    Evaluation m5rules = new Evaluation(data);
    m5rules.crossValidateModel(MR, data, 10, new Random(1));
    m5rules.evaluateModel(MR, data);
    System.out.println("*****************************");
    System.out.println("********** M5Rules **********");
    System.out.println(m5rules.correlationCoefficient());
    System.out.println("");

    LinearRegression LR = new LinearRegression();
    LR.buildClassifier(data);
    Evaluation linearR = new Evaluation(data);
    linearR.crossValidateModel(LR, data, 10, new Random(1));
    linearR.evaluateModel(LR, data);
    System.out.println("*****************************");
    System.out.println("********** linearR **********");
    System.out.println(linearR.correlationCoefficient());
}