List of usage examples for weka.classifiers.Evaluation.fMeasure
public double fMeasure(int classIndex)
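Before the examples below, a minimal sketch of the call itself: fMeasure(classIndex) returns the F-measure for one class from the statistics accumulated by an Evaluation object. The ARFF path and the J48 classifier here are illustrative placeholders, not taken from the examples.

import java.io.FileReader;
import java.util.Random;

import weka.classifiers.Evaluation;
import weka.classifiers.trees.J48;
import weka.core.Instances;

public class FMeasureDemo {
    public static void main(String[] args) throws Exception {
        // Load a dataset; the path is a placeholder.
        Instances data = new Instances(new FileReader("data.arff"));
        data.setClassIndex(data.numAttributes() - 1);

        // Cross-validate any classifier; J48 is an arbitrary choice.
        Evaluation eval = new Evaluation(data);
        eval.crossValidateModel(new J48(), data, 10, new Random(1));

        // Per-class F-measure plus the weighted average over all classes.
        for (int i = 0; i < data.numClasses(); i++) {
            System.out.println(data.classAttribute().value(i) + ": " + eval.fMeasure(i));
        }
        System.out.println("weighted: " + eval.weightedFMeasure());
    }
}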
From source file:main.mFFNN.java
public static void main(String[] args) throws Exception {
    mFFNN m = new mFFNN();
    BufferedReader breader = new BufferedReader(new FileReader("src\\main\\iris.arff"));
    Instances fileTrain = new Instances(breader);
    fileTrain.setClassIndex(fileTrain.numAttributes() - 1);
    System.out.println(fileTrain);
    breader.close();

    System.out.println("mFFNN!!!\n\n");
    FeedForwardNeuralNetwork FFNN = new FeedForwardNeuralNetwork();
    Evaluation eval = new Evaluation(fileTrain);
    FFNN.buildClassifier(fileTrain);
    // Note: the model is evaluated on its own training data here,
    // so the "Stratified cross-validation" header below is misleading.
    eval.evaluateModel(FFNN, fileTrain);

    // OUTPUT
    Scanner scan = new Scanner(System.in);
    System.out.println(eval.toSummaryString("=== Stratified cross-validation ===\n=== Summary ===", true));
    System.out.println(eval.toClassDetailsString("=== Detailed Accuracy By Class ==="));
    System.out.println(eval.toMatrixString("=== Confusion matrix ==="));
    System.out.println(eval.fMeasure(1) + " " + eval.recall(1));
    System.out.println("\nDo you want to save this model (1/0)? ");
    FFNN.distributionForInstance(fileTrain.get(0));
    /*
    int c = scan.nextInt();
    if (c == 1) {
        System.out.print("Please enter your file name (*.model) : ");
        String infile = scan.next();
        m.saveModel(FFNN, infile);
    } else {
        System.out.print("Model not saved.");
    }
    */
}
From source file:meddle.TrainModelByDomainOS.java
License:Open Source License
/**
 * Evaluates a trained classifier/model with 10-fold cross-validation,
 * collecting summary statistics, false positive/negative rates,
 * F-measure and AUC.
 *
 * @param classifier the trained classifier
 * @param domainOS   the domain name
 * @param tras       the training instances
 * @param mem        the measures object to populate
 * @return the populated MetaEvaluationMeasures
 */
public static MetaEvaluationMeasures doEvaluation(Classifier classifier, String domainOS, Instances tras,
        MetaEvaluationMeasures mem) {
    try {
        Evaluation evaluation = new Evaluation(tras);
        evaluation.crossValidateModel(classifier, tras, 10, new Random(1));
        mem.numInstance = evaluation.numInstances();
        // Positives for class index 1 = true positives + false negatives.
        double M = evaluation.numTruePositives(1) + evaluation.numFalseNegatives(1);
        mem.numPositive = (int) M;
        mem.AUC = evaluation.areaUnderROC(1);
        mem.numCorrectlyClassified = (int) evaluation.correct();
        mem.accuracy = 1.0 * mem.numCorrectlyClassified / mem.numInstance;
        mem.falseNegativeRate = evaluation.falseNegativeRate(1);
        mem.falsePositiveRate = evaluation.falsePositiveRate(1);
        mem.fMeasure = evaluation.fMeasure(1);
        double[][] cmMatrix = evaluation.confusionMatrix();
        mem.confusionMatrix = cmMatrix;
        mem.TP = evaluation.numTruePositives(1);
        mem.TN = evaluation.numTrueNegatives(1);
        mem.FP = evaluation.numFalsePositives(1);
        mem.FN = evaluation.numFalseNegatives(1);
    } catch (Exception e) {
        e.printStackTrace();
    }
    return mem;
}
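As a reminder of what fMeasure reports: WEKA computes the balanced F-measure (F1) from the per-class precision and recall, with the classic implementation returning 0 when both are 0 rather than dividing by zero. A quick sanity check against precision(i) and recall(i), assuming a populated Evaluation object named evaluation as in the method above:

// F1 = 2 * precision * recall / (precision + recall).
double p = evaluation.precision(1);
double r = evaluation.recall(1);
double f1 = (p + r == 0) ? 0 : 2 * p * r / (p + r);
System.out.println(f1 == evaluation.fMeasure(1)); // true, same arithmetic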
From source file:org.openml.webapplication.io.Output.java
License:Open Source License
public static Map<Metric, MetricScore> evaluatorToMap(Evaluation evaluator, int classes, TaskType task)
        throws Exception {
    Map<Metric, MetricScore> m = new HashMap<Metric, MetricScore>();

    if (task == TaskType.REGRESSION) {
        // Measures for regression tasks.
        m.put(new Metric("mean_absolute_error", "openml.evaluation.mean_absolute_error(1.0)"),
                new MetricScore(evaluator.meanAbsoluteError(), (int) evaluator.numInstances()));
        m.put(new Metric("mean_prior_absolute_error", "openml.evaluation.mean_prior_absolute_error(1.0)"),
                new MetricScore(evaluator.meanPriorAbsoluteError(), (int) evaluator.numInstances()));
        m.put(new Metric("root_mean_squared_error", "openml.evaluation.root_mean_squared_error(1.0)"),
                new MetricScore(evaluator.rootMeanSquaredError(), (int) evaluator.numInstances()));
        m.put(new Metric("root_mean_prior_squared_error", "openml.evaluation.root_mean_prior_squared_error(1.0)"),
                new MetricScore(evaluator.rootMeanPriorSquaredError(), (int) evaluator.numInstances()));
        m.put(new Metric("relative_absolute_error", "openml.evaluation.relative_absolute_error(1.0)"),
                new MetricScore(evaluator.relativeAbsoluteError() / 100, (int) evaluator.numInstances()));
        m.put(new Metric("root_relative_squared_error", "openml.evaluation.root_relative_squared_error(1.0)"),
                new MetricScore(evaluator.rootRelativeSquaredError() / 100, (int) evaluator.numInstances()));
    } else if (task == TaskType.CLASSIFICATION || task == TaskType.LEARNINGCURVE
            || task == TaskType.TESTTHENTRAIN) {
        m.put(new Metric("average_cost", "openml.evaluation.average_cost(1.0)"),
                new MetricScore(evaluator.avgCost(), (int) evaluator.numInstances()));
        m.put(new Metric("total_cost", "openml.evaluation.total_cost(1.0)"),
                new MetricScore(evaluator.totalCost(), (int) evaluator.numInstances()));
        m.put(new Metric("mean_absolute_error", "openml.evaluation.mean_absolute_error(1.0)"),
                new MetricScore(evaluator.meanAbsoluteError(), (int) evaluator.numInstances()));
        m.put(new Metric("mean_prior_absolute_error", "openml.evaluation.mean_prior_absolute_error(1.0)"),
                new MetricScore(evaluator.meanPriorAbsoluteError(), (int) evaluator.numInstances()));
        m.put(new Metric("root_mean_squared_error", "openml.evaluation.root_mean_squared_error(1.0)"),
                new MetricScore(evaluator.rootMeanSquaredError(), (int) evaluator.numInstances()));
        m.put(new Metric("root_mean_prior_squared_error", "openml.evaluation.root_mean_prior_squared_error(1.0)"),
                new MetricScore(evaluator.rootMeanPriorSquaredError(), (int) evaluator.numInstances()));
        m.put(new Metric("relative_absolute_error", "openml.evaluation.relative_absolute_error(1.0)"),
                new MetricScore(evaluator.relativeAbsoluteError() / 100, (int) evaluator.numInstances()));
        m.put(new Metric("root_relative_squared_error", "openml.evaluation.root_relative_squared_error(1.0)"),
                new MetricScore(evaluator.rootRelativeSquaredError() / 100, (int) evaluator.numInstances()));
        m.put(new Metric("prior_entropy", "openml.evaluation.prior_entropy(1.0)"),
                new MetricScore(evaluator.priorEntropy(), (int) evaluator.numInstances()));
        m.put(new Metric("kb_relative_information_score", "openml.evaluation.kb_relative_information_score(1.0)"),
                new MetricScore(evaluator.KBRelativeInformation() / 100, (int) evaluator.numInstances()));

        // Per-class precision, recall, AUROC and F-measure arrays.
        Double[] precision = new Double[classes];
        Double[] recall = new Double[classes];
        Double[] auroc = new Double[classes];
        Double[] fMeasure = new Double[classes];
        Double[] instancesPerClass = new Double[classes];
        double[][] confusionMatrix = evaluator.confusionMatrix();
        for (int i = 0; i < classes; ++i) {
            precision[i] = evaluator.precision(i);
            recall[i] = evaluator.recall(i);
            auroc[i] = evaluator.areaUnderROC(i);
            fMeasure[i] = evaluator.fMeasure(i);
            instancesPerClass[i] = 0.0;
            for (int j = 0; j < classes; ++j) {
                instancesPerClass[i] += confusionMatrix[i][j];
            }
        }

        m.put(new Metric("predictive_accuracy", "openml.evaluation.predictive_accuracy(1.0)"),
                new MetricScore(evaluator.pctCorrect() / 100, (int) evaluator.numInstances()));
        m.put(new Metric("kappa", "openml.evaluation.kappa(1.0)"),
                new MetricScore(evaluator.kappa(), (int) evaluator.numInstances()));
        m.put(new Metric("number_of_instances", "openml.evaluation.number_of_instances(1.0)"),
                new MetricScore(evaluator.numInstances(), instancesPerClass, (int) evaluator.numInstances()));
        m.put(new Metric("precision", "openml.evaluation.precision(1.0)"),
                new MetricScore(evaluator.weightedPrecision(), precision, (int) evaluator.numInstances()));
        m.put(new Metric("recall", "openml.evaluation.recall(1.0)"),
                new MetricScore(evaluator.weightedRecall(), recall, (int) evaluator.numInstances()));
        m.put(new Metric("f_measure", "openml.evaluation.f_measure(1.0)"),
                new MetricScore(evaluator.weightedFMeasure(), fMeasure, (int) evaluator.numInstances()));
        if (Utils.isMissingValue(evaluator.weightedAreaUnderROC()) == false) {
            m.put(new Metric("area_under_roc_curve", "openml.evaluation.area_under_roc_curve(1.0)"),
                    new MetricScore(evaluator.weightedAreaUnderROC(), auroc, (int) evaluator.numInstances()));
        }
        m.put(new Metric("confusion_matrix", "openml.evaluation.confusion_matrix(1.0)"),
                new MetricScore(confusionMatrix));
    }
    return m;
}
From source file:sentinets.Prediction.java
License:Open Source License
public String updateModel(String inputFile, ArrayList<Double[]> metrics) {
    String output = "";
    this.setInstances(inputFile);
    FilteredClassifier fcls = (FilteredClassifier) this.cls;
    SGD cls = (SGD) fcls.getClassifier();
    Filter filter = fcls.getFilter();
    Instances insAll;
    try {
        insAll = Filter.useFilter(this.unlabled, filter);
        if (insAll.size() > 0) {
            Random rand = new Random(10);
            // Fall back to 2 folds when there are fewer than 10 instances.
            int folds = 10 > insAll.size() ? 2 : 10;
            Instances randData = new Instances(insAll);
            randData.randomize(rand);
            if (randData.classAttribute().isNominal()) {
                randData.stratify(folds);
            }
            Evaluation eval = new Evaluation(randData);
            eval.evaluateModel(cls, insAll);
            System.out.println("Initial Evaluation");
            System.out.println(eval.toSummaryString());
            System.out.println(eval.toClassDetailsString());
            metrics.add(new Double[] { eval.fMeasure(0), eval.fMeasure(1), eval.weightedFMeasure() });
            output += "\n====" + "Initial Evaluation" + "====\n";
            output += "\n" + eval.toSummaryString();
            output += "\n" + eval.toClassDetailsString();
            System.out.println("Cross Validated Evaluation");
            output += "\n====" + "Cross Validated Evaluation" + "====\n";
            for (int n = 0; n < folds; n++) {
                Instances train = randData.trainCV(folds, n);
                Instances test = randData.testCV(folds, n);
                // Incrementally update the SGD classifier on the training fold.
                for (int i = 0; i < train.numInstances(); i++) {
                    cls.updateClassifier(train.instance(i));
                }
                eval.evaluateModel(cls, test);
                System.out.println("Cross Validated Evaluation fold: " + n);
                output += "\n====" + "Cross Validated Evaluation fold (" + n + ")====\n";
                System.out.println(eval.toSummaryString());
                System.out.println(eval.toClassDetailsString());
                output += "\n" + eval.toSummaryString();
                output += "\n" + eval.toClassDetailsString();
                metrics.add(new Double[] { eval.fMeasure(0), eval.fMeasure(1), eval.weightedFMeasure() });
            }
            for (int i = 0; i < insAll.numInstances(); i++) {
                cls.updateClassifier(insAll.instance(i));
            }
            eval.evaluateModel(cls, insAll);
            System.out.println("Final Evaluation");
            System.out.println(eval.toSummaryString());
            System.out.println(eval.toClassDetailsString());
            output += "\n====" + "Final Evaluation" + "====\n";
            output += "\n" + eval.toSummaryString();
            output += "\n" + eval.toClassDetailsString();
            metrics.add(new Double[] { eval.fMeasure(0), eval.fMeasure(1), eval.weightedFMeasure() });
            fcls.setClassifier(cls);
            String modelFilePath = outputDir + "/" + Utils.getOutDir(Utils.OutDirIndex.MODELS)
                    + "/updatedClassifier.model";
            weka.core.SerializationHelper.write(modelFilePath, fcls);
            output += "\n" + "Updated Model saved at: " + modelFilePath;
        } else {
            output += "No new instances for training the model.";
        }
    } catch (Exception e) {
        e.printStackTrace();
    }
    return output;
}
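One detail worth flagging in the example above: the same Evaluation object is reused across folds, and evaluateModel accumulates statistics rather than resetting them (this is also how crossValidateModel aggregates folds internally), so each reported fMeasure reflects all predictions so far. If isolated per-fold F-measures are wanted, a fresh Evaluation per fold is the usual pattern; a hypothetical variant of the fold loop:

// One Evaluation per fold, so fMeasure(...) covers only that fold.
for (int n = 0; n < folds; n++) {
    Instances train = randData.trainCV(folds, n);
    Instances test = randData.testCV(folds, n);
    Evaluation foldEval = new Evaluation(train); // fresh statistics
    foldEval.evaluateModel(cls, test);
    metrics.add(new Double[] { foldEval.fMeasure(0), foldEval.fMeasure(1), foldEval.weightedFMeasure() });
}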
From source file:soccer.core.SimpleClassifier.java
public void evaluate() throws IOException, Exception {
    Instances data = loader.buildInstances();
    NumericToNominal toNominal = new NumericToNominal();
    toNominal.setOptions(new String[] { "-R", "5,6,8,9" });
    toNominal.setInputFormat(data);
    data = Filter.useFilter(data, toNominal);
    data.setClassIndex(6);
    // DataSink.write(ARFF_STRING, data);

    EnsembleLibrary ensembleLib = new EnsembleLibrary();
    ensembleLib.addModel("weka.classifiers.trees.J48");
    ensembleLib.addModel("weka.classifiers.bayes.NaiveBayes");
    ensembleLib.addModel("weka.classifiers.functions.SMO");
    ensembleLib.addModel("weka.classifiers.meta.AdaBoostM1");
    ensembleLib.addModel("weka.classifiers.meta.LogitBoost");
    // The original listing added "classifiers.trees.DecisionStump" twice,
    // missing the "weka." package prefix; fixed here.
    ensembleLib.addModel("weka.classifiers.trees.DecisionStump");
    EnsembleLibrary.saveLibrary(new File("./ensembleLib.model.xml"), ensembleLib, null);

    EnsembleSelection model = new EnsembleSelection();
    model.setOptions(new String[] {
            "-L", "./ensembleLib.model.xml", // </path/to/modelLibrary>
            // "-W", path + "esTmp",         // </path/to/working/directory>
            "-B", "10",      // <numModelBags>
            "-E", "1.0",     // <modelRatio>
            "-V", "0.25",    // <validationRatio>
            "-H", "100",     // <hillClimbIterations>
            "-I", "1.0",     // <sortInitialization>
            "-X", "2",       // <numFolds>
            "-P", "roc",     // <hillclimbMetric>
            "-A", "forward", // <algorithm>
            "-R", "true",    // flag: models may be selected more than once
            "-G", "true",    // stop adding models when performance degrades
            "-O", "true",    // verbose output
            "-S", "1",       // random number seed
            "-D", "true"     // run in debug mode
    });

    // double resES[] = evaluate(ensambleSel);
    // System.out.println("Ensemble Selection\n"
    //         + "\tchurn: " + resES[0] + "\n"
    //         + "\tappetency: " + resES[1] + "\n"
    //         + "\tup-sell: " + resES[2] + "\n"
    //         + "\toverall: " + resES[3] + "\n");

    // models.add(new J48());
    // models.add(new RandomForest());
    // models.add(new NaiveBayes());
    // models.add(new AdaBoostM1());
    // models.add(new Logistic());
    // models.add(new MultilayerPerceptron());

    int FOLDS = 5;
    Evaluation eval = new Evaluation(data);
    // for (Classifier model : models) {
    eval.crossValidateModel(model, data, FOLDS, new Random(1), new Object[] {});
    System.out.println(model.getClass().getName() + "\n"
            + "\tRecall: " + eval.recall(1) + "\n"
            + "\tPrecision: " + eval.precision(1) + "\n"
            + "\tF-measure: " + eval.fMeasure(1));
    System.out.println(eval.toSummaryString());
    // }

    // LogitBoost cl = new LogitBoost();
    // cl.setOptions(new String[] { "-Q", "-I", "100", "-Z", "4", "-O", "4", "-E", "4" });
    // cl.buildClassifier(data);
    // Evaluation eval = new Evaluation(data);
    // eval.crossValidateModel(cl, data, 6, new Random(1), new Object[] {});
    // System.out.println(eval.weightedFMeasure());
    // System.out.println(cl.graph());
    // System.out.println(cl.globalInfo());
}
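Beyond the per-class fMeasure(int) calls shown throughout these examples, recent WEKA releases (3.7+) also expose aggregate F-measure variants on Evaluation. A short sketch, assuming eval is a populated Evaluation object; availability of the unweighted variants depends on the WEKA version in use:

// Aggregate F-measure variants.
System.out.println(eval.weightedFMeasure());        // weighted by class frequency
System.out.println(eval.unweightedMacroFmeasure()); // mean of per-class F-measures
System.out.println(eval.unweightedMicroFmeasure()); // from pooled TP/FP/FN counts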