List of usage examples for weka.classifiers Evaluation evaluateModel
public static String evaluateModel(Classifier classifier, String[] options) throws Exception — note: most of the examples below actually call the instance overload Evaluation.evaluateModel(Classifier, Instances), not this static String[]-options form.
From source file:com.sliit.rules.RuleContainer.java
/**
 * Loads a CSV test file, runs the trained rule model over it, and returns
 * the class label predicted for the first row of the file.
 *
 * @param filePath path of the CSV file holding the test instances
 * @return the predicted class label of the first test instance
 * @throws Exception if the file cannot be read or evaluation fails
 */
public String predictionResult(String filePath) throws Exception {
    CSVLoader csvLoader = new CSVLoader();
    csvLoader.setSource(new File(filePath));
    Instances testData = csvLoader.getDataSet();
    // The last attribute is treated as the class attribute.
    testData.setClassIndex(testData.numAttributes() - 1);

    Evaluation evaluation = new Evaluation(testData);
    evaluation.evaluateModel(ruleMoldel, testData);

    // Only the first instance's prediction is reported to the caller.
    ArrayList<Prediction> collected = evaluation.predictions();
    int labelIndex = (int) collected.get(0).predicted();
    // NOTE(review): label text is taken from the 'instances' field (training
    // header), not from testData — presumably both share the same class
    // attribute; confirm against the rest of RuleContainer.
    return instances.classAttribute().value(labelIndex);
}
From source file:controller.BothClassificationsServlet.java
@Override protected void doPost(HttpServletRequest request, HttpServletResponse response) throws ServletException, IOException { request.setCharacterEncoding("UTF-8"); String dir = "/data/"; String path = getServletContext().getRealPath(dir); String action = request.getParameter("action"); switch (action) { case "create": { String fileName = request.getParameter("file"); String aux = fileName.substring(0, fileName.indexOf(".")); String pathInput = path + "/" + request.getParameter("file"); String pathTrainingOutput = path + "/" + aux + "-training-arff.txt"; String pathTestOutput = path + "/" + aux + "-test-arff.txt"; String pathBothClassifications = path + "/" + aux + "-bothClassifications.txt"; String name = request.getParameter("name"); int range = Integer.parseInt(request.getParameter("range")); int size = Integer.parseInt(request.getParameter("counter")); String[] columns = new String[size]; String[] types = new String[size]; int[] positions = new int[size]; int counter = 0; for (int i = 0; i < size; i++) { if (request.getParameter("column-" + (i + 1)) != null) { columns[counter] = request.getParameter("column-" + (i + 1)); types[counter] = request.getParameter("type-" + (i + 1)); positions[counter] = Integer.parseInt(request.getParameter("position-" + (i + 1))); counter++;// ww w.j a v a2 s . 
c o m } } FormatFiles.convertTxtToArff(pathInput, pathTrainingOutput, pathTestOutput, name, columns, types, positions, counter, range); try { J48 j48 = new J48(); BufferedReader readerTraining = new BufferedReader(new FileReader(pathTrainingOutput)); Instances instancesTraining = new Instances(readerTraining); instancesTraining.setClassIndex(instancesTraining.numAttributes() - 1); j48.buildClassifier(instancesTraining); BufferedReader readerTest = new BufferedReader(new FileReader(pathTestOutput)); //BufferedReader readerTest = new BufferedReader(new FileReader(pathTrainingOutput)); Instances instancesTest = new Instances(readerTest); instancesTest.setClassIndex(instancesTest.numAttributes() - 1); int correctsDecisionTree = 0; for (int i = 0; i < instancesTest.size(); i++) { Instance instance = instancesTest.get(i); double correctValue = instance.value(instance.attribute(instancesTest.numAttributes() - 1)); double classification = j48.classifyInstance(instance); if (correctValue == classification) { correctsDecisionTree++; } } Evaluation eval = new Evaluation(instancesTraining); eval.evaluateModel(j48, instancesTest); PrintWriter writer = new PrintWriter( new BufferedWriter(new FileWriter(pathBothClassifications, false))); writer.println("?rvore de Deciso\n\n"); writer.println(j48.toString()); writer.println(""); writer.println(""); writer.println("Results"); writer.println(eval.toSummaryString()); NaiveBayes naiveBayes = new NaiveBayes(); naiveBayes.buildClassifier(instancesTraining); eval = new Evaluation(instancesTraining); eval.evaluateModel(naiveBayes, instancesTest); int correctsNaiveBayes = 0; for (int i = 0; i < instancesTest.size(); i++) { Instance instance = instancesTest.get(i); double correctValue = instance.value(instance.attribute(instancesTest.numAttributes() - 1)); double classification = naiveBayes.classifyInstance(instance); if (correctValue == classification) { correctsNaiveBayes++; } } writer.println("Naive Bayes\n\n"); 
writer.println(naiveBayes.toString()); writer.println(""); writer.println(""); writer.println("Results"); writer.println(eval.toSummaryString()); writer.close(); response.sendRedirect("BothClassifications?action=view&correctsDecisionTree=" + correctsDecisionTree + "&correctsNaiveBayes=" + correctsNaiveBayes + "&totalTest=" + instancesTest.size() + "&totalTrainig=" + instancesTraining.size() + "&range=" + range + "&fileName=" + aux + "-bothClassifications.txt"); } catch (Exception e) { System.out.println(e.getMessage()); response.sendRedirect("Navigation?action=decisionTree"); } break; } default: response.sendError(404); } }
From source file:controller.DecisionTreeServlet.java
@Override protected void doPost(HttpServletRequest request, HttpServletResponse response) throws ServletException, IOException { request.setCharacterEncoding("UTF-8"); String dir = "/data/"; String path = getServletContext().getRealPath(dir); String action = request.getParameter("action"); switch (action) { case "create": { String fileName = request.getParameter("file"); String aux = fileName.substring(0, fileName.indexOf(".")); String pathInput = path + "/" + request.getParameter("file"); String pathTrainingOutput = path + "/" + aux + "-training-arff.txt"; String pathTestOutput = path + "/" + aux + "-test-arff.txt"; String pathDecisionTree = path + "/" + aux + "-decisionTree.txt"; String name = request.getParameter("name"); int range = Integer.parseInt(request.getParameter("range")); int size = Integer.parseInt(request.getParameter("counter")); String[] columns = new String[size]; String[] types = new String[size]; int[] positions = new int[size]; int counter = 0; for (int i = 0; i < size; i++) { if (request.getParameter("column-" + (i + 1)) != null) { columns[counter] = request.getParameter("column-" + (i + 1)); types[counter] = request.getParameter("type-" + (i + 1)); positions[counter] = Integer.parseInt(request.getParameter("position-" + (i + 1))); counter++;/*w w w.j a v a2 s. 
com*/ } } FormatFiles.convertTxtToArff(pathInput, pathTrainingOutput, pathTestOutput, name, columns, types, positions, counter, range); try { J48 j48 = new J48(); BufferedReader readerTraining = new BufferedReader(new FileReader(pathTrainingOutput)); Instances instancesTraining = new Instances(readerTraining); instancesTraining.setClassIndex(instancesTraining.numAttributes() - 1); j48.buildClassifier(instancesTraining); BufferedReader readerTest = new BufferedReader(new FileReader(pathTestOutput)); //BufferedReader readerTest = new BufferedReader(new FileReader(pathTrainingOutput)); Instances instancesTest = new Instances(readerTest); instancesTest.setClassIndex(instancesTest.numAttributes() - 1); int corrects = 0; int truePositive = 0; int trueNegative = 0; int falsePositive = 0; int falseNegative = 0; for (int i = 0; i < instancesTest.size(); i++) { Instance instance = instancesTest.get(i); double correctValue = instance.value(instance.attribute(instancesTest.numAttributes() - 1)); double classification = j48.classifyInstance(instance); if (correctValue == classification) { corrects++; } if (correctValue == 1 && classification == 1) { truePositive++; } if (correctValue == 1 && classification == 0) { falseNegative++; } if (correctValue == 0 && classification == 1) { falsePositive++; } if (correctValue == 0 && classification == 0) { trueNegative++; } } Evaluation eval = new Evaluation(instancesTraining); eval.evaluateModel(j48, instancesTest); PrintWriter writer = new PrintWriter(new BufferedWriter(new FileWriter(pathDecisionTree, false))); writer.println(j48.toString()); writer.println(""); writer.println(""); writer.println("Results"); writer.println(eval.toSummaryString()); writer.close(); response.sendRedirect("DecisionTree?action=view&corrects=" + corrects + "&totalTest=" + instancesTest.size() + "&totalTrainig=" + instancesTraining.size() + "&truePositive=" + truePositive + "&trueNegative=" + trueNegative + "&falsePositive=" + falsePositive + 
"&falseNegative=" + falseNegative + "&fileName=" + aux + "-decisionTree.txt"); } catch (Exception e) { System.out.println(e.getMessage()); response.sendRedirect("Navigation?action=decisionTree"); } break; } default: response.sendError(404); } }
From source file:controller.NaiveBayesServlet.java
@Override protected void doPost(HttpServletRequest request, HttpServletResponse response) throws ServletException, IOException { request.setCharacterEncoding("UTF-8"); String dir = "/data/"; String path = getServletContext().getRealPath(dir); String action = request.getParameter("action"); switch (action) { case "create": { String fileName = request.getParameter("file"); String aux = fileName.substring(0, fileName.indexOf(".")); String pathInput = path + "/" + request.getParameter("file"); String pathTrainingOutput = path + "/" + aux + "-training-arff.txt"; String pathTestOutput = path + "/" + aux + "-test-arff.txt"; String pathNaivebayes = path + "/" + aux + "-naiveBayes.txt"; String name = request.getParameter("name"); int range = Integer.parseInt(request.getParameter("range")); int size = Integer.parseInt(request.getParameter("counter")); String[] columns = new String[size]; String[] types = new String[size]; int[] positions = new int[size]; int counter = 0; for (int i = 0; i < size; i++) { if (request.getParameter("column-" + (i + 1)) != null) { columns[counter] = request.getParameter("column-" + (i + 1)); types[counter] = request.getParameter("type-" + (i + 1)); positions[counter] = Integer.parseInt(request.getParameter("position-" + (i + 1))); counter++;/* www .ja v a 2 s . 
c o m*/ } } FormatFiles.convertTxtToArff(pathInput, pathTrainingOutput, pathTestOutput, name, columns, types, positions, counter, range); try { NaiveBayes naiveBayes = new NaiveBayes(); BufferedReader readerTraining = new BufferedReader(new FileReader(pathTrainingOutput)); Instances instancesTraining = new Instances(readerTraining); instancesTraining.setClassIndex(instancesTraining.numAttributes() - 1); naiveBayes.buildClassifier(instancesTraining); BufferedReader readerTest = new BufferedReader(new FileReader(pathTestOutput)); //BufferedReader readerTest = new BufferedReader(new FileReader(pathTrainingOutput)); Instances instancesTest = new Instances(readerTest); instancesTest.setClassIndex(instancesTest.numAttributes() - 1); Evaluation eval = new Evaluation(instancesTraining); eval.evaluateModel(naiveBayes, instancesTest); int corrects = 0; int truePositive = 0; int trueNegative = 0; int falsePositive = 0; int falseNegative = 0; for (int i = 0; i < instancesTest.size(); i++) { Instance instance = instancesTest.get(i); double correctValue = instance.value(instance.attribute(instancesTest.numAttributes() - 1)); double classification = naiveBayes.classifyInstance(instance); if (correctValue == classification) { corrects++; } if (correctValue == 1 && classification == 1) { truePositive++; } if (correctValue == 1 && classification == 0) { falseNegative++; } if (correctValue == 0 && classification == 1) { falsePositive++; } if (correctValue == 0 && classification == 0) { trueNegative++; } } PrintWriter writer = new PrintWriter(new BufferedWriter(new FileWriter(pathNaivebayes, false))); writer.println(naiveBayes.toString()); writer.println(""); writer.println(""); writer.println("Results"); writer.println(eval.toSummaryString()); writer.close(); response.sendRedirect( "NaiveBayes?action=view&corrects=" + corrects + "&totalTest=" + instancesTest.size() + "&totalTrainig=" + instancesTraining.size() + "&range=" + range + "&truePositive=" + truePositive + "&trueNegative=" + 
trueNegative + "&falsePositive=" + falsePositive + "&falseNegative=" + falseNegative + "&fileName=" + aux + "-naiveBayes.txt"); } catch (Exception e) { System.out.println(e.getMessage()); response.sendRedirect("Navigation?action=naiveBayes"); } break; } default: response.sendError(404); } }
From source file:cs.man.ac.uk.classifiers.GetAUC.java
License:Open Source License
/**
 * Computes the AUC for the supplied learner using 5x2 cross-validation:
 * 5 runs, each a fresh 2-fold split of the enclosing class's {@code data}
 * field (stratified when the class attribute is nominal).
 *
 * Per run, a single {@link Evaluation} accumulates predictions over both
 * folds; after each fold the ROC area for class index 0 is added to a
 * running sum, and the mean over all runs*folds is returned.
 *
 * NOTE(review): any exception is swallowed and 0 is returned, which is
 * indistinguishable from a genuine AUC of 0 — consider rethrowing.
 * NOTE(review): {@code vmc} (a field) is reassigned each fold but never
 * used here; presumably another method renders the panel — confirm.
 *
 * @return the mean AUC as a double value, or 0 on any failure.
 */
@SuppressWarnings("unused")
private static double validate5x2CV() {
    try {
        // other options
        int runs = 5;
        int folds = 2;
        double AUC_SUM = 0; // sum of per-fold ROC areas across all runs

        // perform cross-validation
        for (int i = 0; i < runs; i++) {
            // randomize data with a per-run seed so each run gets a
            // different (but reproducible) 2-fold split
            int seed = i + 1;
            Random rand = new Random(seed);
            Instances randData = new Instances(data); // copy; 'data' is a class field
            randData.randomize(rand);
            if (randData.classAttribute().isNominal()) {
                System.out.println("Stratifying...");
                randData.stratify(folds);
            }
            // one Evaluation per run: predictions from both folds accumulate
            Evaluation eval = new Evaluation(randData);
            for (int n = 0; n < folds; n++) {
                Instances train = randData.trainCV(folds, n);
                Instances test = randData.testCV(folds, n);
                // the above code is used by the StratifiedRemoveFolds filter, the
                // code below by the Explorer/Experimenter:
                // Instances train = randData.trainCV(folds, n, rand);

                // build and evaluate classifier: unpruned J48 with Laplace
                // smoothing (-U -A)
                String[] options = { "-U", "-A" };
                J48 classifier = new J48();
                //HTree classifier = new HTree();
                classifier.setOptions(options);
                classifier.buildClassifier(train);
                eval.evaluateModel(classifier, test);

                // generate ROC curve for class index 0 from the (cumulative)
                // predictions gathered so far in this run
                ThresholdCurve tc = new ThresholdCurve();
                int classIndex = 0;
                Instances result = tc.getCurve(eval.predictions(), classIndex);

                // plot curve (panel stored in the 'vmc' field)
                vmc = new ThresholdVisualizePanel();
                AUC_SUM += ThresholdCurve.getROCArea(result);
                System.out.println("AUC: " + ThresholdCurve.getROCArea(result) + " \tAUC SUM: " + AUC_SUM);
            }
        }
        // mean over runs*folds fold-level AUC values
        return AUC_SUM / ((double) runs * (double) folds);
    } catch (Exception e) {
        System.out.println("Exception validating data!");
        return 0;
    }
}
From source file:csav2.Weka_additive.java
public void classifyTestSet1(String input) throws Exception { String ids = ""; ReaderWriter rw = new ReaderWriter(); //ATTRIBUTES//from w w w. j av a 2 s .c om Attribute attr[] = new Attribute[50]; //numeric attr[0] = new Attribute("Autosentiment"); //class FastVector classValue = new FastVector(3); classValue.addElement("p"); classValue.addElement("n"); classValue.addElement("o"); attr[1] = new Attribute("answer", classValue); FastVector attrs = new FastVector(); attrs.addElement(attr[0]); attrs.addElement(attr[1]); // Add Instances Instances dataset = new Instances("my_dataset", attrs, 0); StringTokenizer tokenizer = new StringTokenizer(input); while (tokenizer.hasMoreTokens()) { Instance example = new Instance(2); for (int j = 0; j < 2; j++) { String st = tokenizer.nextToken(); System.out.println(j + " " + st); if (j == 0) example.setValue(attr[j], Float.parseFloat(st)); else if (j == 1) example.setValue(attr[j], st); else example.setValue(attr[j], Integer.parseInt(st)); } ids += tokenizer.nextToken() + "\t"; dataset.add(example); } //Save dataset String file = "Classifier\\featurefile_additive_test1.arff"; ArffSaver saver = new ArffSaver(); saver.setInstances(dataset); saver.setFile(new File(file)); saver.writeBatch(); //Read dataset ArffLoader loader = new ArffLoader(); loader.setFile(new File(file)); dataset = loader.getDataSet(); //Build classifier dataset.setClassIndex(1); //Read classifier back String file1 = "Classifier\\classifier_add_autosentiment.model"; InputStream is = new FileInputStream(file1); Classifier classifier; ObjectInputStream objectInputStream = new ObjectInputStream(is); classifier = (Classifier) objectInputStream.readObject(); //Evaluate Instances test = new Instances(dataset, 0, dataset.numInstances()); test.setClassIndex(1); //Do eval Evaluation eval = new Evaluation(test); //trainset eval.evaluateModel(classifier, test); //testset System.out.println(eval.toSummaryString()); System.out.println("WEIGHTED F-MEASURE:" + 
eval.weightedFMeasure()); System.out.println("WEIGHTED PRECISION:" + eval.weightedPrecision()); System.out.println("WEIGHTED RECALL:" + eval.weightedRecall()); //output predictions String optest = "", val = ""; StringTokenizer op = new StringTokenizer(ids); int count = 0; while (op.hasMoreTokens()) { double[] prediction = classifier.distributionForInstance(test.instance(count)); count += 1; //optest+=op.nextToken()+" "+Double.toString((double) Math.round((prediction[0]) * 1000) / 1000)+"\n"; if (prediction[0] > prediction[1]) { if (prediction[0] > prediction[2]) { val = "p: " + Double.toString((double) Math.round((prediction[0]) * 1000) / 1000); } else { val = "o: " + Double.toString((double) Math.round((prediction[2]) * 1000) / 1000); } } else { if (prediction[1] > prediction[2]) { val = "n: " + Double.toString((double) Math.round((prediction[1]) * 1000) / 1000); } else { val = "o: " + Double.toString((double) Math.round((prediction[2]) * 1000) / 1000); } } optest += op.nextToken() + "\t" + val + "\n"; } rw.writeToFile(optest, "Answers_additive_Test1", "txt"); }
From source file:csav2.Weka_additive.java
public void classifyTestSet2(String input) throws Exception { String ids = ""; ReaderWriter rw = new ReaderWriter(); //ATTRIBUTES/*from w w w.ja v a 2s. c o m*/ Attribute attr[] = new Attribute[50]; //numeric attr[0] = new Attribute("Autosentiment"); attr[1] = new Attribute("PostiveMatch"); attr[2] = new Attribute("NegativeMatch"); //class FastVector classValue = new FastVector(3); classValue.addElement("p"); classValue.addElement("n"); classValue.addElement("o"); attr[3] = new Attribute("answer", classValue); FastVector attrs = new FastVector(); attrs.addElement(attr[0]); attrs.addElement(attr[1]); attrs.addElement(attr[2]); attrs.addElement(attr[3]); // Add Instances Instances dataset = new Instances("my_dataset", attrs, 0); StringTokenizer tokenizer = new StringTokenizer(input); while (tokenizer.hasMoreTokens()) { Instance example = new Instance(4); for (int j = 0; j < 4; j++) { String st = tokenizer.nextToken(); System.out.println(j + " " + st); if (j == 0) example.setValue(attr[j], Float.parseFloat(st)); else if (j == 3) example.setValue(attr[j], st); else example.setValue(attr[j], Integer.parseInt(st)); } ids += tokenizer.nextToken() + "\t"; dataset.add(example); } //Save dataset String file = "Classifier\\featurefile_additive_test2.arff"; ArffSaver saver = new ArffSaver(); saver.setInstances(dataset); saver.setFile(new File(file)); saver.writeBatch(); //Read dataset ArffLoader loader = new ArffLoader(); loader.setFile(new File(file)); dataset = loader.getDataSet(); //Build classifier dataset.setClassIndex(3); //Read classifier back String file1 = "Classifier\\classifier_add_asAndpolarwords.model"; InputStream is = new FileInputStream(file1); Classifier classifier; ObjectInputStream objectInputStream = new ObjectInputStream(is); classifier = (Classifier) objectInputStream.readObject(); //Evaluate Instances test = new Instances(dataset, 0, dataset.numInstances()); test.setClassIndex(3); //Do eval Evaluation eval = new Evaluation(test); //trainset 
eval.evaluateModel(classifier, test); //testset System.out.println(eval.toSummaryString()); System.out.println("WEIGHTED F-MEASURE:" + eval.weightedFMeasure()); System.out.println("WEIGHTED PRECISION:" + eval.weightedPrecision()); System.out.println("WEIGHTED RECALL:" + eval.weightedRecall()); //output predictions String optest = "", val = ""; StringTokenizer op = new StringTokenizer(ids); int count = 0; while (op.hasMoreTokens()) { double[] prediction = classifier.distributionForInstance(test.instance(count)); count += 1; if (prediction[0] > prediction[1]) { if (prediction[0] > prediction[2]) { val = "p: " + Double.toString((double) Math.round((prediction[0]) * 1000) / 1000); } else { val = "o: " + Double.toString((double) Math.round((prediction[2]) * 1000) / 1000); } } else { if (prediction[1] > prediction[2]) { val = "n: " + Double.toString((double) Math.round((prediction[1]) * 1000) / 1000); } else { val = "o: " + Double.toString((double) Math.round((prediction[2]) * 1000) / 1000); } } optest += op.nextToken() + "\t" + val + "\n"; } rw.writeToFile(optest, "Answers_additive_Test2", "txt"); }
From source file:csav2.Weka_additive.java
public void classifyTestSet3(String input) throws Exception { String ids = ""; ReaderWriter rw = new ReaderWriter(); //ATTRIBUTES/*from w ww. ja v a2 s.c om*/ Attribute attr[] = new Attribute[50]; //numeric attr[0] = new Attribute("Autosentiment"); attr[1] = new Attribute("PositiveMatch"); attr[2] = new Attribute("NegativeMatch"); attr[3] = new Attribute("FW"); attr[4] = new Attribute("JJ"); attr[5] = new Attribute("RB"); attr[6] = new Attribute("RB_JJ"); //class FastVector classValue = new FastVector(3); classValue.addElement("p"); classValue.addElement("n"); classValue.addElement("o"); attr[7] = new Attribute("answer", classValue); FastVector attrs = new FastVector(); attrs.addElement(attr[0]); attrs.addElement(attr[1]); attrs.addElement(attr[2]); attrs.addElement(attr[3]); attrs.addElement(attr[4]); attrs.addElement(attr[5]); attrs.addElement(attr[6]); attrs.addElement(attr[7]); // Add Instances Instances dataset = new Instances("my_dataset", attrs, 0); StringTokenizer tokenizer = new StringTokenizer(input); while (tokenizer.hasMoreTokens()) { Instance example = new Instance(8); for (int j = 0; j < 8; j++) { String st = tokenizer.nextToken(); System.out.println(j + " " + st); if (j == 0) example.setValue(attr[j], Float.parseFloat(st)); else if (j == 7) example.setValue(attr[j], st); else example.setValue(attr[j], Integer.parseInt(st)); } ids += tokenizer.nextToken() + "\t"; dataset.add(example); } //Save dataset String file = "Classifier\\featurefile_additive_test3.arff"; ArffSaver saver = new ArffSaver(); saver.setInstances(dataset); saver.setFile(new File(file)); saver.writeBatch(); //Read dataset ArffLoader loader = new ArffLoader(); loader.setFile(new File(file)); dataset = loader.getDataSet(); //Build classifier dataset.setClassIndex(7); //Read classifier back String file1 = "Classifier\\classifier_add_asAndpolarwordsAndpos.model"; InputStream is = new FileInputStream(file1); Classifier classifier; ObjectInputStream objectInputStream = new 
ObjectInputStream(is); classifier = (Classifier) objectInputStream.readObject(); //Evaluate Instances test = new Instances(dataset, 0, dataset.numInstances()); test.setClassIndex(7); //Do eval Evaluation eval = new Evaluation(test); //trainset eval.evaluateModel(classifier, test); //testset System.out.println(eval.toSummaryString()); System.out.println("WEIGHTED F-MEASURE:" + eval.weightedFMeasure()); System.out.println("WEIGHTED PRECISION:" + eval.weightedPrecision()); System.out.println("WEIGHTED RECALL:" + eval.weightedRecall()); //output predictions String optest = "", val = ""; StringTokenizer op = new StringTokenizer(ids); int count = 0; while (op.hasMoreTokens()) { double[] prediction = classifier.distributionForInstance(test.instance(count)); count += 1; if (prediction[0] > prediction[1]) { if (prediction[0] > prediction[2]) { val = "p: " + Double.toString((double) Math.round((prediction[0]) * 1000) / 1000); } else { val = "o: " + Double.toString((double) Math.round((prediction[2]) * 1000) / 1000); } } else { if (prediction[1] > prediction[2]) { val = "n: " + Double.toString((double) Math.round((prediction[1]) * 1000) / 1000); } else { val = "o: " + Double.toString((double) Math.round((prediction[2]) * 1000) / 1000); } } optest += op.nextToken() + "\t" + val + "\n"; } rw.writeToFile(optest, "Answers_additive_Test3", "txt"); }
From source file:csav2.Weka_additive.java
public void classifyTestSet4(String input) throws Exception { String ids = ""; ReaderWriter rw = new ReaderWriter(); //ATTRIBUTES/*w ww .j a v a 2 s.c om*/ Attribute attr[] = new Attribute[50]; //numeric attr[0] = new Attribute("Autosentiment"); attr[1] = new Attribute("PositiveMatch"); attr[2] = new Attribute("NegativeMatch"); attr[3] = new Attribute("FW"); attr[4] = new Attribute("JJ"); attr[5] = new Attribute("RB"); attr[6] = new Attribute("RB_JJ"); attr[7] = new Attribute("amod"); attr[8] = new Attribute("acomp"); attr[9] = new Attribute("advmod"); //class FastVector classValue = new FastVector(3); classValue.addElement("p"); classValue.addElement("n"); classValue.addElement("o"); attr[10] = new Attribute("answer", classValue); FastVector attrs = new FastVector(); attrs.addElement(attr[0]); attrs.addElement(attr[1]); attrs.addElement(attr[2]); attrs.addElement(attr[3]); attrs.addElement(attr[4]); attrs.addElement(attr[5]); attrs.addElement(attr[6]); attrs.addElement(attr[7]); attrs.addElement(attr[8]); attrs.addElement(attr[9]); attrs.addElement(attr[10]); // Add Instances Instances dataset = new Instances("my_dataset", attrs, 0); StringTokenizer tokenizer = new StringTokenizer(input); while (tokenizer.hasMoreTokens()) { Instance example = new Instance(11); for (int j = 0; j < 11; j++) { String st = tokenizer.nextToken(); System.out.println(j + " " + st); if (j == 0) example.setValue(attr[j], Float.parseFloat(st)); else if (j == 10) example.setValue(attr[j], st); else example.setValue(attr[j], Integer.parseInt(st)); } ids += tokenizer.nextToken() + "\t"; dataset.add(example); } //Save dataset String file = "Classifier\\featurefile_additive_test4.arff"; ArffSaver saver = new ArffSaver(); saver.setInstances(dataset); saver.setFile(new File(file)); saver.writeBatch(); //Read dataset ArffLoader loader = new ArffLoader(); loader.setFile(new File(file)); dataset = loader.getDataSet(); //Build classifier dataset.setClassIndex(10); //Read classifier back String file1 = 
"Classifier\\classifier_add_asAndpolarwordsAndposAnddep.model"; InputStream is = new FileInputStream(file1); Classifier classifier; ObjectInputStream objectInputStream = new ObjectInputStream(is); classifier = (Classifier) objectInputStream.readObject(); //Evaluate Instances test = new Instances(dataset, 0, dataset.numInstances()); test.setClassIndex(10); //Do eval Evaluation eval = new Evaluation(test); //trainset eval.evaluateModel(classifier, test); //testset System.out.println(eval.toSummaryString()); System.out.println("WEIGHTED F-MEASURE:" + eval.weightedFMeasure()); System.out.println("WEIGHTED PRECISION:" + eval.weightedPrecision()); System.out.println("WEIGHTED RECALL:" + eval.weightedRecall()); //output predictions String optest = "", val = ""; StringTokenizer op = new StringTokenizer(ids); int count = 0; while (op.hasMoreTokens()) { double[] prediction = classifier.distributionForInstance(test.instance(count)); count += 1; if (prediction[0] > prediction[1]) { if (prediction[0] > prediction[2]) { val = "p: " + Double.toString((double) Math.round((prediction[0]) * 1000) / 1000); } else { val = "o: " + Double.toString((double) Math.round((prediction[2]) * 1000) / 1000); } } else { if (prediction[1] > prediction[2]) { val = "n: " + Double.toString((double) Math.round((prediction[1]) * 1000) / 1000); } else { val = "o: " + Double.toString((double) Math.round((prediction[2]) * 1000) / 1000); } } optest += op.nextToken() + "\t" + val + "\n"; } rw.writeToFile(optest, "Answers_additive_Test4", "txt"); }
From source file:csav2.Weka_additive.java
public void classifyTestSet5(String input) throws Exception { String ids = ""; ReaderWriter rw = new ReaderWriter(); //ATTRIBUTES// w w w . j a va 2 s .c o m Attribute attr[] = new Attribute[50]; //numeric attr[0] = new Attribute("Autosentiment"); attr[1] = new Attribute("PositiveMatch"); attr[2] = new Attribute("NegativeMatch"); attr[3] = new Attribute("FW"); attr[4] = new Attribute("JJ"); attr[5] = new Attribute("RB"); attr[6] = new Attribute("RB_JJ"); attr[7] = new Attribute("amod"); attr[8] = new Attribute("acomp"); attr[9] = new Attribute("advmod"); attr[10] = new Attribute("BLPos"); attr[11] = new Attribute("BLNeg"); //class FastVector classValue = new FastVector(3); classValue.addElement("p"); classValue.addElement("n"); classValue.addElement("o"); attr[12] = new Attribute("answer", classValue); FastVector attrs = new FastVector(); attrs.addElement(attr[0]); attrs.addElement(attr[1]); attrs.addElement(attr[2]); attrs.addElement(attr[3]); attrs.addElement(attr[4]); attrs.addElement(attr[5]); attrs.addElement(attr[6]); attrs.addElement(attr[7]); attrs.addElement(attr[8]); attrs.addElement(attr[9]); attrs.addElement(attr[10]); attrs.addElement(attr[11]); attrs.addElement(attr[12]); // Add Instances Instances dataset = new Instances("my_dataset", attrs, 0); StringTokenizer tokenizer = new StringTokenizer(input); while (tokenizer.hasMoreTokens()) { Instance example = new Instance(13); for (int j = 0; j < 13; j++) { String st = tokenizer.nextToken(); System.out.println(j + " " + st); if (j == 0) example.setValue(attr[j], Float.parseFloat(st)); else if (j == 12) example.setValue(attr[j], st); else example.setValue(attr[j], Integer.parseInt(st)); } ids += tokenizer.nextToken() + "\t"; dataset.add(example); } //Save dataset String file = "Classifier\\featurefile_additive_test5.arff"; ArffSaver saver = new ArffSaver(); saver.setInstances(dataset); saver.setFile(new File(file)); saver.writeBatch(); //Read dataset ArffLoader loader = new ArffLoader(); loader.setFile(new 
File(file)); dataset = loader.getDataSet(); //Build classifier dataset.setClassIndex(12); //Read classifier back String file1 = "Classifier\\classifier_add_asAndpolarwordsAndposAnddepAndbl.model"; InputStream is = new FileInputStream(file1); Classifier classifier; ObjectInputStream objectInputStream = new ObjectInputStream(is); classifier = (Classifier) objectInputStream.readObject(); //Evaluate Instances test = new Instances(dataset, 0, dataset.numInstances()); test.setClassIndex(12); //Do eval Evaluation eval = new Evaluation(test); //trainset eval.evaluateModel(classifier, test); //testset System.out.println(eval.toSummaryString()); System.out.println("WEIGHTED F-MEASURE:" + eval.weightedFMeasure()); System.out.println("WEIGHTED PRECISION:" + eval.weightedPrecision()); System.out.println("WEIGHTED RECALL:" + eval.weightedRecall()); //output predictions String optest = "", val = ""; StringTokenizer op = new StringTokenizer(ids); int count = 0; while (op.hasMoreTokens()) { double[] prediction = classifier.distributionForInstance(test.instance(count)); count += 1; if (prediction[0] > prediction[1]) { if (prediction[0] > prediction[2]) { val = "p: " + Double.toString((double) Math.round((prediction[0]) * 1000) / 1000); } else { val = "o: " + Double.toString((double) Math.round((prediction[2]) * 1000) / 1000); } } else { if (prediction[1] > prediction[2]) { val = "n: " + Double.toString((double) Math.round((prediction[1]) * 1000) / 1000); } else { val = "o: " + Double.toString((double) Math.round((prediction[2]) * 1000) / 1000); } } optest += op.nextToken() + "\t" + val + "\n"; } rw.writeToFile(optest, "Answers_additive_Test5", "txt"); }