Example usage for weka.classifiers Evaluation weightedFMeasure

Introduction

On this page you can find example usage for weka.classifiers Evaluation weightedFMeasure.

Prototype

public double weightedFMeasure() 

Document

Calculates the macro weighted (by class size) average F-Measure.
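
Below is a minimal, self-contained sketch of calling weightedFMeasure() after a 10-fold cross-validation. The ARFF path and the choice of J48 are placeholders for illustration; any Weka classifier and any dataset with a nominal class work the same way.

import java.util.Random;

import weka.classifiers.Evaluation;
import weka.classifiers.trees.J48;
import weka.core.Instances;
import weka.core.converters.ConverterUtils.DataSource;

public class WeightedFMeasureExample {
    public static void main(String[] args) throws Exception {
        //Load a dataset and use the last attribute as the class (placeholder path)
        Instances data = DataSource.read("mydata.arff");
        data.setClassIndex(data.numAttributes() - 1);

        //10-fold cross-validation; crossValidateModel copies and trains the classifier itself
        Evaluation eval = new Evaluation(data);
        eval.crossValidateModel(new J48(), data, 10, new Random(1));

        //Average F-Measure over all classes, weighted by class size
        System.out.println("Weighted F-Measure: " + eval.weightedFMeasure());
    }
}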

Usage

From source file:csav2.Weka_additive.java

public void createTrainingFeatureFile5(String input) throws Exception {
    String file = "Classifier\\featurefile_additive_trial5.arff";
    ArffLoader loader = new ArffLoader();

    //ATTRIBUTES
    Attribute attr[] = new Attribute[50];

    //numeric
    attr[0] = new Attribute("Autosentiment");
    attr[1] = new Attribute("PositiveMatch");
    attr[2] = new Attribute("NegativeMatch");
    attr[3] = new Attribute("FW");
    attr[4] = new Attribute("JJ");
    attr[5] = new Attribute("RB");
    attr[6] = new Attribute("RB_JJ");
    attr[7] = new Attribute("amod");
    attr[8] = new Attribute("acomp");
    attr[9] = new Attribute("advmod");
    attr[10] = new Attribute("BLPos");
    attr[11] = new Attribute("BLNeg");

    //class
    FastVector classValue = new FastVector(3);
    classValue.addElement("p");
    classValue.addElement("n");
    classValue.addElement("o");
    attr[12] = new Attribute("answer", classValue);

    FastVector attrs = new FastVector();
    attrs.addElement(attr[0]);
    attrs.addElement(attr[1]);
    attrs.addElement(attr[2]);
    attrs.addElement(attr[3]);
    attrs.addElement(attr[4]);
    attrs.addElement(attr[5]);
    attrs.addElement(attr[6]);
    attrs.addElement(attr[7]);
    attrs.addElement(attr[8]);
    attrs.addElement(attr[9]);
    attrs.addElement(attr[10]);
    attrs.addElement(attr[11]);
    attrs.addElement(attr[12]);

    // Add Instances
    Instances dataset = new Instances("my_dataset", attrs, 0);

    if (new File(file).isFile()) {
        loader.setFile(new File(file));
        dataset = loader.getDataSet();
    }

    System.out.println("-----------------------------------------");
    System.out.println(input);
    System.out.println("-----------------------------------------");

    StringTokenizer tokenizer = new StringTokenizer(input);

    while (tokenizer.hasMoreTokens()) {
        Instance example = new Instance(13);
        for (int j = 0; j < 13; j++) {
            String st = tokenizer.nextToken();
            System.out.println(j + " " + st);
            if (j == 0)
                example.setValue(attr[j], Float.parseFloat(st));
            else if (j == 12)
                example.setValue(attr[j], st);
            else
                example.setValue(attr[j], Integer.parseInt(st));
        }
        dataset.add(example);
    }

    //Save dataset
    ArffSaver saver = new ArffSaver();
    saver.setInstances(dataset);
    saver.setFile(new File(file));
    saver.writeBatch();

    //Read dataset
    loader.setFile(new File(file));
    dataset = loader.getDataSet();

    //Build classifier
    dataset.setClassIndex(12);
    Classifier classifier = new J48();
    classifier.buildClassifier(dataset);

    //Save classifier
    String file1 = "Classifier\\classifier_add_asAndpolarwordsAndposAnddepAndbl.model";
    OutputStream os = new FileOutputStream(file1);
    ObjectOutputStream objectOutputStream = new ObjectOutputStream(os);
    objectOutputStream.writeObject(classifier);
    objectOutputStream.close();

    //Read the classifier back (comment out if not needed)
    InputStream is = new FileInputStream(file1);
    ObjectInputStream objectInputStream = new ObjectInputStream(is);
    classifier = (Classifier) objectInputStream.readObject();
    objectInputStream.close();

    //Resample if needed
    //dataset = dataset.resample(new Random(42));
    //Split into a 70:30 train/test set (note: the split is built here but the
    //evaluation below uses 10-fold cross-validation on the full dataset)
    double percent = 70.0;
    int trainSize = (int) Math.round(dataset.numInstances() * percent / 100);
    int testSize = dataset.numInstances() - trainSize;
    Instances train = new Instances(dataset, 0, trainSize);
    Instances test = new Instances(dataset, trainSize, testSize);
    train.setClassIndex(12);
    test.setClassIndex(12);

    //Evaluate
    Evaluation eval = new Evaluation(dataset); //trainset
    eval.crossValidateModel(classifier, dataset, 10, new Random(1));
    System.out.println("EVALUATION:\n" + eval.toSummaryString());
    System.out.println("WEIGHTED F-MEASURE:\n" + eval.weightedFMeasure());
    System.out.println("WEIGHTED PRECISION:\n" + eval.weightedPrecision());
    System.out.println("WEIGHTED RECALL:\n" + eval.weightedRecall());
}
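
The listing above constructs a 70:30 train/test split but then evaluates with 10-fold cross-validation on the full dataset, so the split is never used. As a sketch (reusing the train and test variables from the listing above), the held-out split could be evaluated instead:

//Sketch: train on the 70% split, evaluate on the held-out 30%
Classifier heldOut = new J48();
heldOut.buildClassifier(train);
Evaluation holdout = new Evaluation(train);
holdout.evaluateModel(heldOut, test);
System.out.println("HELD-OUT WEIGHTED F-MEASURE:\n" + holdout.weightedFMeasure());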

From source file:csav2.Weka_additive.java

public void createTrainingFeatureFile6(String input) throws Exception {
    String file = "Classifier\\featurefile_additive_trial6.arff";
    ArffLoader loader = new ArffLoader();

    //ATTRIBUTES
    Attribute attr[] = new Attribute[50];

    attr[0] = new Attribute("Autosentiment");
    attr[1] = new Attribute("PositiveMatch");
    attr[2] = new Attribute("NegativeMatch");
    attr[3] = new Attribute("FW");
    attr[4] = new Attribute("JJ");
    attr[5] = new Attribute("RB");
    attr[6] = new Attribute("RB_JJ");
    attr[7] = new Attribute("amod");
    attr[8] = new Attribute("acomp");
    attr[9] = new Attribute("advmod");
    attr[10] = new Attribute("BLPos");
    attr[11] = new Attribute("BLNeg");
    attr[12] = new Attribute("VSPositive");
    attr[13] = new Attribute("VSNegative");

    //class
    FastVector classValue = new FastVector(3);
    classValue.addElement("p");
    classValue.addElement("n");
    classValue.addElement("o");
    attr[14] = new Attribute("answer", classValue);

    FastVector attrs = new FastVector();
    attrs.addElement(attr[0]);
    attrs.addElement(attr[1]);
    attrs.addElement(attr[2]);
    attrs.addElement(attr[3]);
    attrs.addElement(attr[4]);
    attrs.addElement(attr[5]);
    attrs.addElement(attr[6]);
    attrs.addElement(attr[7]);
    attrs.addElement(attr[8]);
    attrs.addElement(attr[9]);
    attrs.addElement(attr[10]);
    attrs.addElement(attr[11]);
    attrs.addElement(attr[12]);
    attrs.addElement(attr[13]);
    attrs.addElement(attr[14]);

    // Add Instances
    Instances dataset = new Instances("my_dataset", attrs, 0);

    if (new File(file).isFile()) {
        loader.setFile(new File(file));
        dataset = loader.getDataSet();
    }

    System.out.println("-----------------------------------------");
    System.out.println(input);
    System.out.println("-----------------------------------------");

    StringTokenizer tokenizer = new StringTokenizer(input);

    while (tokenizer.hasMoreTokens()) {
        Instance example = new Instance(15);
        for (int j = 0; j < 15; j++) {
            String st = tokenizer.nextToken();
            System.out.println(j + " " + st);
            if (j == 0)
                example.setValue(attr[j], Float.parseFloat(st));
            else if (j == 14)
                example.setValue(attr[j], st);
            else
                example.setValue(attr[j], Integer.parseInt(st));
        }
        dataset.add(example);
    }

    //Save dataset
    ArffSaver saver = new ArffSaver();
    saver.setInstances(dataset);
    saver.setFile(new File(file));
    saver.writeBatch();

    //Read dataset
    loader.setFile(new File(file));
    dataset = loader.getDataSet();

    //Build classifier
    dataset.setClassIndex(14);
    Classifier classifier = new J48();
    classifier.buildClassifier(dataset);

    //Save classifier
    String file1 = "Classifier\\classifier_add_asAndpolarwordsAndposAnddepAndblAndvs.model";
    OutputStream os = new FileOutputStream(file1);
    ObjectOutputStream objectOutputStream = new ObjectOutputStream(os);
    objectOutputStream.writeObject(classifier);
    objectOutputStream.close();

    //Read the classifier back (comment out if not needed)
    InputStream is = new FileInputStream(file1);
    ObjectInputStream objectInputStream = new ObjectInputStream(is);
    classifier = (Classifier) objectInputStream.readObject();
    objectInputStream.close();

    //Resample if needed
    //dataset = dataset.resample(new Random(42));
    //Split into a 70:30 train/test set (note: the split is built here but the
    //evaluation below uses 10-fold cross-validation on the full dataset)
    double percent = 70.0;
    int trainSize = (int) Math.round(dataset.numInstances() * percent / 100);
    int testSize = dataset.numInstances() - trainSize;
    Instances train = new Instances(dataset, 0, trainSize);
    Instances test = new Instances(dataset, trainSize, testSize);
    train.setClassIndex(14);
    test.setClassIndex(14);

    //Evaluate
    Evaluation eval = new Evaluation(dataset); //trainset
    eval.crossValidateModel(classifier, dataset, 10, new Random(1));
    System.out.println("EVALUATION:\n" + eval.toSummaryString());
    System.out.println("WEIGHTED F-MEASURE:\n" + eval.weightedFMeasure());
    System.out.println("WEIGHTED PRECISION:\n" + eval.weightedPrecision());
    System.out.println("WEIGHTED RECALL:\n" + eval.weightedRecall());
}

From source file:csav2.Weka_additive.java

public void classifyTestSet1(String input) throws Exception {
    String ids = "";
    ReaderWriter rw = new ReaderWriter();

    //ATTRIBUTES
    Attribute attr[] = new Attribute[50];

    //numeric
    attr[0] = new Attribute("Autosentiment");

    //class
    FastVector classValue = new FastVector(3);
    classValue.addElement("p");
    classValue.addElement("n");
    classValue.addElement("o");
    attr[1] = new Attribute("answer", classValue);

    FastVector attrs = new FastVector();
    attrs.addElement(attr[0]);
    attrs.addElement(attr[1]);

    // Add Instances
    Instances dataset = new Instances("my_dataset", attrs, 0);

    StringTokenizer tokenizer = new StringTokenizer(input);

    while (tokenizer.hasMoreTokens()) {
        Instance example = new Instance(2);
        for (int j = 0; j < 2; j++) {
            String st = tokenizer.nextToken();
            System.out.println(j + " " + st);
            if (j == 0)
                example.setValue(attr[j], Float.parseFloat(st));
            else if (j == 1)
                example.setValue(attr[j], st);
            else
                example.setValue(attr[j], Integer.parseInt(st));
        }
        ids += tokenizer.nextToken() + "\t";
        dataset.add(example);
    }

    //Save dataset
    String file = "Classifier\\featurefile_additive_test1.arff";
    ArffSaver saver = new ArffSaver();
    saver.setInstances(dataset);
    saver.setFile(new File(file));
    saver.writeBatch();

    //Read dataset
    ArffLoader loader = new ArffLoader();
    loader.setFile(new File(file));
    dataset = loader.getDataSet();

    //Set class index
    dataset.setClassIndex(1);

    //Read classifier back
    String file1 = "Classifier\\classifier_add_autosentiment.model";
    InputStream is = new FileInputStream(file1);
    Classifier classifier;
    ObjectInputStream objectInputStream = new ObjectInputStream(is);
    classifier = (Classifier) objectInputStream.readObject();
    objectInputStream.close();

    //Evaluate
    Instances test = new Instances(dataset, 0, dataset.numInstances());
    test.setClassIndex(1);

    //Do eval
    Evaluation eval = new Evaluation(test); //priors from the evaluation data
    eval.evaluateModel(classifier, test); //testset
    System.out.println(eval.toSummaryString());
    System.out.println("WEIGHTED F-MEASURE:" + eval.weightedFMeasure());
    System.out.println("WEIGHTED PRECISION:" + eval.weightedPrecision());
    System.out.println("WEIGHTED RECALL:" + eval.weightedRecall());

    //output predictions
    String optest = "", val = "";
    StringTokenizer op = new StringTokenizer(ids);
    int count = 0;
    while (op.hasMoreTokens()) {
        double[] prediction = classifier.distributionForInstance(test.instance(count));

        count += 1;
        //optest+=op.nextToken()+" "+Double.toString((double) Math.round((prediction[0]) * 1000) / 1000)+"\n";                
        if (prediction[0] > prediction[1]) {
            if (prediction[0] > prediction[2]) {
                val = "p: " + Double.toString((double) Math.round((prediction[0]) * 1000) / 1000);
            } else {
                val = "o: " + Double.toString((double) Math.round((prediction[2]) * 1000) / 1000);
            }
        } else {
            if (prediction[1] > prediction[2]) {
                val = "n: " + Double.toString((double) Math.round((prediction[1]) * 1000) / 1000);
            } else {
                val = "o: " + Double.toString((double) Math.round((prediction[2]) * 1000) / 1000);
            }
        }
        optest += op.nextToken() + "\t" + val + "\n";
    }
    rw.writeToFile(optest, "Answers_additive_Test1", "txt");
}

From source file:csav2.Weka_additive.java

public void classifyTestSet2(String input) throws Exception {
    String ids = "";
    ReaderWriter rw = new ReaderWriter();

    //ATTRIBUTES
    Attribute attr[] = new Attribute[50];

    //numeric
    attr[0] = new Attribute("Autosentiment");
    attr[1] = new Attribute("PositiveMatch");
    attr[2] = new Attribute("NegativeMatch");

    //class
    FastVector classValue = new FastVector(3);
    classValue.addElement("p");
    classValue.addElement("n");
    classValue.addElement("o");
    attr[3] = new Attribute("answer", classValue);

    FastVector attrs = new FastVector();
    attrs.addElement(attr[0]);
    attrs.addElement(attr[1]);
    attrs.addElement(attr[2]);
    attrs.addElement(attr[3]);

    // Add Instances
    Instances dataset = new Instances("my_dataset", attrs, 0);

    StringTokenizer tokenizer = new StringTokenizer(input);

    while (tokenizer.hasMoreTokens()) {
        Instance example = new Instance(4);
        for (int j = 0; j < 4; j++) {
            String st = tokenizer.nextToken();
            System.out.println(j + " " + st);
            if (j == 0)
                example.setValue(attr[j], Float.parseFloat(st));
            else if (j == 3)
                example.setValue(attr[j], st);
            else
                example.setValue(attr[j], Integer.parseInt(st));
        }
        ids += tokenizer.nextToken() + "\t";
        dataset.add(example);
    }

    //Save dataset
    String file = "Classifier\\featurefile_additive_test2.arff";
    ArffSaver saver = new ArffSaver();
    saver.setInstances(dataset);
    saver.setFile(new File(file));
    saver.writeBatch();

    //Read dataset
    ArffLoader loader = new ArffLoader();
    loader.setFile(new File(file));
    dataset = loader.getDataSet();

    //Set class index
    dataset.setClassIndex(3);

    //Read classifier back
    String file1 = "Classifier\\classifier_add_asAndpolarwords.model";
    InputStream is = new FileInputStream(file1);
    Classifier classifier;
    ObjectInputStream objectInputStream = new ObjectInputStream(is);
    classifier = (Classifier) objectInputStream.readObject();
    objectInputStream.close();

    //Evaluate
    Instances test = new Instances(dataset, 0, dataset.numInstances());
    test.setClassIndex(3);

    //Do eval
    Evaluation eval = new Evaluation(test); //priors from the evaluation data
    eval.evaluateModel(classifier, test); //testset
    System.out.println(eval.toSummaryString());
    System.out.println("WEIGHTED F-MEASURE:" + eval.weightedFMeasure());
    System.out.println("WEIGHTED PRECISION:" + eval.weightedPrecision());
    System.out.println("WEIGHTED RECALL:" + eval.weightedRecall());

    //output predictions
    String optest = "", val = "";
    StringTokenizer op = new StringTokenizer(ids);
    int count = 0;
    while (op.hasMoreTokens()) {
        double[] prediction = classifier.distributionForInstance(test.instance(count));
        count += 1;
        if (prediction[0] > prediction[1]) {
            if (prediction[0] > prediction[2]) {
                val = "p: " + Double.toString((double) Math.round((prediction[0]) * 1000) / 1000);
            } else {
                val = "o: " + Double.toString((double) Math.round((prediction[2]) * 1000) / 1000);
            }
        } else {
            if (prediction[1] > prediction[2]) {
                val = "n: " + Double.toString((double) Math.round((prediction[1]) * 1000) / 1000);
            } else {
                val = "o: " + Double.toString((double) Math.round((prediction[2]) * 1000) / 1000);
            }
        }
        optest += op.nextToken() + "\t" + val + "\n";
    }
    rw.writeToFile(optest, "Answers_additive_Test2", "txt");
}

From source file:csav2.Weka_additive.java

public void classifyTestSet3(String input) throws Exception {
    String ids = "";
    ReaderWriter rw = new ReaderWriter();

    //ATTRIBUTES
    Attribute attr[] = new Attribute[50];

    //numeric
    attr[0] = new Attribute("Autosentiment");
    attr[1] = new Attribute("PositiveMatch");
    attr[2] = new Attribute("NegativeMatch");
    attr[3] = new Attribute("FW");
    attr[4] = new Attribute("JJ");
    attr[5] = new Attribute("RB");
    attr[6] = new Attribute("RB_JJ");

    //class
    FastVector classValue = new FastVector(3);
    classValue.addElement("p");
    classValue.addElement("n");
    classValue.addElement("o");
    attr[7] = new Attribute("answer", classValue);

    FastVector attrs = new FastVector();
    attrs.addElement(attr[0]);
    attrs.addElement(attr[1]);
    attrs.addElement(attr[2]);
    attrs.addElement(attr[3]);
    attrs.addElement(attr[4]);
    attrs.addElement(attr[5]);
    attrs.addElement(attr[6]);
    attrs.addElement(attr[7]);

    // Add Instances
    Instances dataset = new Instances("my_dataset", attrs, 0);

    StringTokenizer tokenizer = new StringTokenizer(input);

    while (tokenizer.hasMoreTokens()) {
        Instance example = new Instance(8);
        for (int j = 0; j < 8; j++) {
            String st = tokenizer.nextToken();
            System.out.println(j + " " + st);
            if (j == 0)
                example.setValue(attr[j], Float.parseFloat(st));
            else if (j == 7)
                example.setValue(attr[j], st);
            else
                example.setValue(attr[j], Integer.parseInt(st));
        }
        ids += tokenizer.nextToken() + "\t";
        dataset.add(example);
    }

    //Save dataset
    String file = "Classifier\\featurefile_additive_test3.arff";
    ArffSaver saver = new ArffSaver();
    saver.setInstances(dataset);
    saver.setFile(new File(file));
    saver.writeBatch();

    //Read dataset
    ArffLoader loader = new ArffLoader();
    loader.setFile(new File(file));
    dataset = loader.getDataSet();

    //Set class index
    dataset.setClassIndex(7);

    //Read classifier back
    String file1 = "Classifier\\classifier_add_asAndpolarwordsAndpos.model";
    InputStream is = new FileInputStream(file1);
    Classifier classifier;
    ObjectInputStream objectInputStream = new ObjectInputStream(is);
    classifier = (Classifier) objectInputStream.readObject();
    objectInputStream.close();

    //Evaluate
    Instances test = new Instances(dataset, 0, dataset.numInstances());
    test.setClassIndex(7);

    //Do eval
    Evaluation eval = new Evaluation(test); //priors from the evaluation data
    eval.evaluateModel(classifier, test); //testset
    System.out.println(eval.toSummaryString());
    System.out.println("WEIGHTED F-MEASURE:" + eval.weightedFMeasure());
    System.out.println("WEIGHTED PRECISION:" + eval.weightedPrecision());
    System.out.println("WEIGHTED RECALL:" + eval.weightedRecall());

    //output predictions
    String optest = "", val = "";
    StringTokenizer op = new StringTokenizer(ids);
    int count = 0;
    while (op.hasMoreTokens()) {
        double[] prediction = classifier.distributionForInstance(test.instance(count));
        count += 1;

        if (prediction[0] > prediction[1]) {
            if (prediction[0] > prediction[2]) {
                val = "p: " + Double.toString((double) Math.round((prediction[0]) * 1000) / 1000);
            } else {
                val = "o: " + Double.toString((double) Math.round((prediction[2]) * 1000) / 1000);
            }
        } else {
            if (prediction[1] > prediction[2]) {
                val = "n: " + Double.toString((double) Math.round((prediction[1]) * 1000) / 1000);
            } else {
                val = "o: " + Double.toString((double) Math.round((prediction[2]) * 1000) / 1000);
            }
        }
        optest += op.nextToken() + "\t" + val + "\n";
    }
    rw.writeToFile(optest, "Answers_additive_Test3", "txt");
}

From source file:csav2.Weka_additive.java

public void classifyTestSet4(String input) throws Exception {
    String ids = "";
    ReaderWriter rw = new ReaderWriter();

    //ATTRIBUTES
    Attribute attr[] = new Attribute[50];

    //numeric
    attr[0] = new Attribute("Autosentiment");
    attr[1] = new Attribute("PositiveMatch");
    attr[2] = new Attribute("NegativeMatch");
    attr[3] = new Attribute("FW");
    attr[4] = new Attribute("JJ");
    attr[5] = new Attribute("RB");
    attr[6] = new Attribute("RB_JJ");
    attr[7] = new Attribute("amod");
    attr[8] = new Attribute("acomp");
    attr[9] = new Attribute("advmod");

    //class
    FastVector classValue = new FastVector(3);
    classValue.addElement("p");
    classValue.addElement("n");
    classValue.addElement("o");
    attr[10] = new Attribute("answer", classValue);

    FastVector attrs = new FastVector();
    attrs.addElement(attr[0]);
    attrs.addElement(attr[1]);
    attrs.addElement(attr[2]);
    attrs.addElement(attr[3]);
    attrs.addElement(attr[4]);
    attrs.addElement(attr[5]);
    attrs.addElement(attr[6]);
    attrs.addElement(attr[7]);
    attrs.addElement(attr[8]);
    attrs.addElement(attr[9]);
    attrs.addElement(attr[10]);

    // Add Instances
    Instances dataset = new Instances("my_dataset", attrs, 0);

    StringTokenizer tokenizer = new StringTokenizer(input);

    while (tokenizer.hasMoreTokens()) {
        Instance example = new Instance(11);
        for (int j = 0; j < 11; j++) {
            String st = tokenizer.nextToken();
            System.out.println(j + " " + st);
            if (j == 0)
                example.setValue(attr[j], Float.parseFloat(st));
            else if (j == 10)
                example.setValue(attr[j], st);
            else
                example.setValue(attr[j], Integer.parseInt(st));
        }
        ids += tokenizer.nextToken() + "\t";
        dataset.add(example);
    }

    //Save dataset
    String file = "Classifier\\featurefile_additive_test4.arff";
    ArffSaver saver = new ArffSaver();
    saver.setInstances(dataset);
    saver.setFile(new File(file));
    saver.writeBatch();

    //Read dataset
    ArffLoader loader = new ArffLoader();
    loader.setFile(new File(file));
    dataset = loader.getDataSet();

    //Set class index
    dataset.setClassIndex(10);

    //Read classifier back
    String file1 = "Classifier\\classifier_add_asAndpolarwordsAndposAnddep.model";
    InputStream is = new FileInputStream(file1);
    Classifier classifier;
    ObjectInputStream objectInputStream = new ObjectInputStream(is);
    classifier = (Classifier) objectInputStream.readObject();
    objectInputStream.close();

    //Evaluate
    Instances test = new Instances(dataset, 0, dataset.numInstances());
    test.setClassIndex(10);

    //Do eval
    Evaluation eval = new Evaluation(test); //priors from the evaluation data
    eval.evaluateModel(classifier, test); //testset
    System.out.println(eval.toSummaryString());
    System.out.println("WEIGHTED F-MEASURE:" + eval.weightedFMeasure());
    System.out.println("WEIGHTED PRECISION:" + eval.weightedPrecision());
    System.out.println("WEIGHTED RECALL:" + eval.weightedRecall());

    //output predictions
    String optest = "", val = "";
    StringTokenizer op = new StringTokenizer(ids);
    int count = 0;
    while (op.hasMoreTokens()) {
        double[] prediction = classifier.distributionForInstance(test.instance(count));
        count += 1;
        if (prediction[0] > prediction[1]) {
            if (prediction[0] > prediction[2]) {
                val = "p: " + Double.toString((double) Math.round((prediction[0]) * 1000) / 1000);
            } else {
                val = "o: " + Double.toString((double) Math.round((prediction[2]) * 1000) / 1000);
            }
        } else {
            if (prediction[1] > prediction[2]) {
                val = "n: " + Double.toString((double) Math.round((prediction[1]) * 1000) / 1000);
            } else {
                val = "o: " + Double.toString((double) Math.round((prediction[2]) * 1000) / 1000);
            }
        }
        optest += op.nextToken() + "\t" + val + "\n";
    }
    rw.writeToFile(optest, "Answers_additive_Test4", "txt");
}

From source file:csav2.Weka_additive.java

public void classifyTestSet5(String input) throws Exception {
    String ids = "";
    ReaderWriter rw = new ReaderWriter();

    //ATTRIBUTES
    Attribute attr[] = new Attribute[50];

    //numeric
    attr[0] = new Attribute("Autosentiment");
    attr[1] = new Attribute("PositiveMatch");
    attr[2] = new Attribute("NegativeMatch");
    attr[3] = new Attribute("FW");
    attr[4] = new Attribute("JJ");
    attr[5] = new Attribute("RB");
    attr[6] = new Attribute("RB_JJ");
    attr[7] = new Attribute("amod");
    attr[8] = new Attribute("acomp");
    attr[9] = new Attribute("advmod");
    attr[10] = new Attribute("BLPos");
    attr[11] = new Attribute("BLNeg");

    //class
    FastVector classValue = new FastVector(3);
    classValue.addElement("p");
    classValue.addElement("n");
    classValue.addElement("o");
    attr[12] = new Attribute("answer", classValue);

    FastVector attrs = new FastVector();
    attrs.addElement(attr[0]);
    attrs.addElement(attr[1]);
    attrs.addElement(attr[2]);
    attrs.addElement(attr[3]);
    attrs.addElement(attr[4]);
    attrs.addElement(attr[5]);
    attrs.addElement(attr[6]);
    attrs.addElement(attr[7]);
    attrs.addElement(attr[8]);
    attrs.addElement(attr[9]);
    attrs.addElement(attr[10]);
    attrs.addElement(attr[11]);
    attrs.addElement(attr[12]);

    // Add Instances
    Instances dataset = new Instances("my_dataset", attrs, 0);

    StringTokenizer tokenizer = new StringTokenizer(input);

    while (tokenizer.hasMoreTokens()) {
        Instance example = new Instance(13);
        for (int j = 0; j < 13; j++) {
            String st = tokenizer.nextToken();
            System.out.println(j + " " + st);
            if (j == 0)
                example.setValue(attr[j], Float.parseFloat(st));
            else if (j == 12)
                example.setValue(attr[j], st);
            else
                example.setValue(attr[j], Integer.parseInt(st));
        }
        ids += tokenizer.nextToken() + "\t";
        dataset.add(example);
    }

    //Save dataset
    String file = "Classifier\\featurefile_additive_test5.arff";
    ArffSaver saver = new ArffSaver();
    saver.setInstances(dataset);
    saver.setFile(new File(file));
    saver.writeBatch();

    //Read dataset
    ArffLoader loader = new ArffLoader();
    loader.setFile(new File(file));
    dataset = loader.getDataSet();

    //Set class index
    dataset.setClassIndex(12);

    //Read classifier back
    String file1 = "Classifier\\classifier_add_asAndpolarwordsAndposAnddepAndbl.model";
    InputStream is = new FileInputStream(file1);
    Classifier classifier;
    ObjectInputStream objectInputStream = new ObjectInputStream(is);
    classifier = (Classifier) objectInputStream.readObject();
    objectInputStream.close();

    //Evaluate
    Instances test = new Instances(dataset, 0, dataset.numInstances());
    test.setClassIndex(12);

    //Do eval
    Evaluation eval = new Evaluation(test); //priors from the evaluation data
    eval.evaluateModel(classifier, test); //testset
    System.out.println(eval.toSummaryString());
    System.out.println("WEIGHTED F-MEASURE:" + eval.weightedFMeasure());
    System.out.println("WEIGHTED PRECISION:" + eval.weightedPrecision());
    System.out.println("WEIGHTED RECALL:" + eval.weightedRecall());

    //output predictions
    String optest = "", val = "";
    StringTokenizer op = new StringTokenizer(ids);
    int count = 0;
    while (op.hasMoreTokens()) {
        double[] prediction = classifier.distributionForInstance(test.instance(count));
        count += 1;
        if (prediction[0] > prediction[1]) {
            if (prediction[0] > prediction[2]) {
                val = "p: " + Double.toString((double) Math.round((prediction[0]) * 1000) / 1000);
            } else {
                val = "o: " + Double.toString((double) Math.round((prediction[2]) * 1000) / 1000);
            }
        } else {
            if (prediction[1] > prediction[2]) {
                val = "n: " + Double.toString((double) Math.round((prediction[1]) * 1000) / 1000);
            } else {
                val = "o: " + Double.toString((double) Math.round((prediction[2]) * 1000) / 1000);
            }
        }
        optest += op.nextToken() + "\t" + val + "\n";
    }
    rw.writeToFile(optest, "Answers_additive_Test5", "txt");
}

From source file:csav2.Weka_additive.java

public void classifyTestSet6(String input) throws Exception {
    String ids = "";
    ReaderWriter rw = new ReaderWriter();

    //ATTRIBUTES
    Attribute attr[] = new Attribute[50];

    //numeric
    attr[0] = new Attribute("Autosentiment");
    attr[1] = new Attribute("PositiveMatch");
    attr[2] = new Attribute("NegativeMatch");
    attr[3] = new Attribute("FW");
    attr[4] = new Attribute("JJ");
    attr[5] = new Attribute("RB");
    attr[6] = new Attribute("RB_JJ");
    attr[7] = new Attribute("amod");
    attr[8] = new Attribute("acomp");
    attr[9] = new Attribute("advmod");
    attr[10] = new Attribute("BLPos");
    attr[11] = new Attribute("BLNeg");
    attr[12] = new Attribute("VSPositive");
    attr[13] = new Attribute("VSNegative");

    //class
    FastVector classValue = new FastVector(3);
    classValue.addElement("p");
    classValue.addElement("n");
    classValue.addElement("o");
    attr[14] = new Attribute("answer", classValue);

    FastVector attrs = new FastVector();
    attrs.addElement(attr[0]);
    attrs.addElement(attr[1]);
    attrs.addElement(attr[2]);
    attrs.addElement(attr[3]);
    attrs.addElement(attr[4]);
    attrs.addElement(attr[5]);
    attrs.addElement(attr[6]);
    attrs.addElement(attr[7]);
    attrs.addElement(attr[8]);
    attrs.addElement(attr[9]);
    attrs.addElement(attr[10]);
    attrs.addElement(attr[11]);
    attrs.addElement(attr[12]);
    attrs.addElement(attr[13]);
    attrs.addElement(attr[14]);

    // Add Instances
    Instances dataset = new Instances("my_dataset", attrs, 0);

    StringTokenizer tokenizer = new StringTokenizer(input);

    while (tokenizer.hasMoreTokens()) {
        Instance example = new Instance(15);
        for (int j = 0; j < 15; j++) {
            String st = tokenizer.nextToken();
            System.out.println(j + " " + st);
            if (j == 0)
                example.setValue(attr[j], Float.parseFloat(st));
            else if (j == 14)
                example.setValue(attr[j], st);
            else
                example.setValue(attr[j], Integer.parseInt(st));
        }
        ids += tokenizer.nextToken() + "\t";
        dataset.add(example);
    }

    //Save dataset
    String file = "Classifier\\featurefile_additive_test6.arff";
    ArffSaver saver = new ArffSaver();
    saver.setInstances(dataset);
    saver.setFile(new File(file));
    saver.writeBatch();

    //Read dataset
    ArffLoader loader = new ArffLoader();
    loader.setFile(new File(file));
    dataset = loader.getDataSet();

    //Set class index
    dataset.setClassIndex(14);

    //Read classifier back
    String file1 = "Classifier\\classifier_add_asAndpolarwordsAndposAnddepAndblAndvs.model";
    InputStream is = new FileInputStream(file1);
    Classifier classifier;
    ObjectInputStream objectInputStream = new ObjectInputStream(is);
    classifier = (Classifier) objectInputStream.readObject();
    objectInputStream.close();

    //Evaluate
    Instances test = new Instances(dataset, 0, dataset.numInstances());
    test.setClassIndex(14);

    //Do eval
    Evaluation eval = new Evaluation(test); //priors from the evaluation data
    eval.evaluateModel(classifier, test); //testset
    System.out.println(eval.toSummaryString());
    System.out.println("WEIGHTED F-MEASURE:" + eval.weightedFMeasure());
    System.out.println("WEIGHTED PRECISION:" + eval.weightedPrecision());
    System.out.println("WEIGHTED RECALL:" + eval.weightedRecall());

    //output predictions
    String optest = "", val = "";
    StringTokenizer op = new StringTokenizer(ids);
    int count = 0;
    while (op.hasMoreTokens()) {
        double[] prediction = classifier.distributionForInstance(test.instance(count));
        count += 1;
        if (prediction[0] > prediction[1]) {
            if (prediction[0] > prediction[2]) {
                val = "p: " + Double.toString((double) Math.round((prediction[0]) * 1000) / 1000);
            } else {
                val = "o: " + Double.toString((double) Math.round((prediction[2]) * 1000) / 1000);
            }
        } else {
            if (prediction[1] > prediction[2]) {
                val = "n: " + Double.toString((double) Math.round((prediction[1]) * 1000) / 1000);
            } else {
                val = "o: " + Double.toString((double) Math.round((prediction[2]) * 1000) / 1000);
            }
        }
        optest += op.nextToken() + "\t" + val + "\n";
    }
    rw.writeToFile(optest, "Answers_additive_Test6", "txt");
}

From source file:hero.unstable.test.parkinson.ClusteringBinaryPD.java

@Override
public void evaluate(Solutions<Variable<Boolean>> solutions) {
    idxGeneration++;
    for (int i = 0; i < solutions.size(); i++) {
        Solution<Variable<Boolean>> solution = solutions.get(i);

        // Select features:
        List<Integer> remainingFeatures = solutionAsList(solution);

        // Filter TRAINING data:
        Instances filteredData = data.filterAttributes("training", remainingFeatures);

        // Evaluate TRAINING:           
        try {
            Evaluation result = cls.classify(filteredData);

            // Store metrics of the solution (TPR is the weighted recall, PPV the weighted precision):
            double avgFValue = result.weightedFMeasure();
            double avgTPR = result.weightedRecall();
            double avgPPV = result.weightedPrecision();

            solution.getObjectives().set(0, 1 - avgFValue);
            //solution.getObjectives().set(1, (double)filteredData.numAttributes());

            logger.info("Average F-value = " + avgFValue);
            if (avgFValue > bestAccuracy) {
                logger.info("Best F-value = " + avgFValue + ". Number of features =  "
                        + filteredData.numAttributes() + ". For gen:" + (idxGeneration - 1) + ", solution: " + i
                        + ". avgTPR: " + avgTPR + ", avgPPV: " + avgPPV);
                logger.info("Number of good solutions found: " + numOfGoodSolutions++);
                bestAccuracy = avgFValue;
                bestClassifier = cls;

                // Measure overfitting
                logger.info("Overfitting: " + (avgFValue
                        - cls.classify(data.filterAttributes("test", remainingFeatures)).fMeasure(1))); // or weightedFMeasure()
            }
        } catch (Exception ex) {
            Logger.getLogger(ClusteringBinaryPD.class.getName()).log(Level.SEVERE, null, ex);
        }
    } //END FOR SOLUTIONS        
}

From source file:hero.unstable.util.classification.wekaClassifier.java

/** Result as:
 * [correctClassified, TPR(class True), TPR(class False), avgTPR, PPV(class True), PPV(class False), avgPPV, Fvalue(class True), Fvalue(class False), avgFvalue]
 * @param result the Evaluation holding the classification results
 * @return an array of 10 metrics, in the order listed above
 */
public double[] getMetrics(Evaluation result) {
    double[] metrics = new double[10];

    metrics[0] = result.pctCorrect() / 100;

    // TPR is recall and PPV is precision, matching the order documented above
    metrics[1] = result.recall(0);
    metrics[2] = result.recall(1);
    metrics[3] = result.weightedRecall();

    metrics[4] = result.precision(0);
    metrics[5] = result.precision(1);
    metrics[6] = result.weightedPrecision();

    metrics[7] = result.fMeasure(0);
    metrics[8] = result.fMeasure(1);
    metrics[9] = result.weightedFMeasure();

    return metrics;
}
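
A short usage sketch for getMetrics (hypothetical: it assumes a wekaClassifier instance named cls and an Evaluation named result, e.g. obtained from cls.classify as in the previous example; the label strings are our own rendering of the documented order):

//Print the 10 metrics in the order documented in the Javadoc above
String[] labels = { "correctClassified", "TPR(true)", "TPR(false)", "avgTPR",
        "PPV(true)", "PPV(false)", "avgPPV", "F(true)", "F(false)", "avgF" };
double[] metrics = cls.getMetrics(result);
for (int i = 0; i < metrics.length; i++) {
    System.out.println(labels[i] + " = " + metrics[i]);
}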