Example usage for weka.classifiers Evaluation Evaluation

Introduction

On this page you can find example usage of the weka.classifiers.Evaluation(Instances) constructor, drawn from real-world source files. The examples target the Weka 3.6-era API (FastVector, Instance); later Weka versions replace these with java.util.ArrayList and DenseInstance.

Prototype

public Evaluation(Instances data) throws Exception 
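
Before the usage listings, here is a minimal sketch of the typical workflow around this constructor: load a dataset, set the class attribute, and cross-validate a classifier. The ARFF path is hypothetical; substitute your own data.

import java.util.Random;
import weka.classifiers.Evaluation;
import weka.classifiers.trees.J48;
import weka.core.Instances;
import weka.core.converters.ConverterUtils.DataSource;

public class EvaluationSketch {
    public static void main(String[] args) throws Exception {
        Instances data = DataSource.read("data/iris.arff"); // hypothetical path
        data.setClassIndex(data.numAttributes() - 1);

        // The constructor uses the data to initialize class priors and the header
        Evaluation eval = new Evaluation(data);
        eval.crossValidateModel(new J48(), data, 10, new Random(1));

        System.out.println(eval.toSummaryString());
    }
}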

Usage

From source file:csav2.Weka_additive.java

public void createTrainingFeatureFile6(String input) throws Exception {
    String file = "Classifier\\featurefile_additive_trial6.arff";
    ArffLoader loader = new ArffLoader();

    //ATTRIBUTES
    Attribute attr[] = new Attribute[50];

    attr[0] = new Attribute("Autosentiment");
    attr[1] = new Attribute("PositiveMatch");
    attr[2] = new Attribute("NegativeMatch");
    attr[3] = new Attribute("FW");
    attr[4] = new Attribute("JJ");
    attr[5] = new Attribute("RB");
    attr[6] = new Attribute("RB_JJ");
    attr[7] = new Attribute("amod");
    attr[8] = new Attribute("acomp");
    attr[9] = new Attribute("advmod");
    attr[10] = new Attribute("BLPos");
    attr[11] = new Attribute("BLNeg");
    attr[12] = new Attribute("VSPositive");
    attr[13] = new Attribute("VSNegative");

    //class
    FastVector classValue = new FastVector(3);
    classValue.addElement("p");
    classValue.addElement("n");
    classValue.addElement("o");
    attr[14] = new Attribute("answer", classValue);

    FastVector attrs = new FastVector();
    for (int i = 0; i <= 14; i++)
        attrs.addElement(attr[i]);

    // Add Instances
    Instances dataset = new Instances("my_dataset", attrs, 0);

    if (new File(file).isFile()) {
        loader.setFile(new File(file));
        dataset = loader.getDataSet();
    }

    System.out.println("-----------------------------------------");
    System.out.println(input);
    System.out.println("-----------------------------------------");

    StringTokenizer tokenizer = new StringTokenizer(input);

    while (tokenizer.hasMoreTokens()) {
        Instance example = new Instance(15);
        for (int j = 0; j < 15; j++) {
            String st = tokenizer.nextToken();
            System.out.println(j + " " + st);
            if (j == 0)
                example.setValue(attr[j], Float.parseFloat(st));
            else if (j == 14)
                example.setValue(attr[j], st);
            else
                example.setValue(attr[j], Integer.parseInt(st));
        }
        dataset.add(example);
    }

    //Save dataset
    ArffSaver saver = new ArffSaver();
    saver.setInstances(dataset);
    saver.setFile(new File(file));
    saver.writeBatch();

    //Read dataset
    loader.setFile(new File(file));
    dataset = loader.getDataSet();

    //Build classifier
    dataset.setClassIndex(14);
    Classifier classifier = new J48();
    classifier.buildClassifier(dataset);

    //Save classifier
    String file1 = "Classifier\\classifier_add_asAndpolarwordsAndposAnddepAndblAndvs.model";
    OutputStream os = new FileOutputStream(file1);
    ObjectOutputStream objectOutputStream = new ObjectOutputStream(os);
    objectOutputStream.writeObject(classifier);
    objectOutputStream.close(); // flush and close before reading the model back

    //Read classifier back (optional round-trip check)
    InputStream is = new FileInputStream(file1);
    ObjectInputStream objectInputStream = new ObjectInputStream(is);
    classifier = (Classifier) objectInputStream.readObject();
    objectInputStream.close();

    //Resample if needed
    //dataset = dataset.resample(new Random(42));

    //Split into a 70:30 train and test set (note: the cross-validation
    //below runs on the full dataset, so this split goes unused here)
    double percent = 70.0;
    int trainSize = (int) Math.round(dataset.numInstances() * percent / 100);
    int testSize = dataset.numInstances() - trainSize;
    Instances train = new Instances(dataset, 0, trainSize);
    Instances test = new Instances(dataset, trainSize, testSize);
    train.setClassIndex(14);
    test.setClassIndex(14);

    //Evaluate with 10-fold cross-validation over the full dataset
    Evaluation eval = new Evaluation(dataset);
    eval.crossValidateModel(classifier, dataset, 10, new Random(1));
    System.out.println("EVALUATION:\n" + eval.toSummaryString());
    System.out.println("WEIGHTED MEASURE:\n" + eval.weightedFMeasure());
    System.out.println("WEIGHTED PRECISION:\n" + eval.weightedPrecision());
    System.out.println("WEIGHTED RECALL:\n" + eval.weightedRecall());
}
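
A side note on the save/load round-trip above: Weka's weka.core.SerializationHelper wraps the same ObjectOutputStream/ObjectInputStream pattern and handles flushing and closing internally. A minimal sketch (the helper method name below is ours, not Weka's):

import weka.classifiers.Classifier;
import weka.core.SerializationHelper;

// Hypothetical helper: round-trip a trained model through disk
static Classifier saveAndReload(Classifier classifier, String path) throws Exception {
    SerializationHelper.write(path, classifier);
    return (Classifier) SerializationHelper.read(path);
}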

From source file:csav2.Weka_additive.java

public void classifyTestSet1(String input) throws Exception {
    String ids = "";
    ReaderWriter rw = new ReaderWriter();

    //ATTRIBUTES
    Attribute attr[] = new Attribute[50];

    //numeric
    attr[0] = new Attribute("Autosentiment");

    //class
    FastVector classValue = new FastVector(3);
    classValue.addElement("p");
    classValue.addElement("n");
    classValue.addElement("o");
    attr[1] = new Attribute("answer", classValue);

    FastVector attrs = new FastVector();
    attrs.addElement(attr[0]);
    attrs.addElement(attr[1]);

    // Add Instances
    Instances dataset = new Instances("my_dataset", attrs, 0);

    StringTokenizer tokenizer = new StringTokenizer(input);

    while (tokenizer.hasMoreTokens()) {
        Instance example = new Instance(2);
        for (int j = 0; j < 2; j++) {
            String st = tokenizer.nextToken();
            System.out.println(j + " " + st);
            if (j == 0)
                example.setValue(attr[j], Float.parseFloat(st));
            else // j == 1: the nominal class label
                example.setValue(attr[j], st);
        }
        ids += tokenizer.nextToken() + "\t";
        dataset.add(example);
    }

    //Save dataset
    String file = "Classifier\\featurefile_additive_test1.arff";
    ArffSaver saver = new ArffSaver();
    saver.setInstances(dataset);
    saver.setFile(new File(file));
    saver.writeBatch();

    //Read dataset
    ArffLoader loader = new ArffLoader();
    loader.setFile(new File(file));
    dataset = loader.getDataSet();

    //Build classifier
    dataset.setClassIndex(1);

    //Read classifier back
    String file1 = "Classifier\\classifier_add_autosentiment.model";
    InputStream is = new FileInputStream(file1);
    Classifier classifier;
    ObjectInputStream objectInputStream = new ObjectInputStream(is);
    classifier = (Classifier) objectInputStream.readObject();
    objectInputStream.close();

    //Use the full loaded dataset as the test set
    Instances test = new Instances(dataset, 0, dataset.numInstances());
    test.setClassIndex(1);

    //Evaluate (class priors are taken from the test set here)
    Evaluation eval = new Evaluation(test);
    eval.evaluateModel(classifier, test);
    System.out.println(eval.toSummaryString());
    System.out.println("WEIGHTED F-MEASURE:" + eval.weightedFMeasure());
    System.out.println("WEIGHTED PRECISION:" + eval.weightedPrecision());
    System.out.println("WEIGHTED RECALL:" + eval.weightedRecall());

    //output predictions
    String optest = "", val = "";
    StringTokenizer op = new StringTokenizer(ids);
    int count = 0;
    while (op.hasMoreTokens()) {
        double[] prediction = classifier.distributionForInstance(test.instance(count));

        count += 1;
        //optest+=op.nextToken()+" "+Double.toString((double) Math.round((prediction[0]) * 1000) / 1000)+"\n";                
        if (prediction[0] > prediction[1]) {
            if (prediction[0] > prediction[2]) {
                val = "p: " + Double.toString((double) Math.round((prediction[0]) * 1000) / 1000);
            } else {
                val = "o: " + Double.toString((double) Math.round((prediction[2]) * 1000) / 1000);
            }
        } else {
            if (prediction[1] > prediction[2]) {
                val = "n: " + Double.toString((double) Math.round((prediction[1]) * 1000) / 1000);
            } else {
                val = "o: " + Double.toString((double) Math.round((prediction[2]) * 1000) / 1000);
            }
        }
        optest += op.nextToken() + "\t" + val + "\n";
    }
    rw.writeToFile(optest, "Answers_additive_Test1", "txt");
}

From source file:csav2.Weka_additive.java

public void classifyTestSet2(String input) throws Exception {
    String ids = "";
    ReaderWriter rw = new ReaderWriter();

    //ATTRIBUTES
    Attribute attr[] = new Attribute[50];

    //numeric
    attr[0] = new Attribute("Autosentiment");
    attr[1] = new Attribute("PostiveMatch");
    attr[2] = new Attribute("NegativeMatch");

    //class
    FastVector classValue = new FastVector(3);
    classValue.addElement("p");
    classValue.addElement("n");
    classValue.addElement("o");
    attr[3] = new Attribute("answer", classValue);

    FastVector attrs = new FastVector();
    attrs.addElement(attr[0]);
    attrs.addElement(attr[1]);
    attrs.addElement(attr[2]);
    attrs.addElement(attr[3]);

    // Add Instances
    Instances dataset = new Instances("my_dataset", attrs, 0);

    StringTokenizer tokenizer = new StringTokenizer(input);

    while (tokenizer.hasMoreTokens()) {
        Instance example = new Instance(4);
        for (int j = 0; j < 4; j++) {
            String st = tokenizer.nextToken();
            System.out.println(j + " " + st);
            if (j == 0)
                example.setValue(attr[j], Float.parseFloat(st));
            else if (j == 3)
                example.setValue(attr[j], st);
            else
                example.setValue(attr[j], Integer.parseInt(st));
        }
        ids += tokenizer.nextToken() + "\t";
        dataset.add(example);
    }

    //Save dataset
    String file = "Classifier\\featurefile_additive_test2.arff";
    ArffSaver saver = new ArffSaver();
    saver.setInstances(dataset);
    saver.setFile(new File(file));
    saver.writeBatch();

    //Read dataset
    ArffLoader loader = new ArffLoader();
    loader.setFile(new File(file));
    dataset = loader.getDataSet();

    //Build classifier
    dataset.setClassIndex(3);

    //Read classifier back
    String file1 = "Classifier\\classifier_add_asAndpolarwords.model";
    InputStream is = new FileInputStream(file1);
    Classifier classifier;
    ObjectInputStream objectInputStream = new ObjectInputStream(is);
    classifier = (Classifier) objectInputStream.readObject();
    objectInputStream.close();

    //Use the full loaded dataset as the test set
    Instances test = new Instances(dataset, 0, dataset.numInstances());
    test.setClassIndex(3);

    //Evaluate (class priors are taken from the test set here)
    Evaluation eval = new Evaluation(test);
    eval.evaluateModel(classifier, test);
    System.out.println(eval.toSummaryString());
    System.out.println("WEIGHTED F-MEASURE:" + eval.weightedFMeasure());
    System.out.println("WEIGHTED PRECISION:" + eval.weightedPrecision());
    System.out.println("WEIGHTED RECALL:" + eval.weightedRecall());

    //output predictions
    String optest = "", val = "";
    StringTokenizer op = new StringTokenizer(ids);
    int count = 0;
    while (op.hasMoreTokens()) {
        double[] prediction = classifier.distributionForInstance(test.instance(count));
        count += 1;
        if (prediction[0] > prediction[1]) {
            if (prediction[0] > prediction[2]) {
                val = "p: " + Double.toString((double) Math.round((prediction[0]) * 1000) / 1000);
            } else {
                val = "o: " + Double.toString((double) Math.round((prediction[2]) * 1000) / 1000);
            }
        } else {
            if (prediction[1] > prediction[2]) {
                val = "n: " + Double.toString((double) Math.round((prediction[1]) * 1000) / 1000);
            } else {
                val = "o: " + Double.toString((double) Math.round((prediction[2]) * 1000) / 1000);
            }
        }
        optest += op.nextToken() + "\t" + val + "\n";
    }
    rw.writeToFile(optest, "Answers_additive_Test2", "txt");
}

From source file:csav2.Weka_additive.java

public void classifyTestSet3(String input) throws Exception {
    String ids = "";
    ReaderWriter rw = new ReaderWriter();

    //ATTRIBUTES
    Attribute attr[] = new Attribute[50];

    //numeric
    attr[0] = new Attribute("Autosentiment");
    attr[1] = new Attribute("PositiveMatch");
    attr[2] = new Attribute("NegativeMatch");
    attr[3] = new Attribute("FW");
    attr[4] = new Attribute("JJ");
    attr[5] = new Attribute("RB");
    attr[6] = new Attribute("RB_JJ");

    //class
    FastVector classValue = new FastVector(3);
    classValue.addElement("p");
    classValue.addElement("n");
    classValue.addElement("o");
    attr[7] = new Attribute("answer", classValue);

    FastVector attrs = new FastVector();
    for (int i = 0; i <= 7; i++)
        attrs.addElement(attr[i]);

    // Add Instances
    Instances dataset = new Instances("my_dataset", attrs, 0);

    StringTokenizer tokenizer = new StringTokenizer(input);

    while (tokenizer.hasMoreTokens()) {
        Instance example = new Instance(8);
        for (int j = 0; j < 8; j++) {
            String st = tokenizer.nextToken();
            System.out.println(j + " " + st);
            if (j == 0)
                example.setValue(attr[j], Float.parseFloat(st));
            else if (j == 7)
                example.setValue(attr[j], st);
            else
                example.setValue(attr[j], Integer.parseInt(st));
        }
        ids += tokenizer.nextToken() + "\t";
        dataset.add(example);
    }

    //Save dataset
    String file = "Classifier\\featurefile_additive_test3.arff";
    ArffSaver saver = new ArffSaver();
    saver.setInstances(dataset);
    saver.setFile(new File(file));
    saver.writeBatch();

    //Read dataset
    ArffLoader loader = new ArffLoader();
    loader.setFile(new File(file));
    dataset = loader.getDataSet();

    //Build classifier
    dataset.setClassIndex(7);

    //Read classifier back
    String file1 = "Classifier\\classifier_add_asAndpolarwordsAndpos.model";
    InputStream is = new FileInputStream(file1);
    Classifier classifier;
    ObjectInputStream objectInputStream = new ObjectInputStream(is);
    classifier = (Classifier) objectInputStream.readObject();
    objectInputStream.close();

    //Use the full loaded dataset as the test set
    Instances test = new Instances(dataset, 0, dataset.numInstances());
    test.setClassIndex(7);

    //Evaluate (class priors are taken from the test set here)
    Evaluation eval = new Evaluation(test);
    eval.evaluateModel(classifier, test);
    System.out.println(eval.toSummaryString());
    System.out.println("WEIGHTED F-MEASURE:" + eval.weightedFMeasure());
    System.out.println("WEIGHTED PRECISION:" + eval.weightedPrecision());
    System.out.println("WEIGHTED RECALL:" + eval.weightedRecall());

    //output predictions
    String optest = "", val = "";
    StringTokenizer op = new StringTokenizer(ids);
    int count = 0;
    while (op.hasMoreTokens()) {
        double[] prediction = classifier.distributionForInstance(test.instance(count));
        count += 1;

        if (prediction[0] > prediction[1]) {
            if (prediction[0] > prediction[2]) {
                val = "p: " + Double.toString((double) Math.round((prediction[0]) * 1000) / 1000);
            } else {
                val = "o: " + Double.toString((double) Math.round((prediction[2]) * 1000) / 1000);
            }
        } else {
            if (prediction[1] > prediction[2]) {
                val = "n: " + Double.toString((double) Math.round((prediction[1]) * 1000) / 1000);
            } else {
                val = "o: " + Double.toString((double) Math.round((prediction[2]) * 1000) / 1000);
            }
        }
        optest += op.nextToken() + "\t" + val + "\n";
    }
    rw.writeToFile(optest, "Answers_additive_Test3", "txt");
}

From source file:csav2.Weka_additive.java

public void classifyTestSet4(String input) throws Exception {
    String ids = "";
    ReaderWriter rw = new ReaderWriter();

    //ATTRIBUTES
    Attribute attr[] = new Attribute[50];

    //numeric
    attr[0] = new Attribute("Autosentiment");
    attr[1] = new Attribute("PositiveMatch");
    attr[2] = new Attribute("NegativeMatch");
    attr[3] = new Attribute("FW");
    attr[4] = new Attribute("JJ");
    attr[5] = new Attribute("RB");
    attr[6] = new Attribute("RB_JJ");
    attr[7] = new Attribute("amod");
    attr[8] = new Attribute("acomp");
    attr[9] = new Attribute("advmod");

    //class
    FastVector classValue = new FastVector(3);
    classValue.addElement("p");
    classValue.addElement("n");
    classValue.addElement("o");
    attr[10] = new Attribute("answer", classValue);

    FastVector attrs = new FastVector();
    for (int i = 0; i <= 10; i++)
        attrs.addElement(attr[i]);

    // Add Instances
    Instances dataset = new Instances("my_dataset", attrs, 0);

    StringTokenizer tokenizer = new StringTokenizer(input);

    while (tokenizer.hasMoreTokens()) {
        Instance example = new Instance(11);
        for (int j = 0; j < 11; j++) {
            String st = tokenizer.nextToken();
            System.out.println(j + " " + st);
            if (j == 0)
                example.setValue(attr[j], Float.parseFloat(st));
            else if (j == 10)
                example.setValue(attr[j], st);
            else
                example.setValue(attr[j], Integer.parseInt(st));
        }
        ids += tokenizer.nextToken() + "\t";
        dataset.add(example);
    }

    //Save dataset
    String file = "Classifier\\featurefile_additive_test4.arff";
    ArffSaver saver = new ArffSaver();
    saver.setInstances(dataset);
    saver.setFile(new File(file));
    saver.writeBatch();

    //Read dataset
    ArffLoader loader = new ArffLoader();
    loader.setFile(new File(file));
    dataset = loader.getDataSet();

    //Build classifier
    dataset.setClassIndex(10);

    //Read classifier back
    String file1 = "Classifier\\classifier_add_asAndpolarwordsAndposAnddep.model";
    InputStream is = new FileInputStream(file1);
    Classifier classifier;
    ObjectInputStream objectInputStream = new ObjectInputStream(is);
    classifier = (Classifier) objectInputStream.readObject();
    objectInputStream.close();

    //Use the full loaded dataset as the test set
    Instances test = new Instances(dataset, 0, dataset.numInstances());
    test.setClassIndex(10);

    //Evaluate (class priors are taken from the test set here)
    Evaluation eval = new Evaluation(test);
    eval.evaluateModel(classifier, test);
    System.out.println(eval.toSummaryString());
    System.out.println("WEIGHTED F-MEASURE:" + eval.weightedFMeasure());
    System.out.println("WEIGHTED PRECISION:" + eval.weightedPrecision());
    System.out.println("WEIGHTED RECALL:" + eval.weightedRecall());

    //output predictions
    String optest = "", val = "";
    StringTokenizer op = new StringTokenizer(ids);
    int count = 0;
    while (op.hasMoreTokens()) {
        double[] prediction = classifier.distributionForInstance(test.instance(count));
        count += 1;
        if (prediction[0] > prediction[1]) {
            if (prediction[0] > prediction[2]) {
                val = "p: " + Double.toString((double) Math.round((prediction[0]) * 1000) / 1000);
            } else {
                val = "o: " + Double.toString((double) Math.round((prediction[2]) * 1000) / 1000);
            }
        } else {
            if (prediction[1] > prediction[2]) {
                val = "n: " + Double.toString((double) Math.round((prediction[1]) * 1000) / 1000);
            } else {
                val = "o: " + Double.toString((double) Math.round((prediction[2]) * 1000) / 1000);
            }
        }
        optest += op.nextToken() + "\t" + val + "\n";
    }
    rw.writeToFile(optest, "Answers_additive_Test4", "txt");
}

From source file:csav2.Weka_additive.java

public void classifyTestSet5(String input) throws Exception {
    String ids = "";
    ReaderWriter rw = new ReaderWriter();

    //ATTRIBUTES
    Attribute attr[] = new Attribute[50];

    //numeric
    attr[0] = new Attribute("Autosentiment");
    attr[1] = new Attribute("PositiveMatch");
    attr[2] = new Attribute("NegativeMatch");
    attr[3] = new Attribute("FW");
    attr[4] = new Attribute("JJ");
    attr[5] = new Attribute("RB");
    attr[6] = new Attribute("RB_JJ");
    attr[7] = new Attribute("amod");
    attr[8] = new Attribute("acomp");
    attr[9] = new Attribute("advmod");
    attr[10] = new Attribute("BLPos");
    attr[11] = new Attribute("BLNeg");

    //class
    FastVector classValue = new FastVector(3);
    classValue.addElement("p");
    classValue.addElement("n");
    classValue.addElement("o");
    attr[12] = new Attribute("answer", classValue);

    FastVector attrs = new FastVector();
    for (int i = 0; i <= 12; i++)
        attrs.addElement(attr[i]);

    // Add Instances
    Instances dataset = new Instances("my_dataset", attrs, 0);

    StringTokenizer tokenizer = new StringTokenizer(input);

    while (tokenizer.hasMoreTokens()) {
        Instance example = new Instance(13);
        for (int j = 0; j < 13; j++) {
            String st = tokenizer.nextToken();
            System.out.println(j + " " + st);
            if (j == 0)
                example.setValue(attr[j], Float.parseFloat(st));
            else if (j == 12)
                example.setValue(attr[j], st);
            else
                example.setValue(attr[j], Integer.parseInt(st));
        }
        ids += tokenizer.nextToken() + "\t";
        dataset.add(example);
    }

    //Save dataset
    String file = "Classifier\\featurefile_additive_test5.arff";
    ArffSaver saver = new ArffSaver();
    saver.setInstances(dataset);
    saver.setFile(new File(file));
    saver.writeBatch();

    //Read dataset
    ArffLoader loader = new ArffLoader();
    loader.setFile(new File(file));
    dataset = loader.getDataSet();

    //Build classifier
    dataset.setClassIndex(12);

    //Read classifier back
    String file1 = "Classifier\\classifier_add_asAndpolarwordsAndposAnddepAndbl.model";
    InputStream is = new FileInputStream(file1);
    Classifier classifier;
    ObjectInputStream objectInputStream = new ObjectInputStream(is);
    classifier = (Classifier) objectInputStream.readObject();
    objectInputStream.close();

    //Use the full loaded dataset as the test set
    Instances test = new Instances(dataset, 0, dataset.numInstances());
    test.setClassIndex(12);

    //Evaluate (class priors are taken from the test set here)
    Evaluation eval = new Evaluation(test);
    eval.evaluateModel(classifier, test);
    System.out.println(eval.toSummaryString());
    System.out.println("WEIGHTED F-MEASURE:" + eval.weightedFMeasure());
    System.out.println("WEIGHTED PRECISION:" + eval.weightedPrecision());
    System.out.println("WEIGHTED RECALL:" + eval.weightedRecall());

    //output predictions
    String optest = "", val = "";
    StringTokenizer op = new StringTokenizer(ids);
    int count = 0;
    while (op.hasMoreTokens()) {
        double[] prediction = classifier.distributionForInstance(test.instance(count));
        count += 1;
        if (prediction[0] > prediction[1]) {
            if (prediction[0] > prediction[2]) {
                val = "p: " + Double.toString((double) Math.round((prediction[0]) * 1000) / 1000);
            } else {
                val = "o: " + Double.toString((double) Math.round((prediction[2]) * 1000) / 1000);
            }
        } else {
            if (prediction[1] > prediction[2]) {
                val = "n: " + Double.toString((double) Math.round((prediction[1]) * 1000) / 1000);
            } else {
                val = "o: " + Double.toString((double) Math.round((prediction[2]) * 1000) / 1000);
            }
        }
        optest += op.nextToken() + "\t" + val + "\n";
    }
    rw.writeToFile(optest, "Answers_additive_Test5", "txt");
}

From source file:csav2.Weka_additive.java

public void classifyTestSet6(String input) throws Exception {
    String ids = "";
    ReaderWriter rw = new ReaderWriter();

    //ATTRIBUTES
    Attribute attr[] = new Attribute[50];

    //numeric
    attr[0] = new Attribute("Autosentiment");
    attr[1] = new Attribute("PositiveMatch");
    attr[2] = new Attribute("NegativeMatch");
    attr[3] = new Attribute("FW");
    attr[4] = new Attribute("JJ");
    attr[5] = new Attribute("RB");
    attr[6] = new Attribute("RB_JJ");
    attr[7] = new Attribute("amod");
    attr[8] = new Attribute("acomp");
    attr[9] = new Attribute("advmod");
    attr[10] = new Attribute("BLPos");
    attr[11] = new Attribute("BLNeg");
    attr[12] = new Attribute("VSPos");
    attr[13] = new Attribute("VSNeg");

    //class
    FastVector classValue = new FastVector(3);
    classValue.addElement("p");
    classValue.addElement("n");
    classValue.addElement("o");
    attr[14] = new Attribute("answer", classValue);

    FastVector attrs = new FastVector();
    for (int i = 0; i <= 14; i++)
        attrs.addElement(attr[i]);

    // Add Instances
    Instances dataset = new Instances("my_dataset", attrs, 0);

    StringTokenizer tokenizer = new StringTokenizer(input);

    while (tokenizer.hasMoreTokens()) {
        Instance example = new Instance(15);
        for (int j = 0; j < 15; j++) {
            String st = tokenizer.nextToken();
            System.out.println(j + " " + st);
            if (j == 0)
                example.setValue(attr[j], Float.parseFloat(st));
            else if (j == 14)
                example.setValue(attr[j], st);
            else
                example.setValue(attr[j], Integer.parseInt(st));
        }
        ids += tokenizer.nextToken() + "\t";
        dataset.add(example);
    }

    //Save dataset
    String file = "Classifier\\featurefile_additive_test6.arff";
    ArffSaver saver = new ArffSaver();
    saver.setInstances(dataset);
    saver.setFile(new File(file));
    saver.writeBatch();

    //Read dataset
    ArffLoader loader = new ArffLoader();
    loader.setFile(new File(file));
    dataset = loader.getDataSet();

    //Build classifier
    dataset.setClassIndex(14);

    //Read classifier back
    String file1 = "Classifier\\classifier_asAndpolarwordsAndposAnddepAndblAndvs.model";
    InputStream is = new FileInputStream(file1);
    Classifier classifier;
    ObjectInputStream objectInputStream = new ObjectInputStream(is);
    classifier = (Classifier) objectInputStream.readObject();
    objectInputStream.close();

    //Use the full loaded dataset as the test set
    Instances test = new Instances(dataset, 0, dataset.numInstances());
    test.setClassIndex(14);

    //Evaluate (class priors are taken from the test set here)
    Evaluation eval = new Evaluation(test);
    eval.evaluateModel(classifier, test);
    System.out.println(eval.toSummaryString());
    System.out.println("WEIGHTED F-MEASURE:" + eval.weightedFMeasure());
    System.out.println("WEIGHTED PRECISION:" + eval.weightedPrecision());
    System.out.println("WEIGHTED RECALL:" + eval.weightedRecall());

    //output predictions
    String optest = "", val = "";
    StringTokenizer op = new StringTokenizer(ids);
    int count = 0;
    while (op.hasMoreTokens()) {
        double[] prediction = classifier.distributionForInstance(test.instance(count));
        count += 1;
        if (prediction[0] > prediction[1]) {
            if (prediction[0] > prediction[2]) {
                val = "p: " + Double.toString((double) Math.round((prediction[0]) * 1000) / 1000);
            } else {
                val = "o: " + Double.toString((double) Math.round((prediction[2]) * 1000) / 1000);
            }
        } else {
            if (prediction[1] > prediction[2]) {
                val = "n: " + Double.toString((double) Math.round((prediction[1]) * 1000) / 1000);
            } else {
                val = "o: " + Double.toString((double) Math.round((prediction[2]) * 1000) / 1000);
            }
        }
        optest += op.nextToken() + "\t" + val + "\n";
    }
    rw.writeToFile(optest, "Answers_additive_Test6", "txt");
}

From source file:cyber009.main.UDALNeuralNetwork.java

public static void main(String[] args) {
    UDALNeuralNetwork udal = new UDALNeuralNetwork(0.014013);
    Statistics statis = new Statistics(udal.v);
    long timeStart = 0, timeEnd = 0;
    for (int f = 2; f <= 2; f++) {
        udal.initUDAL(4, 5000);
        udal.activeLearning(0, 5000);
        udal.arraytoInstances();
        udal.ann.weightReset();
        timeStart = System.currentTimeMillis();
        MultilayerPerceptron wekaNN = new MultilayerPerceptron();
        wekaNN.setAutoBuild(true);
        //wekaNN.setGUI(true);
        try {
            wekaNN.buildClassifier(udal.dataSet);
            Evaluation eval = new Evaluation(udal.dataSet);
            System.out.println(wekaNN.toString());
            eval.crossValidateModel(wekaNN, udal.dataSet, 4999, new Random(System.currentTimeMillis())); // 4999 folds: near leave-one-out
            System.out.println(eval.toClassDetailsString());

            //            udal.ann.gradientDescent(10000L, 3, 100);
            //            for (Double target : udal.v.CLASSES) {
            //                statis.calMVMuSigma(target);
            //                System.out.println(udal.v.N_DATA_IN_CLASS.get(target));
            //                System.out.println(statis.mu.get(target));
            //                System.out.println(statis.sigma.get(target));
            //            }
            //            for(int d=0; d<udal.v.D; d++) {
            //                if(udal.v.LABEL[d] == false) {
            //                    double [][] val = new double[udal.v.N-1][1];
            //                    for(int n=1; n<udal.v.N; n++) {
            //                        val[n-1][0] = udal.v.X[d][n];
            ////                        System.out.print(udal.v.X[d][n] + "   ");
            ////                        System.out.println(val[n-1][0]);
            //                    }
            //                    Matrix mVal = new Matrix(val);
            //                    double pp = 0.0D;
            //                    for (Double target : udal.v.CLASSES) {
            //                        //System.out.println("-----------------------\nClass:"+ target);
            //                        pp += statis.posteriorDistribution(target, mVal);
            //                        System.out.println("conditional: Entropy: "+ 
            //                                statis.conditionalEntropy(target, mVal, d));
            //                    }
            //                    System.out.print("Sum posterior:"+ pp+ " for "+new Matrix(val).transpose());
            //                    
            //                }
            //            }
            //            System.out.println("-----------------------");
            //            timeEnd = System.currentTimeMillis();
            //            System.out.println("feature #:"+udal.v.N+" time:("+ (timeEnd - timeStart) +")");
            //            udal.v.showResult();
            //            
        } catch (Exception ex) {
            Logger.getLogger(UDALNeuralNetwork.class.getName()).log(Level.SEVERE, null, ex);
        }

    }
}
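
Note that 4999 folds over a 5000-instance dataset is close to leave-one-out cross-validation, which is very expensive for a MultilayerPerceptron. A cheaper, reproducible alternative under the same setup (udal.dataSet and wekaNN as above) would be standard 10-fold with a fixed seed:

// Sketch: 10-fold cross-validation with a fixed seed for reproducibility
Evaluation eval = new Evaluation(udal.dataSet);
eval.crossValidateModel(wekaNN, udal.dataSet, 10, new Random(1));
System.out.println(eval.toSummaryString());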

From source file:de.fub.maps.project.detector.model.inference.processhandler.CrossValidationProcessHandler.java

License:Open Source License

private void evaluate(Instances trainingSet) {
    try {
        Evaluation evaluation = new Evaluation(trainingSet);
        int crossValidationFoldsCount = getCrossValidationFoldsCount();
        // Weka requires 2 <= folds <= numInstances; clamp accordingly
        crossValidationFoldsCount = Math.max(2, Math.min(crossValidationFoldsCount, trainingSet.size()));
        evaluation.crossValidateModel(getInferenceModel().getClassifier(), trainingSet,
                crossValidationFoldsCount, new Random(1));
        updateVisualRepresentation(evaluation);
    } catch (Exception ex) {
        throw new InferenceModelClassifyException(ex.getMessage(), ex);
    }
}

From source file:de.fub.maps.project.detector.model.inference.processhandler.TrainingsDataProcessHandler.java

License:Open Source License

private void evaluate(Instances trainingSet, Instances testingSet) {
    Classifier classifier = getInferenceModel().getClassifier();
    try {
        classifier.buildClassifier(trainingSet);
        Evaluation evaluation = new Evaluation(testingSet);
        evaluation.evaluateModel(classifier, testingSet);
        updateVisualRepresentation(evaluation);
    } catch (Exception ex) {
        throw new InferenceModelClassifyException(ex.getMessage(), ex);
    }
}
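
The two handlers above illustrate the two standard Evaluation patterns side by side: cross-validation seeded with the training set, and held-out evaluation against a separate test set. A condensed sketch of both, assuming train and test Instances with the class index already set:

// Pattern 1: k-fold cross-validation (builds the classifier internally per fold)
Evaluation cv = new Evaluation(train);
cv.crossValidateModel(classifier, train, 10, new Random(1));

// Pattern 2: held-out test set (requires a classifier built on the training data)
classifier.buildClassifier(train);
Evaluation holdout = new Evaluation(train); // class priors taken from the training data
holdout.evaluateModel(classifier, test);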