Example usage for weka.classifiers EnsembleLibrary EnsembleLibrary

List of usage examples for weka.classifiers EnsembleLibrary EnsembleLibrary

Introduction

On this page you can find example usage for weka.classifiers EnsembleLibrary EnsembleLibrary.

Prototype

public EnsembleLibrary() 

Document

The constructor is responsible for initializing the data structure holding all of the models.
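A minimal, self-contained sketch of the constructor in isolation (the class name and output file name are illustrative; the addModel and saveLibrary calls mirror the usage below):

import java.io.File;
import weka.classifiers.EnsembleLibrary;

public class EnsembleLibrarySketch {
    public static void main(String[] args) throws Exception {
        // The constructor creates an empty library backing the model collection.
        EnsembleLibrary library = new EnsembleLibrary();

        // Register classifiers by fully qualified class name.
        library.addModel("weka.classifiers.trees.J48");
        library.addModel("weka.classifiers.bayes.NaiveBayes");

        // Persist the library as XML so EnsembleSelection can load it via -L.
        // The file name is illustrative only.
        EnsembleLibrary.saveLibrary(new File("demoLib.model.xml"), library, null);
    }
}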

Usage

From source file: soccer.core.SimpleClassifier.java

public void evaluate() throws IOException, Exception {
    Instances data = loader.buildInstances();
    NumericToNominal toNominal = new NumericToNominal();
    toNominal.setOptions(new String[] { "-R", "5,6,8,9" });
    toNominal.setInputFormat(data);
    data = Filter.useFilter(data, toNominal);
    data.setClassIndex(6);

    //        DataSink.write(ARFF_STRING, data);

    EnsembleLibrary ensembleLib = new EnsembleLibrary();
    ensembleLib.addModel("weka.classifiers.trees.J48");
    ensembleLib.addModel("weka.classifiers.bayes.NaiveBayes");
    ensembleLib.addModel("weka.classifiers.functions.SMO");
    ensembleLib.addModel("weka.classifiers.meta.AdaBoostM1");
    ensembleLib.addModel("weka.classifiers.meta.LogitBoost");
    ensembleLib.addModel("classifiers.trees.DecisionStump");
    ensembleLib.addModel("classifiers.trees.DecisionStump");
    EnsembleLibrary.saveLibrary(new File("./ensembleLib.model.xml"), ensembleLib, null);
    EnsembleSelection model = new EnsembleSelection();
    model.setOptions(new String[] { "-L", "./ensembleLib.model.xml", // </path/to/modelLibrary>"-W", path+"esTmp", // </path/to/working/directory> - 
            "-B", "10", // <numModelBags> 
            "-E", "1.0", // <modelRatio>.
            "-V", "0.25", // <validationRatio>
            "-H", "100", // <hillClimbIterations> 
            "-I", "1.0", // <sortInitialization> 
            "-X", "2", // <numFolds>
            "-P", "roc", // <hillclimbMettric>
            "-A", "forward", // <algorithm> 
            "-R", "true", // - Flag to be selected more than once
            "-G", "true", // - stops adding models when performance degrades
            "-O", "true", // - verbose output.
            "-S", "1", // <num> - Random number seed.
            "-D", "true" // - run in debug mode 
    });
    //        double resES[] = evaluate(ensambleSel);
    //        System.out.println("Ensemble Selection\n"
    //                + "\tchurn:     " + resES[0] + "\n"
    //                + "\tappetency: " + resES[1] + "\n"
    //                + "\tup-sell:   " + resES[2] + "\n"
    //                + "\toverall:   " + resES[3] + "\n");
    //        models.add(new J48());
    //        models.add(new RandomForest());
    //        models.add(new NaiveBayes());
    //        models.add(new AdaBoostM1());
    //        models.add(new Logistic());
    //        models.add(new MultilayerPerceptron());

    int FOLDS = 5;
    Evaluation eval = new Evaluation(data);
    //
    //        for (Classifier model : models) {
    eval.crossValidateModel(model, data, FOLDS, new Random(1), new Object[] {});
    System.out.println(model.getClass().getName() + "\n" + "\tRecall:    " + eval.recall(1) + "\n"
            + "\tPrecision: " + eval.precision(1) + "\n" + "\tF-measure: " + eval.fMeasure(1));
    System.out.println(eval.toSummaryString());
    //        }
    //        LogitBoost cl = new LogitBoost();
    //        cl.setOptions(new String[] {
    //            "-Q", "-I", "100", "-Z", "4", "-O", "4", "-E", "4"
    //        });
    //        cl.buildClassifier(data);
    //        Evaluation eval = new Evaluation(data);
    //        eval.crossValidateModel(cl, data, 6, new Random(1), new Object[]{});
    //        System.out.println(eval.weightedFMeasure());
    //        System.out.println(cl.graph());
    //        System.out.println(cl.globalInfo());

}
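Beyond cross-validation, the configured EnsembleSelection can also be trained once and used for predictions. A hedged sketch, reusing the data Instances and the library XML prepared in the method above (the working-directory path passed to -W is illustrative):

    // Sketch: train on the full data set, then predict the first instance.
    // -L points at the saved library; -W sets the working directory in which
    // EnsembleSelection caches the models it trains while hill-climbing.
    EnsembleSelection trained = new EnsembleSelection();
    trained.setOptions(new String[] { "-L", "./ensembleLib.model.xml", "-W", "./esTmp" });
    trained.buildClassifier(data);
    double predicted = trained.classifyInstance(data.instance(0));
    System.out.println("Predicted class index: " + predicted);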