Example usage for weka.attributeSelection Ranker Ranker

Introduction

This page lists example usages of the weka.attributeSelection.Ranker constructor, collected from open source projects.

Prototype

public Ranker() 

Document

Constructor

Usage
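
Before the collected examples, here is a minimal, self-contained sketch of the usual pattern. Ranker is a ranking search method: it must be paired with an evaluator that scores attributes individually (InfoGainAttributeEval, ChiSquaredAttributeEval, and similar), not with subset evaluators such as CfsSubsetEval. The class name and the dataset path below are placeholders, not taken from any example on this page.

import weka.attributeSelection.AttributeSelection;
import weka.attributeSelection.InfoGainAttributeEval;
import weka.attributeSelection.Ranker;
import weka.core.Instances;
import weka.core.converters.ConverterUtils;

public class MinimalRankerExample {
    public static void main(String[] args) throws Exception {
        // Load a dataset with a nominal class ("data/example.arff" is a placeholder)
        Instances data = ConverterUtils.DataSource.read("data/example.arff");
        data.setClassIndex(data.numAttributes() - 1);

        // Pair the Ranker search with a per-attribute evaluator
        AttributeSelection selector = new AttributeSelection();
        selector.setEvaluator(new InfoGainAttributeEval());
        Ranker ranker = new Ranker();
        ranker.setNumToSelect(-1); // -1 = keep every attribute in the ranking
        selector.setSearch(ranker);

        // Run the selection and print the ranked list
        selector.SelectAttributes(data);
        System.out.println(selector.toResultsString());
    }
}

Two options do most of the work in the examples below: setNumToSelect(n) keeps the top n attributes (-1 keeps them all), and setThreshold(t) discards attributes whose evaluation score falls below t.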

From source file:ia03classificador.jFrClassificador.java

public void doClassificate() throws Exception {

    // Each toggle button maps to 1.0 when selected, 0.0 otherwise
    v00 = btn00.isSelected() ? 1.0 : 0.0;
    v01 = btn01.isSelected() ? 1.0 : 0.0;
    v02 = btn02.isSelected() ? 1.0 : 0.0;
    v03 = btn03.isSelected() ? 1.0 : 0.0;
    v04 = btn04.isSelected() ? 1.0 : 0.0;
    v05 = btn05.isSelected() ? 1.0 : 0.0;
    v06 = btn06.isSelected() ? 1.0 : 0.0;
    v07 = btn07.isSelected() ? 1.0 : 0.0;
    v08 = btn08.isSelected() ? 1.0 : 0.0;
    v09 = btn09.isSelected() ? 1.0 : 0.0;
    v10 = btn10.isSelected() ? 1.0 : 0.0;
    v11 = btn11.isSelected() ? 1.0 : 0.0;
    v13 = btn13.isSelected() ? 1.0 : 0.0;
    v14 = btn14.isSelected() ? 1.0 : 0.0;
    v15 = btn15.isSelected() ? 1.0 : 0.0;
    legs = txtLegs.getText();
    legs = (legs == null || legs.trim().isEmpty()) ? "2" : legs; // default: 2 legs
    name = txtName.getText();

    // open the ARFF database and store the records in the dados object
    ConverterUtils.DataSource arquivo = new ConverterUtils.DataSource("data/zoo.arff");
    Instances dados = arquivo.getDataSet();

    // FILTER: remove the animal-name attribute from the classification
    String[] parametros = new String[] { "-R", "1" };
    Remove filtro = new Remove();
    filtro.setOptions(parametros);
    filtro.setInputFormat(dados);
    dados = Filter.useFilter(dados, filtro);

    // Rank attributes by information gain (computed here for inspection;
    // the J48 model below is still trained on the full attribute set)
    AttributeSelection selAtributo = new AttributeSelection();
    InfoGainAttributeEval avaliador = new InfoGainAttributeEval();
    Ranker busca = new Ranker();
    selAtributo.setEvaluator(avaliador);
    selAtributo.setSearch(busca);
    selAtributo.SelectAttributes(dados);
    int[] indices = selAtributo.selectedAttributes();
    //System.out.println("Selected attributes: " + Utils.arrayToString(indices));

    // Use the J48 algorithm to build the decision tree
    String[] opcoes = { "-U" }; // -U = build an unpruned tree
    J48 arvore = new J48();
    arvore.setOptions(opcoes);
    arvore.buildClassifier(dados);

    // build the attribute values for the new instance to classify
    double[] vals = new double[dados.numAttributes()];
    vals[0] = v00; // hair
    vals[1] = v01; // feathers
    vals[2] = v02; // eggs
    vals[3] = v03; // milk
    vals[4] = v04; // airborne
    vals[5] = v05; // aquatic
    vals[6] = v06; // predator
    vals[7] = v07; // toothed
    vals[8] = v08; // backbone
    vals[9] = v09; // breathes
    vals[10] = v10; // venomous
    vals[11] = v11; // fins
    vals[12] = Double.parseDouble(legs); // legs
    vals[13] = v13; // tail
    vals[14] = v14; // domestic
    vals[15] = v15; // catsize

    // Create an instance from these attribute values
    Instance newAnimal = new DenseInstance(1.0, vals);

    // Tie the instance to the dataset header (required before classifying)
    newAnimal.setDataset(dados);

    // Classify the new instance
    double label = arvore.classifyInstance(newAnimal);

    // Show the predicted class
    lblClassification.setText(dados.classAttribute().value((int) label));

}

From source file:it.poliba.sisinflab.simlib.featureSelection.methods.CHI.java

public void execute(String dataset) {
    try {

        if (dataset.length() == 0)
            throw new IllegalArgumentException();
        // Load input dataset.
        DataSource source = new DataSource(dataset);
        System.out.println("Reading instances...");
        Instances data = source.getDataSet();

        // Evaluate each attribute with the chi-squared statistic.
        ChiSquaredAttributeEval chiEvaluator = new ChiSquaredAttributeEval();

        // Rank the attributes by chi-squared score.
        Ranker ranker = new Ranker();
        // -1 means keep every attribute in the ranked list.
        ranker.setNumToSelect(-1);

        AttributeSelection selector = new AttributeSelection();
        System.out.println("Selecting attributes...");
        selector.setSearch(ranker);
        selector.setEvaluator(chiEvaluator);
        selector.SelectAttributes(data);

        PrintStream o = new PrintStream(new File("data/CHIResults.txt"));
        System.setOut(o);
        System.out.println(Arrays.toString(selector.rankedAttributes()));
        System.out.println(Arrays.toString(selector.selectedAttributes()));
        //System.out.println(selector.CVResultsString());
        System.out.println(selector.toResultsString());

        System.out.println();

    } catch (IllegalArgumentException e) {
        System.err.println("Error: no dataset path supplied");
    } catch (Exception e) {
        e.printStackTrace();
    }
}

From source file:it.poliba.sisinflab.simlib.featureSelection.methods.PCA.java

public void execute(String dataset) {
    try {

        if (dataset.length() == 0)
            throw new IllegalArgumentException();
        // Load input dataset.
        DataSource source = new DataSource(dataset);
        Instances data = source.getDataSet();

        // Performs a principal components analysis.
        PrincipalComponents pcaEvaluator = new PrincipalComponents();

        // Sets the amount of variance to account for when retaining principal
        // components.
        pcaEvaluator.setVarianceCovered(1.0);
        // Sets maximum number of attributes to include in transformed attribute
        // names.
        pcaEvaluator.setMaximumAttributeNames(-1);

        // Center (rather than standardize) the data, so the principal
        // components are computed from the covariance matrix.
        pcaEvaluator.setCenterData(true);

        // Rank the principal components.
        Ranker ranker = new Ranker();
        ranker.setNumToSelect(-1); // -1 keeps all components

        AttributeSelection selector = new AttributeSelection();
        selector.setSearch(ranker);
        selector.setEvaluator(pcaEvaluator);
        selector.SelectAttributes(data);

        // Transform data into eigenvector basis.
        Instances transformedData = selector.reduceDimensionality(data);
        PrintStream o = new PrintStream(new File("data/PCAResults.txt"));
        System.setOut(o);
        System.out.println(Arrays.toString(selector.rankedAttributes()));
        System.out.println(Arrays.toString(selector.selectedAttributes()));
        //System.out.println(selector.CVResultsString());
        System.out.println(selector.toResultsString());

        System.out.println();

    } catch (IllegalArgumentException e) {
        System.err.println("Error: no dataset path supplied");
    } catch (Exception e) {
        e.printStackTrace();
    }
}

From source file:jjj.asap.sas.models1.job.BuildBasicMetaCostModels.java

License:Open Source License

@Override
protected void run() throws Exception {

    // validate args
    if (!Bucket.isBucket("datasets", inputBucket)) {
        throw new FileNotFoundException(inputBucket);
    }
    if (!Bucket.isBucket("models", outputBucket)) {
        throw new FileNotFoundException(outputBucket);
    }

    // create prototype classifiers
    Map<String, Classifier> prototypes = new HashMap<String, Classifier>();

    // Bagged REPTrees

    Bagging baggedTrees = new Bagging();
    baggedTrees.setNumExecutionSlots(1);
    baggedTrees.setNumIterations(100);
    baggedTrees.setClassifier(new REPTree());
    baggedTrees.setCalcOutOfBag(false);

    prototypes.put("Bagged-REPTrees", baggedTrees);

    // Bagged SMO

    Bagging baggedSVM = new Bagging();
    baggedSVM.setNumExecutionSlots(1);
    baggedSVM.setNumIterations(100);
    baggedSVM.setClassifier(new SMO());
    baggedSVM.setCalcOutOfBag(false);

    prototypes.put("Bagged-SMO", baggedSVM);

    // Meta Cost model for Naive Bayes

    Bagging bagging = new Bagging();
    bagging.setNumExecutionSlots(1);
    bagging.setNumIterations(100);
    bagging.setClassifier(new NaiveBayes());

    CostSensitiveClassifier meta = new CostSensitiveClassifier();
    meta.setClassifier(bagging);
    meta.setMinimizeExpectedCost(true);

    prototypes.put("CostSensitive-MinimizeExpectedCost-NaiveBayes", bagging);

    // init multi-threading
    Job.startService();
    final Queue<Future<Object>> queue = new LinkedList<Future<Object>>();

    // get the input from the bucket
    List<String> names = Bucket.getBucketItems("datasets", this.inputBucket);
    for (String dsn : names) {

        // for each prototype classifier
        for (Map.Entry<String, Classifier> prototype : prototypes.entrySet()) {

            //
            // special logic for meta cost
            //

            Classifier alg = AbstractClassifier.makeCopy(prototype.getValue());

            if (alg instanceof CostSensitiveClassifier) {

                int essaySet = Contest.getEssaySet(dsn);

                String matrix = Contest.getRubrics(essaySet).size() == 3 ? "cost3.txt" : "cost4.txt";

                ((CostSensitiveClassifier) alg)
                        .setCostMatrix(new CostMatrix(new FileReader("/asap/sas/trunk/" + matrix)));

            }

            // use InfoGain to discard useless attributes

            AttributeSelectedClassifier classifier = new AttributeSelectedClassifier();

            classifier.setEvaluator(new InfoGainAttributeEval());

            Ranker ranker = new Ranker();
            ranker.setThreshold(0.0001);
            classifier.setSearch(ranker);

            classifier.setClassifier(alg);

            queue.add(Job.submit(
                    new ModelBuilder(dsn, "InfoGain-" + prototype.getKey(), classifier, this.outputBucket)));
        }
    }

    // wait on complete
    Progress progress = new Progress(queue.size(), this.getClass().getSimpleName());
    while (!queue.isEmpty()) {
        try {
            queue.remove().get();
        } catch (Exception e) {
            Job.log("ERROR", e.toString());
        }
        progress.tick();
    }
    progress.done();
    Job.stopService();

}

From source file:jjj.asap.sas.models1.job.BuildBasicModels.java

License:Open Source License

@Override
protected void run() throws Exception {

    // validate args
    if (!Bucket.isBucket("datasets", inputBucket)) {
        throw new FileNotFoundException(inputBucket);
    }
    if (!Bucket.isBucket("models", outputBucket)) {
        throw new FileNotFoundException(outputBucket);
    }

    // create prototype classifiers
    Map<String, Classifier> prototypes = new HashMap<String, Classifier>();

    // bayes

    BayesNet net = new BayesNet();
    net.setEstimator(new BMAEstimator());
    prototypes.put("BayesNet", net);

    prototypes.put("NaiveBayes", new NaiveBayes());

    // functions

    prototypes.put("RBFNetwork", new RBFNetwork());
    prototypes.put("SMO", new SMO());

    // init multi-threading
    Job.startService();
    final Queue<Future<Object>> queue = new LinkedList<Future<Object>>();

    // get the input from the bucket
    List<String> names = Bucket.getBucketItems("datasets", this.inputBucket);
    for (String dsn : names) {

        // for each prototype classifier
        for (Map.Entry<String, Classifier> prototype : prototypes.entrySet()) {

            // use InfoGain to discard useless attributes

            AttributeSelectedClassifier classifier = new AttributeSelectedClassifier();

            classifier.setEvaluator(new InfoGainAttributeEval());

            Ranker ranker = new Ranker();
            ranker.setThreshold(0.0001);
            classifier.setSearch(ranker);

            classifier.setClassifier(AbstractClassifier.makeCopy(prototype.getValue()));

            queue.add(Job.submit(
                    new ModelBuilder(dsn, "InfoGain-" + prototype.getKey(), classifier, this.outputBucket)));
        }
    }

    // wait on complete
    Progress progress = new Progress(queue.size(), this.getClass().getSimpleName());
    while (!queue.isEmpty()) {
        try {
            queue.remove().get();
        } catch (Exception e) {
            Job.log("ERROR", e.toString());
        }
        progress.tick();
    }
    progress.done();
    Job.stopService();

}

From source file:jjj.asap.sas.models1.job.BuildBasicModels2.java

License:Open Source License

@Override
protected void run() throws Exception {

    // validate args
    if (!Bucket.isBucket("datasets", inputBucket)) {
        throw new FileNotFoundException(inputBucket);
    }
    if (!Bucket.isBucket("models", outputBucket)) {
        throw new FileNotFoundException(outputBucket);
    }

    // create prototype classifiers
    Map<String, Classifier> prototypes = new HashMap<String, Classifier>();

    // models

    prototypes.put("NBTree", new NBTree());
    prototypes.put("Logistic", new Logistic());

    // init multi-threading
    Job.startService();
    final Queue<Future<Object>> queue = new LinkedList<Future<Object>>();

    // get the input from the bucket
    List<String> names = Bucket.getBucketItems("datasets", this.inputBucket);
    for (String dsn : names) {

        // for each prototype classifier
        for (Map.Entry<String, Classifier> prototype : prototypes.entrySet()) {

            // use InfoGain to discard useless attributes

            AttributeSelectedClassifier classifier = new AttributeSelectedClassifier();

            classifier.setEvaluator(new InfoGainAttributeEval());

            Ranker ranker = new Ranker();
            ranker.setThreshold(0.0001);
            classifier.setSearch(ranker);

            classifier.setClassifier(AbstractClassifier.makeCopy(prototype.getValue()));

            queue.add(Job.submit(
                    new ModelBuilder(dsn, "InfoGain-" + prototype.getKey(), classifier, this.outputBucket)));
        }
    }

    // wait on complete
    Progress progress = new Progress(queue.size(), this.getClass().getSimpleName());
    while (!queue.isEmpty()) {
        try {
            queue.remove().get();
        } catch (Exception e) {
            Job.log("ERROR", e.toString());
        }
        progress.tick();
    }
    progress.done();
    Job.stopService();

}

From source file:jjj.asap.sas.models1.job.BuildRBFKernelModels.java

License:Open Source License

@Override
protected void run() throws Exception {

    // validate args
    if (!Bucket.isBucket("datasets", inputBucket)) {
        throw new FileNotFoundException(inputBucket);
    }
    if (!Bucket.isBucket("models", outputBucket)) {
        throw new FileNotFoundException(outputBucket);
    }

    // init multi-threading
    Job.startService();
    final Queue<Future<Object>> queue = new LinkedList<Future<Object>>();

    // get the input from the bucket
    List<String> names = Bucket.getBucketItems("datasets", this.inputBucket);
    for (String dsn : names) {

        SMO smo = new SMO();
        smo.setFilterType(new SelectedTag(SMO.FILTER_NONE, SMO.TAGS_FILTER));
        smo.setBuildLogisticModels(true);
        RBFKernel kernel = new RBFKernel();
        kernel.setGamma(0.05);
        smo.setKernel(kernel);

        AttributeSelectedClassifier asc = new AttributeSelectedClassifier();
        asc.setEvaluator(new InfoGainAttributeEval());
        Ranker ranker = new Ranker();
        ranker.setThreshold(0.01);
        asc.setSearch(ranker);
        asc.setClassifier(smo);

        queue.add(Job.submit(new ModelBuilder(dsn, "InfoGain-SMO-RBFKernel", asc, this.outputBucket)));
    }

    // wait on complete
    Progress progress = new Progress(queue.size(), this.getClass().getSimpleName());
    while (!queue.isEmpty()) {
        try {
            queue.remove().get();
        } catch (Exception e) {
            Job.log("ERROR", e.toString());
        }
        progress.tick();
    }
    progress.done();
    Job.stopService();

}

From source file:mao.datamining.DataSetPair.java

/**
 * Pre-Process the training data set with:
 * RemoveUselessColumnsByMissingValues filter
 * SpreadSubsample filter to shrink the majority class instances 
 * AttributeSelection filter with CfsSubsetEval and LinearForwardSelection
 */
private void processTrainRawData() {
    System.out.println("====================" + this.trainFileName + "====================");
    System.out.println("====================" + this.trainFileName + "====================");
    System.out.println("====================" + this.trainFileName + "====================");
    finalTrainAttrList.clear();
    try {
        doItOnce4All();
        String sampleFilePath = null;
        //step 2, either over sample, or under sample
        //weka.filters.supervised.instance.SpreadSubsample
        if (this.resampleMethod.equalsIgnoreCase(resampleUnder)) {
            System.out.println("Under-sampling");
            sampleFilePath = Main.OrangeProcessedDSHome + "/afterUnderSampling.arff";
        } else if (resampleMethod.equalsIgnoreCase(resampleOver)) {
            System.out.println("Over-sampling");
            sampleFilePath = Main.OrangeProcessedDSHome + "/afterOverSampling.arff";
        } else if (resampleMethod.equalsIgnoreCase(resampleNone)) {
            System.out.println("No resampling");
            sampleFilePath = Main.OrangeProcessedDSHome + "/afterNoneSampling.arff";
        } else if (resampleMethod.equalsIgnoreCase(resampleMatrix)) {
            // the cost-matrix variant also uses the unsampled data
            System.out.println("Matrix resampling");
            sampleFilePath = Main.OrangeProcessedDSHome + "/afterNoneSampling.arff";
        } else {
            doNotSupport();
        }
        Instances newData = ConverterUtils.DataSource.read(sampleFilePath);
        newData.setClassIndex(newData.numAttributes() - 1);
        //            Main.logging("== New Data After Resampling class instances: ===\n" + newData.toSummaryString());

        //Step 3, select features
        AttributeSelection attrSelectionFilter = new AttributeSelection();
        ASEvaluation eval = null;
        ASSearch search = null;

        // ranker
        if (this.featureSelectionMode.equalsIgnoreCase(featureSelectionA)) {
            System.out.println("Ranker feature selection");
            eval = new weka.attributeSelection.InfoGainAttributeEval();
            // equivalent to: weka.attributeSelection.Ranker -T 0.01 -N -1
            search = new Ranker();
            String[] rankerOptions = { "-T", "0.01", "-N", "-1" };
            if (resampleMethod.equalsIgnoreCase(resampleOver)) {
                rankerOptions[1] = "0.1"; // raise the threshold for over-sampled data
            }
            ((Ranker) search).setOptions(rankerOptions);
            Main.logging("== Start to Select Features with InfoGainAttributeEval and Ranker");
        }
        //weka.attributeSelection.LinearForwardSelection -D 0 -N 5 -I -K 50 -T 0
        else if (this.featureSelectionMode.equalsIgnoreCase(featureSelectionB)) {
            System.out.println("CfsSubset feature selection");
            eval = new CfsSubsetEval();
            search = new LinearForwardSelection();
            String[] linearOptions = { "-D", "0", "-N", "5", "-I", "-K", "50", "-T", "0" };
            ((LinearForwardSelection) search).setOptions(linearOptions);
            Main.logging("== Start to Select Features with CfsSubsetEval and LinearForwardSelection");
        } else if (this.featureSelectionMode.equalsIgnoreCase(featureSelectionNo)) {
            System.out.println("No feature selection");
            Main.logging("No Feature Selection Method");
        } else {
            doNotSupport();
        }

        if (eval != null) {
            attrSelectionFilter.setEvaluator(eval);
            attrSelectionFilter.setSearch(search);
            attrSelectionFilter.setInputFormat(newData);
            newData = Filter.useFilter(newData, attrSelectionFilter);
        }

        Main.logging("== New Data After Selecting Features: ===\n" + newData.toSummaryString());

        //finally, write the final dataset to file system

        try (BufferedWriter writer = new BufferedWriter(
                new OutputStreamWriter(new FileOutputStream(this.trainFileName)))) {
            writer.write(newData.toString());
        }

        int numAttributes = newData.numAttributes();
        for (int i = 0; i < numAttributes; i++) {
            String attrName = newData.attribute(i).name();
            finalTrainAttrList.add(attrName);
        }
        Main.logging(finalTrainAttrList.toString());
        // set the final train dataset
        finalTrainDataSet = newData;
        finalTrainDataSet.setClassIndex(finalTrainDataSet.numAttributes() - 1);

        Main.logging("train dataset class attr: " + finalTrainDataSet.classAttribute().toString());
    } catch (Exception ex) {
        Main.logging(null, ex);
    }

}

From source file:moa.reduction.core.ReductionClassifier.java

License:Open Source License

private Instance performFS(Instance rinst) {
    // Feature selection process performed before
    weka.core.Instance winst = new weka.core.DenseInstance(rinst.weight(), rinst.toDoubleArray());

    if (fselector != null) {
        if (fselector.isUpdated() && totalCount % winSizeOption.getValue() == 0) {
            fselector.applySelection();
            selector = new AttributeSelection();
            Ranker ranker = new Ranker();
            ranker.setNumToSelect(Math.min(numFeaturesOption.getValue(), winst.numAttributes() - 1));
            selector.setEvaluator((ASEvaluation) fselector);
            selector.setSearch(ranker);

            // Build an attribute list matching the incoming instance
            ArrayList<Attribute> list = new ArrayList<Attribute>();
            for (int i = 0; i < rinst.numAttributes(); i++)
                list.add(new Attribute(rinst.attribute(i).name(), i));
            weka.core.Instances single = new weka.core.Instances("single", list, 1);
            single.setClassIndex(rinst.classIndex());
            single.add(winst);
            try {
                selector.SelectAttributes(single);
                System.out.println("Selected features: " + selector.toResultsString());
                selectedFeatures.clear();
                for (int att : selector.selectedAttributes())
                    selectedFeatures.add(att);
                WekaToSamoaInstanceConverter convWS = new WekaToSamoaInstanceConverter();
                return convWS.samoaInstance(selector.reduceDimensionality(winst));
            } catch (Exception e) {
                e.printStackTrace();
            }
        }
    }
    return rinst;
}

From source file:mulan.classifier.transformation.MultiLabelStacking.java

License:Open Source License

/**
 * Initializes all the parameters used in the meta-level.
 * Calculates the correlated labels if meta-level pruning is applied.
 *
 * @param dataSet
 * @param metaClassifier
 * @param includeAttrs
 * @param metaPercentage
 * @param eval
 * @throws Exception
 */
public void initializeMetaLevel(MultiLabelInstances dataSet, Classifier metaClassifier, boolean includeAttrs,
        double metaPercentage, ASEvaluation eval) throws Exception {
    this.metaClassifier = metaClassifier;
    metaLevelEnsemble = AbstractClassifier.makeCopies(metaClassifier, numLabels);
    metaLevelData = new Instances[numLabels];
    metaLevelFilteredEnsemble = new FilteredClassifier[numLabels];
    this.includeAttrs = includeAttrs;
    // calculate the number of correlated labels that corresponds to the
    // given percentage
    topkCorrelated = (int) Math.floor(metaPercentage * numLabels);
    if (topkCorrelated < 1) {
        debug("Too small percentage, selecting k=1");
        topkCorrelated = 1;
    }
    if (topkCorrelated < numLabels) {// pruning should be applied
        selectedAttributes = new int[numLabels][];
        if (eval == null) {// calculate the PhiCoefficient
            Statistics phi = new Statistics();
            phi.calculatePhi(dataSet);
            for (int i = 0; i < numLabels; i++) {
                selectedAttributes[i] = phi.topPhiCorrelatedLabels(i, topkCorrelated);
            }
        } else {// apply feature selection
            AttributeSelection attsel = new AttributeSelection();
            Ranker rankingMethod = new Ranker();
            rankingMethod.setNumToSelect(topkCorrelated);
            attsel.setEvaluator(eval);
            attsel.setSearch(rankingMethod);
            // build a dataset holding all the labels of each instance plus a
            // copy of the label we want to select attributes for
            for (int i = 0; i < numLabels; i++) {
                ArrayList<Attribute> attributes = new ArrayList<Attribute>();

                for (int j = 0; j < numLabels; j++) {
                    attributes.add(train.attribute(labelIndices[j]));
                }
                attributes.add(train.attribute(labelIndices[i]).copy("meta"));

                Instances iporesult = new Instances("Meta format", attributes, 0);
                iporesult.setClassIndex(numLabels);
                for (int k = 0; k < train.numInstances(); k++) {
                    double[] values = new double[numLabels + 1];
                    for (int m = 0; m < numLabels; m++) {
                        values[m] = Double.parseDouble(train.attribute(labelIndices[m])
                                .value((int) train.instance(k).value(labelIndices[m])));
                    }
                    values[numLabels] = Double.parseDouble(train.attribute(labelIndices[i])
                            .value((int) train.instance(k).value(labelIndices[i])));
                    Instance metaInstance = DataUtils.createInstance(train.instance(k), 1, values);
                    metaInstance.setDataset(iporesult);
                    iporesult.add(metaInstance);
                }
                attsel.SelectAttributes(iporesult);
                selectedAttributes[i] = attsel.selectedAttributes();
                iporesult.delete();
            }
        }
    }
}