Example usage for weka.classifiers.functions SMO sparseIndices

List of usage examples for weka.classifiers.functions SMO sparseIndices

Introduction

In this page you can find the example usage for weka.classifiers.functions SMO sparseIndices.

Prototype

public int[][][] sparseIndices() 

Source Link

Document

Returns the indices in sparse format.

Usage

From source file:etc.aloe.cscw2013.SMOFeatureWeighting.java

License: Open Source License

/**
 * Extracts the per-feature weights of the SMO model's first binary
 * hyperplane (class 0 vs. class 1) and returns them sorted
 * alphabetically by feature name.
 *
 * @param trainingExamples the training data; used only to map sparse
 *                         attribute indices back to attribute names
 * @param model            the trained model; must wrap an SMO classifier
 * @return feature-name/weight pairs ordered by feature name
 */
@Override
public List<Entry<String, Double>> getFeatureWeights(ExampleSet trainingExamples, Model model) {
    WekaModel wekaModel = (WekaModel) model;
    Classifier classifier = wekaModel.getClassifier();
    Instances dataFormat = trainingExamples.getInstances();

    SMO smo = getSMO(classifier);

    // Sparse representation of the separating hyperplane for the
    // (class 0, class 1) pair: parallel arrays of weights and the
    // attribute indices they belong to.
    double[] hyperplaneWeights = smo.sparseWeights()[0][1];
    int[] hyperplaneIndices = smo.sparseIndices()[0][1];

    Map<String, Double> weightsByName = new HashMap<String, Double>();
    for (int pos = 0; pos < hyperplaneWeights.length; pos++) {
        String attributeName = dataFormat.attribute(hyperplaneIndices[pos]).name();
        weightsByName.put(attributeName, hyperplaneWeights[pos]);
    }

    List<Map.Entry<String, Double>> sortedEntries = new ArrayList<Map.Entry<String, Double>>(
            weightsByName.entrySet());

    // Alphabetical order by feature name.
    Collections.sort(sortedEntries, new Comparator<Map.Entry<String, Double>>() {
        @Override
        public int compare(Map.Entry<String, Double> left, Map.Entry<String, Double> right) {
            return left.getKey().compareTo(right.getKey());
        }
    });

    return sortedEntries;
}

From source file:KFST.featureSelection.embedded.SVMBasedMethods.MSVM_RFE.java

License: Open Source License

/**
 * Generates binary classifiers (SVMs, via a k-fold cross validation
 * resampling strategy) from the input data restricted to the selected
 * feature subset, and collects each classifier's hyperplane weights.
 *
 * @param selectedFeature an array of indices of the selected feature subset
 *
 * @return an array of the weights of features, one row per trained
 *         classifier (numRun * kFoldValue rows)
 */
protected double[][] buildSVM_KFoldCrossValidation(int[] selectedFeature) {
    double[][] weights = new double[numRun * kFoldValue][selectedFeature.length];
    int classifier = 0;

    for (int i = 0; i < numRun; i++) {
        double[][] copyTrainSet = ArraysFunc.copyDoubleArray2D(trainSet);

        // Shuffle the copy so each run partitions the data differently.
        MathFunc.randomize(copyTrainSet);

        int numSampleInFold = copyTrainSet.length / kFoldValue;
        int remainder = copyTrainSet.length % kFoldValue;
        int indexStart = 0;
        for (int k = 0; k < kFoldValue; k++) {
            // The first `remainder` folds take one extra sample, so every
            // row is assigned to exactly one fold.
            int indexEnd = indexStart + numSampleInFold;
            if (k < remainder) {
                indexEnd++;
            }
            double[][] subTrainSet = ArraysFunc.copyDoubleArray2D(copyTrainSet, indexStart, indexEnd);

            String nameDataCSV = TEMP_PATH + "dataCSV[" + i + "-" + k + "].csv";
            String nameDataARFF = TEMP_PATH + "dataARFF[" + i + "-" + k + "].arff";

            FileFunc.createCSVFile(subTrainSet, selectedFeature, nameDataCSV, nameFeatures, classLabel);
            FileFunc.convertCSVtoARFF(nameDataCSV, nameDataARFF, TEMP_PATH, selectedFeature.length, numFeatures,
                    nameFeatures, numClass, classLabel);

            try {
                Instances dataTrain;
                // try-with-resources: the reader is closed even if the
                // Instances constructor throws (the original leaked it
                // in that case).
                try (BufferedReader readerTrain = new BufferedReader(new FileReader(nameDataARFF))) {
                    dataTrain = new Instances(readerTrain);
                }
                dataTrain.setClassIndex(dataTrain.numAttributes() - 1);

                SMO svm = new SMO();
                svm.setC(parameterC);
                svm.setKernel(WekaSVMKernel.parse(kernelType));
                svm.buildClassifier(dataTrain);

                // Scatter the sparse hyperplane weights of the (class 0,
                // class 1) pair into this classifier's dense weight row.
                double[] weightsSparse = svm.sparseWeights()[0][1];
                int[] indicesSparse = svm.sparseIndices()[0][1];
                for (int m = 0; m < weightsSparse.length; m++) {
                    weights[classifier][indicesSparse[m]] = weightsSparse[m];
                }
            } catch (Exception ex) {
                Logger.getLogger(MSVM_RFE.class.getName()).log(Level.SEVERE, null, ex);
            }

            indexStart = indexEnd;
            classifier++;
        }
    }

    return weights;
}

From source file:KFST.featureSelection.embedded.SVMBasedMethods.SVMBasedMethods.java

License: Open Source License

/**
 * Generates binary classifiers (SVMs) from the input data restricted to
 * the selected feature subset and returns the weights of the features.
 * The One-Versus-One strategy is used to construct classifiers in
 * multiclass classification: Weka's SMO trains one hyperplane per
 * unordered class pair (i, j) with i &lt; j.
 *
 * @param selectedFeature an array of indices of the selected feature subset
 *
 * @return an array of the weights of features, indexed [classI][classJ][feature]
 */
protected double[][][] buildSVM_OneAgainstOne(int[] selectedFeature) {
    String nameDataCSV = TEMP_PATH + "dataCSV.csv";
    String nameDataARFF = TEMP_PATH + "dataARFF.arff";
    double[][][] weights = new double[numClass][numClass][selectedFeature.length];

    FileFunc.createCSVFile(trainSet, selectedFeature, nameDataCSV, nameFeatures, classLabel);
    FileFunc.convertCSVtoARFF(nameDataCSV, nameDataARFF, TEMP_PATH, selectedFeature.length, numFeatures,
            nameFeatures, numClass, classLabel);

    try {
        Instances dataTrain;
        // try-with-resources: the reader is closed even if the Instances
        // constructor throws (the original leaked it in that case).
        try (BufferedReader readerTrain = new BufferedReader(new FileReader(nameDataARFF))) {
            dataTrain = new Instances(readerTrain);
        }
        dataTrain.setClassIndex(dataTrain.numAttributes() - 1);

        SMO svm = new SMO();
        svm.setC(parameterC);
        svm.setKernel(WekaSVMKernel.parse(kernelType));
        svm.buildClassifier(dataTrain);

        // One hyperplane per class pair (i, j), i < j; scatter each
        // sparse weight vector into the dense [i][j] feature row.
        for (int i = 0; i < numClass; i++) {
            for (int j = i + 1; j < numClass; j++) {
                double[] weightsSparse = svm.sparseWeights()[i][j];
                int[] indicesSparse = svm.sparseIndices()[i][j];
                for (int k = 0; k < weightsSparse.length; k++) {
                    weights[i][j][indicesSparse[k]] = weightsSparse[k];
                }
            }
        }
    } catch (Exception ex) {
        Logger.getLogger(SVMBasedMethods.class.getName()).log(Level.SEVERE, null, ex);
    }

    return weights;
}

From source file:KFST.featureSelection.embedded.SVMBasedMethods.SVMBasedMethods.java

License: Open Source License

/**
 * generates binary classifiers (SVM) using input data and based on selected
 * feature subset, and finally returns the weights of features.
 * One-Versus-All strategy is used to construct classifiers in multiclass
 * classification./*from   w  ww .j  a v a2  s  .com*/
 *
 * @param selectedFeature an array of indices of the selected feature subset
 *
 * @return an array of the weights of features
 */
protected double[][] buildSVM_OneAgainstRest(int[] selectedFeature) {
    double[][] weights = new double[numClass][selectedFeature.length];
    String[] tempClassLabel = new String[] { "c1", "c2" };

    for (int indexClass = 0; indexClass < numClass; indexClass++) {
        double[][] copyTrainSet = ArraysFunc.copyDoubleArray2D(trainSet);
        String nameDataCSV = TEMP_PATH + "dataCSV" + indexClass + ".csv";
        String nameDataARFF = TEMP_PATH + "dataARFF" + indexClass + ".arff";

        for (double[] dataRow : copyTrainSet) {
            if (dataRow[numFeatures] == classLabelInTrainSet[indexClass]) {
                dataRow[numFeatures] = 0;
            } else {
                dataRow[numFeatures] = 1;
            }
        }

        FileFunc.createCSVFile(copyTrainSet, selectedFeature, nameDataCSV, nameFeatures, tempClassLabel);
        FileFunc.convertCSVtoARFF(nameDataCSV, nameDataARFF, TEMP_PATH, selectedFeature.length, numFeatures,
                nameFeatures, tempClassLabel.length, tempClassLabel);

        try {
            BufferedReader readerTrain = new BufferedReader(new FileReader(nameDataARFF));
            Instances dataTrain = new Instances(readerTrain);
            readerTrain.close();
            dataTrain.setClassIndex(dataTrain.numAttributes() - 1);

            SMO svm = new SMO();
            svm.setC(parameterC);
            svm.setKernel(WekaSVMKernel.parse(kernelType));
            svm.buildClassifier(dataTrain);

            double[] weightsSparse = svm.sparseWeights()[0][1];
            int[] indicesSparse = svm.sparseIndices()[0][1];
            for (int k = 0; k < weightsSparse.length; k++) {
                weights[indexClass][indicesSparse[k]] = weightsSparse[k];
            }
        } catch (Exception ex) {
            Logger.getLogger(SVMBasedMethods.class.getName()).log(Level.SEVERE, null, ex);
        }
    }

    return weights;
}