Example usage for weka.core Instances attribute

Introduction

This page presents example usage of the weka.core Instances.attribute method.

Prototype

public Attribute attribute(String name)

Document

Returns an attribute given its name.
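
A minimal sketch of a by-name lookup (the dataset path and attribute name below are placeholders); if no attribute with the given name exists, the method returns null:

import weka.core.Attribute;
import weka.core.Instances;
import weka.core.converters.ConverterUtils.DataSource;

public class AttributeByNameExample {
    public static void main(String[] args) throws Exception {
        // load a dataset; "weather.arff" is an illustrative path
        Instances data = DataSource.read("weather.arff");
        // look up an attribute by name; null means no such attribute
        Attribute cls = data.attribute("play");
        if (cls != null) {
            data.setClassIndex(cls.index());
            System.out.println("Class attribute: " + cls.name());
        }
    }
}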

Usage

From source file:Helper.CustomFilter.java

public String convertToFit(String value, Instances data, int index) {
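    // Maps a numeric value onto one of the attribute's stored values
    // (assumes the attribute's values are sorted in ascending order).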
    int i;
    String threshold = data.attribute(index).value(0);
    for (i = 0; i < data.numDistinctValues(data.attribute(index)); i++) {
        if (Float.valueOf(value) < Float.valueOf(data.attribute(index).value(i))) {
            value = threshold;
            return value;
        }
        threshold = data.attribute(index).value(i);
    }
    value = threshold;
    return value;
}

From source file:id3.MyID3.java

/**
 * Decision tree algorithm
 * @param instances training data
 * @param attributes remaining attributes
 * @throws Exception
 */
public void buildMyID3(Instances instances, ArrayList<Attribute> attributes) throws Exception {
    // Check if no instances have reached this node.
    if (instances.numInstances() == 0) {
        classAttribute = null;
        classLabel = Instance.missingValue();
        classDistributionAmongInstances = new double[instances.numClasses()];
        return;
    }
    // Check if all instances only contain one class label
    if (computeEntropy(instances) == 0) {
        currentAttribute = null;
        classDistributionAmongInstances = classDistribution(instances);
        // Labelling process at node
        for (int i = 0; i < classDistributionAmongInstances.length; i++) {
            if (classDistributionAmongInstances[i] > 0) {
                classLabel = i;
                break;
            }
        }
        classAttribute = instances.classAttribute();
        Utils.normalize(classDistributionAmongInstances);
    } else {
        // Compute information gain for each attribute
        double[] infoGainAttribute = new double[instances.numAttributes()];
        for (int i = 0; i < instances.numAttributes(); i++) {
            infoGainAttribute[i] = computeIG(instances, instances.attribute(i));
        }
        // Choose attribute with maximum information gain
        int indexMaxInfoGain = 0;
        double maximumInfoGain = 0.0;
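        // scan only the first numAttributes-1 entries (the class attribute is assumed to be last)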
        for (int i = 0; i < (infoGainAttribute.length - 1); i++) {
            if (infoGainAttribute[i] > maximumInfoGain) {
                maximumInfoGain = infoGainAttribute[i];
                indexMaxInfoGain = i;
            }
        }
        currentAttribute = instances.attribute(indexMaxInfoGain);
        // Delete the current attribute from the remaining attributes
        ArrayList<Attribute> remainingAttributes = attributes;
        if (!remainingAttributes.isEmpty()) {
            int indexAttributeDeleted = 0;
            for (int i = 0; i < remainingAttributes.size(); i++) {
                if (remainingAttributes.get(i).index() == currentAttribute.index()) {
                    indexAttributeDeleted = i;
                }
            }
            remainingAttributes.remove(indexAttributeDeleted);
        }
        // Split instances based on currentAttribute (create branch new node)
        Instances[] instancesSplitBasedAttribute = splitData(instances, currentAttribute);
        subTree = new MyID3[currentAttribute.numValues()];
        for (int i = 0; i < currentAttribute.numValues(); i++) {
            if (instancesSplitBasedAttribute[i].numInstances() == 0) {
                // Handle empty examples at nodes
                double[] currentClassDistribution = classDistribution(instances);
                classLabel = 0.0;
                double counterDistribution = 0.0;
                for (int j = 0; j < currentClassDistribution.length; j++) {
                    if (currentClassDistribution[j] > counterDistribution) {
                        classLabel = j;
                    }
                }
                classAttribute = instances.classAttribute();
            } else {
                subTree[i] = new MyID3();
                subTree[i].buildMyID3(instancesSplitBasedAttribute[i], remainingAttributes);
            }
        }
    }
}

From source file:id3classifier.ID3Classifiers.java

@Override
public void buildClassifier(Instances instances) throws Exception {

    // create instance and attribute lists sized to the dataset's counts
    List<Instance> instanceList = new ArrayList<>(instances.numInstances());
    List<Attribute> attributeList = new ArrayList<>(instances.numAttributes());

    // copy every instance from the dataset into the instance list
    for (int i = 0; i < instances.numInstances(); i++) {

        instanceList.add(instances.instance(i));
    }

    // copy every attribute except the class attribute into the attribute list
    for (int i = 0; i < instances.numAttributes(); i++) {

        if (i != instances.classIndex()) {

            // add instances' current attribute to the attribute list
            attributeList.add(instances.attribute(i));
        }
    }

    // build the decision tree from the instance and attribute lists
    tree = buildTree(instanceList, attributeList);
}

From source file:imba.classifier.NBTubes.java

@Override
public void buildClassifier(Instances data) {
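    // Build a Naive Bayes-style classifier: filter numeric attributes to
    // nominal, then tabulate per-class value counts and probabilities.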
    dataClassifier = new ArrayList<>();
    infoClassifier = new ArrayList<>();
    validAttribute = new ArrayList<>();
    dataset = null;
    sumClass = null;
    dataSize = 0;
    header_Instances = data;

    Filter f;
    int i, j, k, l, m;
    int sumVal;

    int numAttr = data.numAttributes(); // this includes the class, so attributes + 1

    i = 0;
    while (i < numAttr && wasNumeric == false) {
        if (i == classIdx) {
            i++;
        }

        if (i != numAttr && data.attribute(i).isNumeric()) {
            wasNumeric = true;
        }

        i++;
    }

    Instance p;

    // apply the filters
    if (wasNumeric) {
        f = new Normalize();
        //Filter f = new NumericToNominal();
        try {
            f.setInputFormat(data);

            for (Instance i1 : data) {
                f.input(i1);
            }

            f.batchFinished();
        } catch (Exception ex) {
            Logger.getLogger(NBTubes.class.getName()).log(Level.SEVERE, null, ex);
        }

        dataset = f.getOutputFormat();

        while ((p = f.output()) != null) {
            dataset.add(p);
        }
    }

    //f = new NumericToNominal();
    if (filter.equals("Discretize")) {
        f = new Discretize();
    } else {
        f = new NumericToNominal();
    }

    try {
        if (wasNumeric) {
            f.setInputFormat(dataset);
            for (Instance i1 : dataset) {
                f.input(i1);
            }
        } else {
            f.setInputFormat(data);
            for (Instance i1 : data) {
                f.input(i1);
            }
        }

        f.batchFinished();
    } catch (Exception ex) {
        Logger.getLogger(NBTubes.class.getName()).log(Level.SEVERE, null, ex);
    }

    dataset = null;
    dataset = f.getOutputFormat();

    while ((p = f.output()) != null) {
        dataset.add(p);
    }

    //building data structure
    classIdx = data.classIndex();

    dataSize = data.size();

    // fill dataClassifier and infoClassifier with empty arrays
    i = 0;
    j = i;
    while (j < numAttr) {
        if (i == classIdx) {
            i++;
        } else {
            dataClassifier.add(new ArrayList<>());
            infoClassifier.add(new ArrayList<>());

            if (j < i) {
                m = j - 1;
            } else {
                m = j;
            }

            k = 0;
            while (k < dataset.attribute(j).numValues()) {
                dataClassifier.get(m).add(new ArrayList<>());
                infoClassifier.get(m).add(new ArrayList<>());

                l = 0;
                while (l < dataset.attribute(classIdx).numValues()) {
                    dataClassifier.get(m).get(k).add(0);
                    infoClassifier.get(m).get(k).add(0.0);

                    l++;
                }

                k++;
            }
        }

        i++;
        j++;
    }

    // fill the dataClassifier counts from the dataset
    sumClass = new int[data.numClasses()];

    i = 0;
    while (i < dataset.size()) {
        j = 0;
        k = j;
        while (k < dataset.numAttributes()) {
            if (j == classIdx) {
                j++;
            } else {
                if (k < j) {
                    m = k - 1;
                } else {
                    m = k;
                }

                dataClassifier.get(m).get((int) dataset.get(i).value(k)).set(
                        (int) dataset.get(i).value(classIdx),
                        dataClassifier.get(m).get((int) dataset.get(i).value(k))
                                .get((int) dataset.get(i).value(classIdx)) + 1);

                if (m == 0) {
                    sumClass[(int) dataset.get(i).value(classIdx)]++;
                }

            }

            k++;
            j++;
        }

        i++;
    }

    // turn the counts into per-class probabilities
    i = 0;
    while (i < dataClassifier.size()) {
        j = 0;
        while (j < dataClassifier.get(i).size()) {
            k = 0;
            while (k < dataClassifier.get(i).get(j).size()) {
                infoClassifier.get(i).get(j).set(k, (double) dataClassifier.get(i).get(j).get(k) / sumClass[k]);

                k++;
            }

            j++;
        }

        i++;
    }

    /*
    //check whether any value of an attribute
    //represents more than 80% of the data
    i = 0;
    while (i < dataClassifier.size()) {
    j = 0;
    while (j < dataClassifier.get(i).size()) {
                
                
        j++;
    }
            
    i++;
    }
    */
}

From source file:iris.ID3.java

public void makeLikeAWhat(Instances instances) {
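    // Recursively grow an ID3 node: split on the attribute with the
    // highest information gain, or make a leaf when no gain remains.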
    // Create storage for different info gains
    double[] infoGains = new double[instances.numAttributes()];
    // Enumerate through attributes to find the best gain
    Enumeration attributeEnum = instances.enumerateAttributes();
    while (attributeEnum.hasMoreElements()) {
        // Loop through attributes, adding gain to infoGains array
        Attribute att = (Attribute) attributeEnum.nextElement();
        infoGains[att.index()] = infoGain(instances, att);
    }
    // Use maxIndex to find the highest info gain in the array
    highestInfoGain = instances.attribute(Utils.maxIndex(infoGains));

    // Make a leaf if there is no more info to gain
    // Otherwise, create children
    // Check if there is no more info to gain
    if (Utils.eq(infoGains[highestInfoGain.index()], 0)) {
        highestInfoGain = null;
        // Instantiate maxDistribution
        maxDistribution = new double[instances.numClasses()];
        // Set up enumerator for instances
        Enumeration instanceEnum = instances.enumerateInstances();
        // Tally classes
        while (instanceEnum.hasMoreElements()) {
            Instance instance = (Instance) instanceEnum.nextElement();
            maxDistribution[(int) instance.classValue()]++;
        }
        // Normalize data for easier manipulation
        Utils.normalize(maxDistribution);
        // Get the max index of the distribution
        classValue = Utils.maxIndex(maxDistribution);
        // Save class attribute
        classAttribute = instances.classAttribute();
    }
    // Create children
    else {
        // Split best attribute into bins
        Instances[] bins = makeBins(instances, highestInfoGain);
        // Create nodes
        children = new ID3[highestInfoGain.numValues()];
        for (int i = 0; i < highestInfoGain.numValues(); i++) {
            children[i] = new ID3();
            children[i].makeLikeAWhat(bins[i]);
        }
    }
}

From source file:it.unisa.gitdm.evaluation.WekaEvaluator.java

private static void evaluateModel(String baseFolderPath, String projectName, Classifier pClassifier,
        Instances pInstances, String pModelName, String pClassifierName) throws Exception {
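    // 10-fold cross-validation of pClassifier; prints and writes accuracy,
    // precision, recall, F-measure, and AUC for the "isBuggy" = TRUE class.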

    // other options
    int folds = 10;

    // randomize data
    Random rand = new Random(42);
    Instances randData = new Instances(pInstances);
    randData.randomize(rand);
    if (randData.classAttribute().isNominal()) {
        randData.stratify(folds);
    }

    // perform cross-validation and add predictions
    Instances predictedData = null;
    Evaluation eval = new Evaluation(randData);

    int positiveValueIndexOfClassFeature = 0;
    for (int n = 0; n < folds; n++) {
        Instances train = randData.trainCV(folds, n);
        Instances test = randData.testCV(folds, n);
        // the above code is used by the StratifiedRemoveFolds filter, the
        // code below by the Explorer/Experimenter:
        // Instances train = randData.trainCV(folds, n, rand);

        int classFeatureIndex = 0;
        for (int i = 0; i < train.numAttributes(); i++) {
            if (train.attribute(i).name().equals("isBuggy")) {
                classFeatureIndex = i;
                break;
            }
        }

        Attribute classFeature = train.attribute(classFeatureIndex);
        for (int i = 0; i < classFeature.numValues(); i++) {
            if (classFeature.value(i).equals("TRUE")) {
                positiveValueIndexOfClassFeature = i;
            }
        }

        train.setClassIndex(classFeatureIndex);
        test.setClassIndex(classFeatureIndex);

        // build and evaluate classifier
        pClassifier.buildClassifier(train);
        eval.evaluateModel(pClassifier, test);

        // add predictions
        //           AddClassification filter = new AddClassification();
        //           filter.setClassifier(pClassifier);
        //           filter.setOutputClassification(true);
        //           filter.setOutputDistribution(true);
        //           filter.setOutputErrorFlag(true);
        //           filter.setInputFormat(train);
        //           Filter.useFilter(train, filter); 
        //           Instances pred = Filter.useFilter(test, filter); 
        //           if (predictedData == null)
        //             predictedData = new Instances(pred, 0);
        //           
        //           for (int j = 0; j < pred.numInstances(); j++)
        //             predictedData.add(pred.instance(j));
    }
    double accuracy = (eval.numTruePositives(positiveValueIndexOfClassFeature)
            + eval.numTrueNegatives(positiveValueIndexOfClassFeature))
            / (eval.numTruePositives(positiveValueIndexOfClassFeature)
                    + eval.numFalsePositives(positiveValueIndexOfClassFeature)
                    + eval.numFalseNegatives(positiveValueIndexOfClassFeature)
                    + eval.numTrueNegatives(positiveValueIndexOfClassFeature));

    double fmeasure = 2 * ((eval.precision(positiveValueIndexOfClassFeature)
            * eval.recall(positiveValueIndexOfClassFeature))
            / (eval.precision(positiveValueIndexOfClassFeature)
                    + eval.recall(positiveValueIndexOfClassFeature)));
    File wekaOutput = new File(baseFolderPath + projectName + "/predictors.csv");
    PrintWriter pw1 = new PrintWriter(wekaOutput);

    pw1.write(accuracy + ";" + eval.precision(positiveValueIndexOfClassFeature) + ";"
            + eval.recall(positiveValueIndexOfClassFeature) + ";" + fmeasure + ";"
            + eval.areaUnderROC(positiveValueIndexOfClassFeature));
    // close the writer so the metrics are flushed to predictors.csv
    pw1.close();

    System.out.println(projectName + ";" + pClassifierName + ";" + pModelName + ";"
            + eval.numTruePositives(positiveValueIndexOfClassFeature) + ";"
            + eval.numFalsePositives(positiveValueIndexOfClassFeature) + ";"
            + eval.numFalseNegatives(positiveValueIndexOfClassFeature) + ";"
            + eval.numTrueNegatives(positiveValueIndexOfClassFeature) + ";" + accuracy + ";"
            + eval.precision(positiveValueIndexOfClassFeature) + ";"
            + eval.recall(positiveValueIndexOfClassFeature) + ";" + fmeasure + ";"
            + eval.areaUnderROC(positiveValueIndexOfClassFeature) + "\n");
}

From source file:j48.BinC45ModelSelection.java

License:Open Source License

/**
 * Selects C4.5-type split for the given dataset.
 */
public final ClassifierSplitModel selectModel(Instances data) {

    double minResult;
    double currentResult;
    BinC45Split[] currentModel;
    BinC45Split bestModel = null;
    NoSplit noSplitModel = null;
    double averageInfoGain = 0;
    int validModels = 0;
    boolean multiVal = true;
    Distribution checkDistribution;
    double sumOfWeights;
    int i;

    try {

        // Check if all Instances belong to one class or if not
        // enough Instances to split.
        checkDistribution = new Distribution(data);
        noSplitModel = new NoSplit(checkDistribution);
        if (Utils.sm(checkDistribution.total(), 2 * m_minNoObj) || Utils.eq(checkDistribution.total(),
                checkDistribution.perClass(checkDistribution.maxClass())))
            return noSplitModel;

        // Check if all attributes are nominal and have a
        // lot of values.
        Enumeration enu = data.enumerateAttributes();
        while (enu.hasMoreElements()) {
            Attribute attribute = (Attribute) enu.nextElement();
            if ((attribute.isNumeric())
                    || (Utils.sm((double) attribute.numValues(), (0.3 * (double) m_allData.numInstances())))) {
                multiVal = false;
                break;
            }
        }
        currentModel = new BinC45Split[data.numAttributes()];
        sumOfWeights = data.sumOfWeights();

        // For each attribute.
        for (i = 0; i < data.numAttributes(); i++) {

            // Apart from class attribute.
            if (i != (data).classIndex()) {

                // Get models for current attribute.
                currentModel[i] = new BinC45Split(i, m_minNoObj, sumOfWeights);
                currentModel[i].buildClassifier(data);

                // Check if useful split for current attribute
                // exists and check for enumerated attributes with
                // a lot of values.
                if (currentModel[i].checkModel())
                    if ((data.attribute(i).isNumeric())
                            || (multiVal || Utils.sm((double) data.attribute(i).numValues(),
                                    (0.3 * (double) m_allData.numInstances())))) {
                        averageInfoGain = averageInfoGain + currentModel[i].infoGain();
                        validModels++;
                    }
            } else
                currentModel[i] = null;
        }

        // Check if any useful split was found.
        if (validModels == 0)
            return noSplitModel;
        averageInfoGain = averageInfoGain / (double) validModels;

        // Find "best" attribute to split on.
        minResult = 0;
        for (i = 0; i < data.numAttributes(); i++) {
            if ((i != (data).classIndex()) && (currentModel[i].checkModel()))

                // Use 1E-3 here to get a closer approximation to the
                // original
                // implementation.
                if ((currentModel[i].infoGain() >= (averageInfoGain - 1E-3))
                        && Utils.gr(currentModel[i].gainRatio(), minResult)) {
                    bestModel = currentModel[i];
                    minResult = currentModel[i].gainRatio();
                }
        }

        // Check if useful split was found.
        if (Utils.eq(minResult, 0))
            return noSplitModel;

        // Add all Instances with unknown values for the corresponding
        // attribute to the distribution for the model, so that
        // the complete distribution is stored with the model.
        bestModel.distribution().addInstWithUnknown(data, bestModel.attIndex());

        // Set the split point analogue to C45 if attribute numeric.
        bestModel.setSplitPoint(m_allData);
        return bestModel;
    } catch (Exception e) {
        e.printStackTrace();
    }
    return null;
}

From source file:j48.BinC45Split.java

License:Open Source License

/**
 * Creates a C4.5-type split on the given data.
 *
 * @exception Exception if something goes wrong
 */
public void buildClassifier(Instances trainInstances) throws Exception {

    // Initialize the remaining instance variables.
    m_numSubsets = 0;
    m_splitPoint = Double.MAX_VALUE;
    m_infoGain = 0;
    m_gainRatio = 0;

    // Different treatment for enumerated and numeric
    // attributes.
    if (trainInstances.attribute(m_attIndex).isNominal()) {
        handleEnumeratedAttribute(trainInstances);
    } else {
        trainInstances.sort(trainInstances.attribute(m_attIndex));
        handleNumericAttribute(trainInstances);
    }
}

From source file:j48.BinC45Split.java

License:Open Source License

/**
 * Creates split on enumerated attribute.
 *
 * @exception Exception if something goes wrong
 */
private void handleEnumeratedAttribute(Instances trainInstances) throws Exception {

    Distribution newDistribution, secondDistribution;
    int numAttValues;
    double currIG, currGR;
    Instance instance;
    int i;

    numAttValues = trainInstances.attribute(m_attIndex).numValues();
    newDistribution = new Distribution(numAttValues, trainInstances.numClasses());

    // Only Instances with known values are relevant.
    Enumeration enu = trainInstances.enumerateInstances();
    while (enu.hasMoreElements()) {
        instance = (Instance) enu.nextElement();
        if (!instance.isMissing(m_attIndex))
            newDistribution.add((int) instance.value(m_attIndex), instance);
    }
    m_distribution = newDistribution;

    // For all values
    for (i = 0; i < numAttValues; i++) {

        if (Utils.grOrEq(newDistribution.perBag(i), m_minNoObj)) {
            secondDistribution = new Distribution(newDistribution, i);

            // Check if minimum number of Instances in the two
            // subsets.
            if (secondDistribution.check(m_minNoObj)) {
                m_numSubsets = 2;
                currIG = m_infoGainCrit.splitCritValue(secondDistribution, m_sumOfWeights);
                currGR = m_gainRatioCrit.splitCritValue(secondDistribution, m_sumOfWeights, currIG);
                if ((i == 0) || Utils.gr(currGR, m_gainRatio)) {
                    m_gainRatio = currGR;
                    m_infoGain = currIG;
                    m_splitPoint = (double) i;
                    m_distribution = secondDistribution;
                }
            }
        }
    }
}

From source file:j48.BinC45Split.java

License:Open Source License

/**
 * Prints left side of condition.
 * 
 * @param data the data to get the attribute name from.
 * @return the attribute name
 */
public final String leftSide(Instances data) {

    return data.attribute(m_attIndex).name();
}