Example usage for weka.core Instance value

List of usage examples for weka.core Instance value

Introduction

On this page you can find example usage of weka.core Instance value.

Prototype

public double value(Attribute att);

Document

Returns an instance's attribute value in internal format.
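
For a nominal attribute the internal format is the zero-based index of the value in the attribute's definition; for a numeric attribute it is the number itself. A minimal sketch (the attribute names and data are made up for illustration, assuming a recent Weka release where Instance is an interface implemented by DenseInstance):

import java.util.ArrayList;

import weka.core.Attribute;
import weka.core.DenseInstance;
import weka.core.Instance;
import weka.core.Instances;

public class ValueDemo {
    public static void main(String[] args) {
        // One nominal attribute ("color") and one numeric attribute ("weight").
        ArrayList<String> colors = new ArrayList<>();
        colors.add("red");
        colors.add("blue");
        ArrayList<Attribute> attrs = new ArrayList<>();
        attrs.add(new Attribute("color", colors));
        attrs.add(new Attribute("weight"));
        Instances data = new Instances("demo", attrs, 1);

        Instance inst = new DenseInstance(2);
        inst.setDataset(data);
        inst.setValue(data.attribute("color"), "blue");
        inst.setValue(data.attribute("weight"), 3.5);

        System.out.println(inst.value(data.attribute("color")));  // 1.0 -- the index of "blue"
        System.out.println(inst.value(data.attribute("weight"))); // 3.5 -- numeric values as-is
    }
}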

Usage

From source file:net.sf.bddbddb.order.MyId3.java

License:LGPL

/**
 * Computes class distribution for instance using decision tree.
 *
 * @param instance
 *            the instance for which distribution is to be computed
 * @return the class distribution for the given instance
 */
public double[] distributionForInstance(Instance instance) throws NoSupportForMissingValuesException {
    if (m_Attribute == null) {
        return m_Distribution;
    } else if (instance.isMissing(m_Attribute)) {
        double[] d = new double[0];
        for (int i = 0; i < m_Successors.length; ++i) {
            double[] dd = m_Successors[i].distributionForInstance(instance);
            if (d.length == 0 && dd.length > 0)
                d = new double[dd.length];
            for (int j = 0; j < d.length; ++j) {
                d[j] += dd[j];
            }
        }
        for (int j = 0; j < d.length; ++j) {
            d[j] /= m_Successors.length;
        }
        return d;
    } else {
        return m_Successors[(int) instance.value(m_Attribute)].distributionForInstance(instance);
    }
}
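
Note how the internal format drives the recursion: m_Attribute is nominal, so instance.value(m_Attribute) returns the index of the instance's value as a double, and the cast to int selects the matching child subtree. When the value is missing, the method instead averages the class distributions of all children.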

From source file:net.sf.bddbddb.order.MyId3.java

License:LGPL

/**
 * Splits a dataset according to the values of a nominal attribute.
 *
 * @param data
 *            the data which is to be split
 * @param att
 *            the attribute to be used for splitting
 * @return the sets of instances produced by the split
 */
private Instances[] splitData(Instances data, Attribute att) {
    numI = 0;
    splitDataSize = new int[att.numValues()];
    Instances[] splitData = new Instances[att.numValues()];
    for (int j = 0; j < att.numValues(); j++) {
        splitData[j] = new Instances(data, data.numInstances());
    }
    Enumeration instEnum = data.enumerateInstances();
    while (instEnum.hasMoreElements()) {
        Instance inst = (Instance) instEnum.nextElement();
        if (inst.isMissing(att)) {
            // Add to all children.
            for (int k = 0; k < att.numValues(); ++k) {
                splitData[k].add(inst);
            }
        } else {
            int k = (int) inst.value(att);
            splitData[k].add(inst);
            splitDataSize[k]++;
            numI++;
        }
    }
    return splitData;
}

From source file:net.sf.jclal.activelearning.multilabel.querystrategy.AbstractMultiLabelQueryStrategy.java

License:Open Source License

/**
 * Get the true labels of the instance.
 * 
 * @param instance The instance to test
 * @return The true category vector
 */
public boolean[] getTrueLabels(Instance instance) {

    boolean[] trueLabels = new boolean[getNumLabels()];

    for (int counter = 0; counter < getNumLabels(); counter++) {

        int classIdx = getLabelIndices()[counter];

        String classValue = instance.attribute(classIdx).value((int) instance.value(classIdx));

        trueLabels[counter] = classValue.equals("1");
    }

    return trueLabels;
}
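
The attribute(classIdx).value((int) instance.value(classIdx)) chain maps the internal index returned by value back to the label string. Assuming a recent Weka release, Instance.stringValue is an equivalent shorthand for this pattern:

    // Equivalent shorthand (the attribute at classIdx must be nominal or string-valued):
    String classValue = instance.stringValue(classIdx);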

From source file:net.sf.jclal.activelearning.multilabel.querystrategy.MultiLabelMeanMaxLossQueryStrategy.java

License:Open Source License

@Override
public double utilityInstance(Instance instance) {

    if (!(((MulanClassifier) getClassifier()).getInternalClassifier() instanceof ParallelBinaryRelevance)) {
        System.err.println(
                "The Mean Max Loss query strategy must be configured with the Parallel Binary Relevance algorithm");
    }

    ParallelBinaryRelevance learner = (ParallelBinaryRelevance) ((MulanClassifier) getClassifier())
            .getInternalClassifier();

    // One SVM classifier for each label
    Classifier[] smos = learner.getEnsemble();

    if (!(smos[0] instanceof SMO)) {
        System.err.println(
                "The base classifiers of the Parallel Binary Relevance algorithm on the Mean Max Loss query strategy must be SVM");
    }

    BinaryRelevanceTransformation brt = learner.getBrt();

    // Use a double accumulator so the hinge losses are not truncated
    // and the final division is floating-point.
    double sum = 0;

    int countLabelsPredicted = 0;

    // To predict the labels
    for (int j = 0; j < getNumLabels(); j++) {

        double result;

        try {

            Instance transformedInstance = brt.transformInstance(instance, j);

            result = Math.abs(((SMO) smos[j]).SVMOutput(transformedInstance));

            result = Math.max(1 - instance.value(getLabelIndices()[j]) * result, 0);

            // the instance belongs to the j-th label
            if (result <= threshold[j]) {
                countLabelsPredicted++;

                for (int l = 0; l < getNumLabels(); l++) {

                    Instance transformedInstance2 = brt.transformInstance(instance, l);

                    result = Math.abs(((SMO) smos[l]).SVMOutput(transformedInstance2));

                    int mjl = -1;

                    if (j == l) {
                        mjl = 1;
                    }

                    sum += Math.max(1 - mjl * result, 0);

                }

            }

        } catch (Exception e) {

            Logger.getLogger(MultiLabelMeanMaxLossQueryStrategy.class.getName()).log(Level.SEVERE, null, e);
        }

    }

    // Note: if no label's loss fell below its threshold this is 0/0 (NaN).
    return sum / countLabelsPredicted;

}
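
Read as a formula, utilityInstance scores an instance x as a mean max loss:

    MML(x) = (1 / |P(x)|) * sum_{j in P(x)} sum_l max(1 - m_jl * |f_l(x)|, 0)

where f_l(x) is the l-th binary SVM's output, m_jl is 1 when j = l and -1 otherwise, and P(x) is the set of labels whose own hinge loss falls below the per-label threshold computed in training() (next listing).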

From source file:net.sf.jclal.activelearning.multilabel.querystrategy.MultiLabelMeanMaxLossQueryStrategy.java

License:Open Source License

@Override
public void training() {

    super.training();

    ParallelBinaryRelevance learner = (ParallelBinaryRelevance) ((MulanClassifier) getClassifier())
            .getInternalClassifier();

    Classifier[] smos = learner.getEnsemble();

    BinaryRelevanceTransformation brt = learner.getBrt();

    threshold = new double[getNumLabels()];

    // For each classifier a threshold is computed
    for (int j = 0; j < getNumLabels(); j++) {

        double min = Double.MAX_VALUE;

        // For each instance that belongs to the label
        Instances labeledSet = getLabelledData().getDataset();

        for (Instance instance : labeledSet) {

            if (Utils.eq(instance.value(getLabelIndices()[j]), 0.0)) {
                continue;
            }

            double sum = 0;

            for (int l = 0; l < getNumLabels(); l++) {

                Instance transformedInstance = brt.transformInstance(instance, l);

                try {

                    double result = Math.abs(((SMO) smos[l]).SVMOutput(transformedInstance));

                    int mjl = -1;

                    if (j == l) {
                        mjl = 1;
                    }

                    sum += Math.max(1 - mjl * result, 0);

                } catch (Exception e) {
                    Logger.getLogger(MultiLabelMeanMaxLossQueryStrategy.class.getName()).log(Level.SEVERE, null, e);
                }
            }

            // Keep the smallest sum seen so far
            if (min > sum) {
                min = sum;
            }

        }

        threshold[j] = min;

    }
}
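
In effect, the threshold for label j is the smallest total hinge loss observed among the labeled instances that actually carry label j; instances whose value for that label is 0 are skipped.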

From source file:net.sf.jclal.activelearning.multilabel.querystrategy.MultiLabelMMCQueryStrategy.java

License:Open Source License

/**
 * Converts a multi-label instance into the single-label representation
 * used by this query strategy.
 *
 * @param instance The multi-label instance to convert
 * @return The converted instance
 */
public Instance convertInstance(Instance instance) {

    try {

        double[] labelsProbability = new double[getNumLabels()];

        int cantLabels = 0;

        for (int l = 0; l < getNumLabels(); l++) {

            Instance transformedInstance = brt.transformInstance(instance, l);

            classifiersOutputs[l] = Math.abs(((SMO) smos[l]).SVMOutput(transformedInstance));

            // Compute the label's probability from the SVM output
            labelsProbability[l] = 1 / (1 + Math.exp(classifiersOutputs[l] + 1));

            if (Utils.eq(instance.value(getLabelIndices()[l]), 1.0)) {

                ++cantLabels;
            }
        }

        // Normalize the probabilities
        Utils.normalize(labelsProbability);

        // Sort in ascending order
        Arrays.sort(labelsProbability);

        double[] vector = new double[getNumLabels() + 1];

        // Store the probabilities in descending order
        for (int i = 0; i < getNumLabels(); i++) {

            vector[i] = labelsProbability[getNumLabels() - i - 1];
        }

        // The class is the number of relevant labels
        vector[getNumLabels()] = cantLabels;

        DenseInstance newInstance = new DenseInstance(1.0, vector);

        return newInstance;

    } catch (Exception e) {
        Logger.getLogger(MultiLabelMMCQueryStrategy.class.getName()).log(Level.SEVERE, null, e);
    }

    return null;

}

From source file:NeuralNetwork.Layer.java

public void setFirstLayer(Instance instance) {
    // Index 0 is skipped (presumably the bias neuron), so attribute
    // i - 1 of the instance feeds neuron i.
    for (int i = 1; i < neurons.size(); i++) {
        neurons.get(i).setValue(instance.value(i - 1));
    }
}

From source file:neuralnetwork.NeuralNet.java

private void initializeNetwork(Instance pInst) {
    double output;
    List<Double> tempArr;
    Neuron tempNeuron;
    mInput.add(mBias);

    for (int i = 0; i < (mArrSize); i++) {
        mInput.add(pInst.value(i));
    }

    for (int j = 0; j < mLayers; j++) {
        List<Neuron> neurons = new ArrayList<>();
        if (j < mLayers - 1) {
            // Copy the previous layer's outputs before clearing: a plain
            // tempArr = mOutput would alias the list, so mOutput.clear()
            // would empty tempArr as well.
            tempArr = new ArrayList<>(mOutput);
            mOutput.clear();
            for (int k = 0; k <= mhNeurons; k++) {
                neurons.add(new Neuron(mInput.size()));
                if (j == 0) {
                    tempNeuron = neurons.get(k);
                    output = tempNeuron.inputNeuron(mInput, mInput.size());
                    mOutput.add(output);
                } else {

                    tempNeuron = neurons.get(k);
                    if (k == 0) {
                        mOutput.add(mBias);
                        output = tempNeuron.inputNeuron(tempArr, tempArr.size());
                        mOutput.add(output);

                    } else {
                        output = tempNeuron.inputNeuron(tempArr, tempArr.size());
                        mOutput.add(output);
                    }
                }
            }
        } else {
            // Copy before clearing to avoid the aliasing problem noted above.
            tempArr = new ArrayList<>(mOutput);
            mOutput.clear();
            for (int i = 0; i <= moNeurons; i++) {
                neurons.add(new Neuron(tempArr.size()));

                tempNeuron = neurons.get(i);
                output = tempNeuron.inputNeuron(tempArr, tempArr.size());
                mOutput.add(output);
            }
        }

        mNetwork.add(neurons);
    }

    mInput.clear();
    mOutput.clear();
}

From source file:neuralnetwork.NeuralNet.java

private double trainNetwork(Instance pInst) {
    double error = 0.0;
    double temp = runNetwork(pInst);
    int cIndex = pInst.classIndex();
    double target;
    target = pInst.value(cIndex);

    for (int i = mLayers - 1; i >= 0; i--) {
        // Calculate error
        if (i == (mLayers - 1)) {
            // i = input from layer on the left
            // j = current node
            // Output layer error
            // errj = aj(1 - aj)(aj - tj)
            for (int j = 0; j < mClassNum; j++) {
                temp = mOutput.get(j);
                if (target == (double) j) {
                    // The target activation t_j for the correct class is 1
                    error = temp * (1 - temp) * (temp - 1.0);
                } else {
                    error = temp * (1 - temp) * (temp - 0.0);
                }
            }

        } else {
            // j = current node;
            // k = node from the layer on the right
            // Hidden layer error
            // errj = aj(1 - aj) * sum(wjk * errk)
        }

        // Update weight ij
        // wij <- wij - mlRate * errj * ai

        //mNetwork.set(j, neurons);
    }

    return error;
}

From source file:neuralnetwork.NeuralNet.java

private double runNetwork(Instance pInst) {
    mInput.clear();
    mOutput.clear();
    double classNum = 0.0;
    double outputVal = 0.0;
    double output;
    List<Double> tempArr;
    Neuron tempNeuron;
    mInput.add(mBias);

    for (int i = 0; i < (mArrSize); i++) {
        mInput.add(pInst.value(i));
    }

    for (int j = 0; j < mLayers; j++) {
        List<Neuron> neurons;
        neurons = mNetwork.get(j);

        if (j < mLayers - 1) {
            // Copy the previous outputs before clearing; assigning the
            // reference and then clearing would empty tempArr as well.
            tempArr = new ArrayList<>(mOutput);
            mOutput.clear();
            for (int k = 0; k <= mhNeurons; k++) {

                if (j == 0) {
                    tempNeuron = neurons.get(k);
                    output = tempNeuron.inputNeuron(mInput, mInput.size());
                    mOutput.add(output);
                } else {

                    tempNeuron = neurons.get(k);
                    if (k == 0) {
                        mOutput.add(mBias);
                        output = tempNeuron.inputNeuron(tempArr, tempArr.size());
                        mOutput.add(output);

                    } else {
                        output = tempNeuron.inputNeuron(tempArr, tempArr.size());
                        mOutput.add(output);
                    }
                }
            }
        } else {
            // Copy before clearing to avoid the aliasing problem noted above.
            tempArr = new ArrayList<>(mOutput);
            mOutput.clear();
            for (int i = 0; i <= moNeurons; i++) {
                tempNeuron = neurons.get(i);
                output = tempNeuron.inputNeuron(tempArr, tempArr.size());
                mOutput.add(output);
            }
        }

    }

    // Argmax over the output layer: track the best value seen so far,
    // otherwise every output greater than zero would overwrite classNum.
    for (int num = 0; num < mClassNum; num++) {
        if (mOutput.get(num) > outputVal) {
            outputVal = mOutput.get(num);
            classNum = num;
        }
    }

    return classNum;
}