Example usage for weka.core Instance weight

List of usage examples for weka.core Instance weight

Introduction

On this page you can find example usage for the weka.core Instance method weight().

Prototype

public double weight();

Document

Returns the instance's weight.
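
Before the ensemble examples below, here is a minimal, self-contained sketch of reading and changing an instance's weight (assuming Weka 3.7+, where DenseInstance and the ArrayList-based Instances constructor are available; the attribute names and values are arbitrary). A freshly created instance carries the weight passed to its constructor, and setWeight() replaces it:

import java.util.ArrayList;

import weka.core.Attribute;
import weka.core.DenseInstance;
import weka.core.Instance;
import weka.core.Instances;

public class InstanceWeightExample {

    public static void main(String[] args) {
        // Build a tiny dataset header with two numeric attributes.
        ArrayList<Attribute> attributes = new ArrayList<Attribute>();
        attributes.add(new Attribute("x1"));
        attributes.add(new Attribute("x2"));
        Instances data = new Instances("example", attributes, 0);

        // The first constructor argument is the instance weight.
        Instance inst = new DenseInstance(1.0, new double[] { 2.0, 3.0 });
        inst.setDataset(data);
        System.out.println(inst.weight()); // 1.0

        // Scale the weight, e.g. to emphasise this instance during training.
        inst.setWeight(inst.weight() * 3);
        System.out.println(inst.weight()); // 3.0
    }
}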

Usage

From source file: moa.classifiers.meta.LeveragingBag.java

License: Open Source License

@Override
public void trainOnInstanceImpl(Instance inst) {
    int numClasses = inst.numClasses();
    //Output Codes
    if (this.initMatrixCodes == true) {
        this.matrixCodes = new int[this.ensemble.length][inst.numClasses()];
        for (int i = 0; i < this.ensemble.length; i++) {
            int numberOnes;
            int numberZeros;

            do { // until we have the same number of zeros and ones
                numberOnes = 0;
                numberZeros = 0;
                for (int j = 0; j < numClasses; j++) {
                    int result = 0;
                    if (j == 1 && numClasses == 2) {
                        result = 1 - this.matrixCodes[i][0];
                    } else {
                        result = (this.classifierRandom.nextBoolean() ? 1 : 0);
                    }
                    this.matrixCodes[i][j] = result;
                    if (result == 1) {
                        numberOnes++;
                    } else {
                        numberZeros++;
                    }
                }
            } while ((numberOnes - numberZeros) * (numberOnes - numberZeros) > (this.ensemble.length % 2));

        }
        this.initMatrixCodes = false;
    }

    boolean Change = false;
    Instance weightedInst = (Instance) inst.copy();
    double w = this.weightShrinkOption.getValue();

    //Train ensemble of classifiers
    for (int i = 0; i < this.ensemble.length; i++) {
        double k = 0.0;
        switch (this.leveraginBagAlgorithmOption.getChosenIndex()) {
        case 0: //LeveragingBag
            k = MiscUtils.poisson(w, this.classifierRandom);
            break;
        case 1: //LeveragingBagME
            double error = this.ADError[i].getEstimation();
            k = !this.ensemble[i].correctlyClassifies(weightedInst) ? 1.0
                    : (this.classifierRandom.nextDouble() < (error / (1.0 - error)) ? 1.0 : 0.0);
            break;
        case 2: //LeveragingBagHalf
            w = 1.0;
            k = this.classifierRandom.nextBoolean() ? 0.0 : w;
            break;
        case 3: //LeveragingBagWT
            w = 1.0;
            k = 1.0 + MiscUtils.poisson(w, this.classifierRandom);
            break;
        case 4: //LeveragingSubag
            w = 1.0;
            k = MiscUtils.poisson(1, this.classifierRandom);
            k = (k > 0) ? w : 0;
            break;
        }
        if (k > 0) {
            if (this.outputCodesOption.isSet()) {
                weightedInst.setClassValue((double) this.matrixCodes[i][(int) inst.classValue()]);
            }
            weightedInst.setWeight(inst.weight() * k);
            this.ensemble[i].trainOnInstance(weightedInst);
        }
        boolean correctlyClassifies = this.ensemble[i].correctlyClassifies(weightedInst);
        double ErrEstim = this.ADError[i].getEstimation();
        if (this.ADError[i].setInput(correctlyClassifies ? 0 : 1)) {
            if (this.ADError[i].getEstimation() > ErrEstim) {
                Change = true;
            }
        }
    }
    if (Change) {
        numberOfChangesDetected++;
        double max = 0.0;
        int imax = -1;
        for (int i = 0; i < this.ensemble.length; i++) {
            if (max < this.ADError[i].getEstimation()) {
                max = this.ADError[i].getEstimation();
                imax = i;
            }
        }
        if (imax != -1) {
            this.ensemble[imax].resetLearning();
            //this.ensemble[imax].trainOnInstance(inst);
            this.ADError[imax] = new ADWIN((double) this.deltaAdwinOption.getValue());
        }
    }
}

From source file: moa.classifiers.meta.OCBoost.java

License: Open Source License

@Override
public void trainOnInstanceImpl(Instance inst) {
    double d = 1.0;
    int[] m = new int[this.ensemble.length];
    for (int j = 0; j < this.ensemble.length; j++) {
        int j0 = 0; //max(0,j-K)
        pipos[j] = 1.0;
        pineg[j] = 1.0;
        m[j] = -1;
        if (this.ensemble[j].correctlyClassifies(inst) == true) {
            m[j] = 1;
        }
        for (int k = j0; k <= j - 1; k++) {
            pipos[j] *= wpos[j][k] / wpos[j][j] * Math.exp(-alphainc[k])
                    + (1.0 - wpos[j][k] / wpos[j][j]) * Math.exp(alphainc[k]);
            pineg[j] *= wneg[j][k] / wneg[j][j] * Math.exp(-alphainc[k])
                    + (1.0 - wneg[j][k] / wneg[j][j]) * Math.exp(alphainc[k]);
        }
        for (int k = 0; k <= j; k++) {
            wpos[j][k] = wpos[j][k] * pipos[j] + d * (m[k] == 1 ? 1 : 0) * (m[j] == 1 ? 1 : 0);
            wneg[j][k] = wneg[j][k] * pineg[j] + d * (m[k] == -1 ? 1 : 0) * (m[j] == -1 ? 1 : 0);
        }
        alphainc[j] = -alpha[j];
        alpha[j] = 0.5 * Math.log(wpos[j][j] / wneg[j][j]);
        alphainc[j] += alpha[j];

        d = d * Math.exp(-alpha[j] * m[j]);

        if (d > 0.0) {
            Instance weightedInst = (Instance) inst.copy();
            weightedInst.setWeight(inst.weight() * d);
            this.ensemble[j].trainOnInstance(weightedInst);
        }
    }
}

From source file: moa.classifiers.meta.OzaBag.java

License: Open Source License

@Override
public void trainOnInstanceImpl(Instance inst) {
    for (int i = 0; i < this.ensemble.length; i++) {
        int k = MiscUtils.poisson(1.0, this.classifierRandom);
        if (k > 0) {
            Instance weightedInst = (Instance) inst.copy();
            weightedInst.setWeight(inst.weight() * k);
            this.ensemble[i].trainOnInstance(weightedInst);
        }
    }
}
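
Nearly every example on this page repeats the same idiom: draw k from a Poisson distribution, copy the instance, scale the copy's weight by inst.weight() * k, and train the base learner on the copy. The following standalone sketch isolates that step; the poisson helper is a hypothetical stand-in for MOA's MiscUtils.poisson (Knuth's method), and poissonWeightedCopy is an illustrative name, not part of Weka or MOA:

import java.util.Random;

import weka.core.Instance;

public class PoissonReweighting {

    // Hypothetical stand-in for moa.core.MiscUtils.poisson (Knuth's method).
    static int poisson(double lambda, Random random) {
        double limit = Math.exp(-lambda);
        double product = random.nextDouble();
        int count = 0;
        while (product > limit) {
            count++;
            product *= random.nextDouble();
        }
        return count;
    }

    // Returns a copy of inst whose weight is scaled by a Poisson(lambda) draw,
    // or null when the draw is zero (the caller then skips training entirely).
    static Instance poissonWeightedCopy(Instance inst, double lambda, Random random) {
        int k = poisson(lambda, random);
        if (k == 0) {
            return null;
        }
        Instance weightedInst = (Instance) inst.copy();
        weightedInst.setWeight(inst.weight() * k);
        return weightedInst;
    }
}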

From source file: moa.classifiers.meta.OzaBagAdwin.java

License: Open Source License

@Override
public void trainOnInstanceImpl(Instance inst) {
    boolean Change = false;
    for (int i = 0; i < this.ensemble.length; i++) {
        int k = MiscUtils.poisson(1.0, this.classifierRandom);
        if (k > 0) {
            Instance weightedInst = (Instance) inst.copy();
            weightedInst.setWeight(inst.weight() * k);
            this.ensemble[i].trainOnInstance(weightedInst);
        }
        boolean correctlyClassifies = this.ensemble[i].correctlyClassifies(inst);
        double ErrEstim = this.ADError[i].getEstimation();
        if (this.ADError[i].setInput(correctlyClassifies ? 0 : 1)) {
            if (this.ADError[i].getEstimation() > ErrEstim) {
                Change = true;
            }
        }
    }
    if (Change) {
        double max = 0.0;
        int imax = -1;
        for (int i = 0; i < this.ensemble.length; i++) {
            if (max < this.ADError[i].getEstimation()) {
                max = this.ADError[i].getEstimation();
                imax = i;
            }
        }
        if (imax != -1) {
            this.ensemble[imax].resetLearning();
            //this.ensemble[imax].trainOnInstance(inst);
            this.ADError[imax] = new ADWIN();
        }
    }
}

From source file: moa.classifiers.meta.OzaBagASHT.java

License: Open Source License

@Override
public void trainOnInstanceImpl(Instance inst) {
    int trueClass = (int) inst.classValue();
    for (int i = 0; i < this.ensemble.length; i++) {
        int k = MiscUtils.poisson(1.0, this.classifierRandom);
        if (k > 0) {
            Instance weightedInst = (Instance) inst.copy();
            weightedInst.setWeight(inst.weight() * k);
            if (Utils.maxIndex(this.ensemble[i].getVotesForInstance(inst)) == trueClass) {
                this.error[i] += alpha * (0.0 - this.error[i]); //EWMA
            } else {
                this.error[i] += alpha * (1.0 - this.error[i]); //EWMA
            }
            this.ensemble[i].trainOnInstance(weightedInst);
        }
    }
}

From source file: moa.classifiers.meta.OzaBagLambda.java

License: Open Source License

@Override
public void trainOnInstanceImpl(Instance inst) {
    for (int i = 0; i < this.ensemble.length; i++) {
        int k = MiscUtils.poisson(this.lambdaOption.getValue(), this.classifierRandom);

        if (!m_debug) {
            if (k > 0) {
                Instance weightedInst = (Instance) inst.copy();
                weightedInst.setWeight(inst.weight() * k);
                this.ensemble[i].trainOnInstance(weightedInst);
            }
        }
        if (m_debug) {
            System.out.println(inst.weight() * k);
        }
    }
}

From source file: moa.classifiers.meta.OzaBoost.java

License: Open Source License

@Override
public void trainOnInstanceImpl(Instance inst) {
    double lambda_d = 1.0;
    for (int i = 0; i < this.ensemble.length; i++) {
        double k = this.pureBoostOption.isSet() ? lambda_d : MiscUtils.poisson(lambda_d, this.classifierRandom);
        if (k > 0.0) {
            Instance weightedInst = (Instance) inst.copy();
            weightedInst.setWeight(inst.weight() * k);
            this.ensemble[i].trainOnInstance(weightedInst);
        }
        if (this.ensemble[i].correctlyClassifies(inst)) {
            this.scms[i] += lambda_d;
            lambda_d *= this.trainingWeightSeenByModel / (2 * this.scms[i]);
        } else {
            this.swms[i] += lambda_d;
            lambda_d *= this.trainingWeightSeenByModel / (2 * this.swms[i]);
        }
    }
}

From source file: moa.classifiers.meta.OzaBoostAdwin.java

License: Open Source License

@Override
public void trainOnInstanceImpl(Instance inst) {
    int numClasses = inst.numClasses();
    // Set log (k-1) and (k-1) for SAMME Method
    if (this.sammeOption.isSet()) {
        this.Km1 = numClasses - 1;
        this.logKm1 = Math.log(this.Km1);
        this.initKm1 = false;
    }
    //Output Codes
    if (this.initMatrixCodes == true) {

        this.matrixCodes = new int[this.ensemble.length][inst.numClasses()];
        for (int i = 0; i < this.ensemble.length; i++) {
            int numberOnes;
            int numberZeros;

            do { // until we have the same number of zeros and ones
                numberOnes = 0;
                numberZeros = 0;
                for (int j = 0; j < numClasses; j++) {
                    int result = 0;
                    if (j == 1 && numClasses == 2) {
                        result = 1 - this.matrixCodes[i][0];
                    } else {
                        result = (this.classifierRandom.nextBoolean() ? 1 : 0);
                    }
                    this.matrixCodes[i][j] = result;
                    if (result == 1) {
                        numberOnes++;
                    } else {
                        numberZeros++;
                    }
                }
            } while ((numberOnes - numberZeros) * (numberOnes - numberZeros) > (this.ensemble.length % 2));

        }
        this.initMatrixCodes = false;
    }

    boolean Change = false;
    double lambda_d = 1.0;
    Instance weightedInst = (Instance) inst.copy();
    for (int i = 0; i < this.ensemble.length; i++) {
        double k = this.pureBoostOption.isSet() ? lambda_d
                : MiscUtils.poisson(lambda_d * this.Km1, this.classifierRandom);
        if (k > 0.0) {
            if (this.outputCodesOption.isSet()) {
                weightedInst.setClassValue((double) this.matrixCodes[i][(int) inst.classValue()]);
            }
            weightedInst.setWeight(inst.weight() * k);
            this.ensemble[i].trainOnInstance(weightedInst);
        }
        boolean correctlyClassifies = this.ensemble[i].correctlyClassifies(weightedInst);
        if (correctlyClassifies) {
            this.scms[i] += lambda_d;
            lambda_d *= this.trainingWeightSeenByModel / (2 * this.scms[i]);
        } else {
            this.swms[i] += lambda_d;
            lambda_d *= this.trainingWeightSeenByModel / (2 * this.swms[i]);
        }

        double ErrEstim = this.ADError[i].getEstimation();
        if (this.ADError[i].setInput(correctlyClassifies ? 0 : 1)) {
            if (this.ADError[i].getEstimation() > ErrEstim) {
                Change = true;
            }
        }
    }
    if (Change) {
        numberOfChangesDetected++;
        double max = 0.0;
        int imax = -1;
        for (int i = 0; i < this.ensemble.length; i++) {
            if (max < this.ADError[i].getEstimation()) {
                max = this.ADError[i].getEstimation();
                imax = i;
            }
        }
        if (imax != -1) {
            this.ensemble[imax].resetLearning();
            //this.ensemble[imax].trainOnInstance(inst);
            this.ADError[imax] = new ADWIN((double) this.deltaAdwinOption.getValue());
            this.scms[imax] = 0;
            this.swms[imax] = 0;
        }
    }
}

From source file: moa.classifiers.meta.PAME.java

License: Open Source License

@Override
public void trainOnInstanceImpl(Instance inst) {

    // get the prediction vector back
    double[] ht = this.getPredictions(inst);
    double yt = inst.classValue();
    if (inst.classIndex() == 0) {
        this.rareCount += 1.0;
    }
    this.count += 1.0;

    // convert to a positive / negative classification scenario
    if (yt == 0) {
        yt = 1.0;
    } else {
        yt = -1.0;
    }

    /*
     * update expert weights
     */
    if (this.updateMethodOption.getChosenIndex() == PAME1) {
        pame1_weights(ht, yt);
    } else if (this.updateMethodOption.getChosenIndex() == PAME2) {
        pame2_weights(ht, yt);
    } else if (this.updateMethodOption.getChosenIndex() == PAME3) {
        pame3_weights(ht, yt);
    }

    /*
     * we are going to use an online bagging / boosting strategy to update the 
     * experts. In the end our objective with the weight formulation is a bit
     * more of a decision theoretic approach. 
     */
    for (int i = 0; i < this.ensemble.length; i++) {
        // sample from a Poisson probability distribution as implemented in 
        // online bagging and boosting
        double w;
        if (this.overSampleOption.isSet() && inst.classIndex() == 0) {
            w = 1.0 / (this.rareCount / this.count);
            if (this.logTransformOption.isSet()) {
                w = Math.log(w);
            }
        } else if (this.underSampleOption.isSet() && inst.classIndex() != 0) {
            w = 1.0 - this.rareCount / this.count;
        } else {
            w = 1.0;
        }

        int k = MiscUtils.poisson(w, this.classifierRandom);

        // update the expert accordingly 
        if (k > 0) {
            // This works by updating the expert k times with the same example:
            // if k = 4, the expert is trained on the same example
            // 4 times in a row.
            Instance weightedInst = (Instance) inst.copy();
            weightedInst.setWeight(inst.weight() * k); // set the # of training times
            this.ensemble[i].trainOnInstance(weightedInst); // update expert
        }
    }

    this.n_negativeWeights = 0;
    for (int i = 0; i < this.weights.length; i++) {
        if (this.weights[i] < 0.0)
            this.n_negativeWeights++;
    }
}

From source file: moa.classifiers.meta.PAMEAdwin.java

License: Open Source License

@Override
public void trainOnInstanceImpl(Instance inst) {

    // get the prediction vector back
    double[] ht = this.getPredictions(inst);
    double yt = inst.classValue();
    if (inst.classIndex() == 0) {
        this.rareCount += 1.0;
    }
    this.count += 1.0;

    // convert to a positive / negative classification scenario
    if (yt == 0) {
        //System.out.println("Y is positive" + yt);
        yt = 1.0;
    } else {
        //System.out.println("Y is negative" + yt);
        yt = -1.0;
    }

    /*
     * update expert weights
     */
    if (this.updateMethodOption.getChosenIndex() == PAME1) {
        pame1_weights(ht, yt);
    } else if (this.updateMethodOption.getChosenIndex() == PAME2) {
        pame2_weights(ht, yt);
    } else if (this.updateMethodOption.getChosenIndex() == PAME3) {
        pame3_weights(ht, yt);
    } else if (this.updateMethodOption.getChosenIndex() == PAME23) {
        pame23_weights(ht, yt);
    }

    /*
     * we are going to use an online bagging / boosting strategy to update the 
     * experts. In the end our objective with the weight formulation is a bit
     * more of a decision theoretic approach. 
     */
    boolean Change = false;
    for (int i = 0; i < this.ensemble.length; i++) {
        // sample from a Poisson probability distribution as implemented in 
        // online bagging and boosting
        double w;
        if (this.overSampleOption.isSet() && inst.classIndex() == 0) {
            w = 1.0 / (this.rareCount / this.count);
            if (this.logTransformOption.isSet()) {
                w = Math.log(w);
            }
        } else if (this.underSampleOption.isSet() && inst.classIndex() != 0) {
            w = 1.0 - this.rareCount / this.count;
        } else {
            w = 1.0;
        }

        int k = MiscUtils.poisson(w, this.classifierRandom);

        // update the expert accordingly 
        if (k > 0) {
            // This works by updating the expert k times with the same example:
            // if k = 4, the expert is trained on the same example
            // 4 times in a row.
            Instance weightedInst = (Instance) inst.copy();
            weightedInst.setWeight(inst.weight() * k); // set the # of training times
            this.ensemble[i].trainOnInstance(weightedInst); // update expert
        }
        boolean correctlyClassifies = this.ensemble[i].correctlyClassifies(inst);
        double ErrEstim = this.ADError[i].getEstimation();
        if (this.ADError[i].setInput(correctlyClassifies ? 0 : 1)) {
            if (this.ADError[i].getEstimation() > ErrEstim) {
                Change = true;
            }
        }
    }

    /*
     * if change was detected, remove the worst expert from the ensemble of 
     * classifiers. 
     */
    if (Change) {
        double max = 0.0;
        int imax = -1;
        for (int i = 0; i < this.ensemble.length; i++) {
            if (max < this.ADError[i].getEstimation()) {
                max = this.ADError[i].getEstimation();
                imax = i;
            }
        }
        if (imax != -1) {
            this.ensemble[imax].resetLearning();
            //this.ensemble[imax].trainOnInstance(inst);
            this.ADError[imax] = new ADWIN();
        }
    }

    this.n_negativeWeights = 0;
    for (int i = 0; i < this.weights.length; i++) {
        if (this.weights[i] < 0.0)
            this.n_negativeWeights++;
    }
}