Example usage for weka.core Instances randomize

List of usage examples for weka.core Instances randomize

Introduction

On this page you can find example usage for weka.core Instances randomize.

Prototype

public void randomize(Random random) 

Document

Shuffles the instances in the set so that they are ordered randomly.
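
A minimal, self-contained sketch of the typical pattern (the ARFF path, seed, and 70/30 split below are illustrative assumptions, not taken from the examples that follow): load a dataset, seed the Random so the shuffle is reproducible, call randomize, then split into training and test sets.

import java.util.Random;

import weka.core.Instances;
import weka.core.converters.ConverterUtils.DataSource;

public class RandomizeSketch {

    public static void main(String[] args) throws Exception {
        // hypothetical dataset path; replace with a real ARFF/CSV file
        Instances data = new DataSource("data/iris.arff").getDataSet();
        if (data.classIndex() < 0) {
            data.setClassIndex(data.numAttributes() - 1);
        }

        // a fixed seed makes the shuffle (and the resulting split) reproducible;
        // new Random() without a seed produces a different order on every run
        data.randomize(new Random(42));

        // simple 70/30 split after shuffling
        int trainSize = (int) Math.round(data.numInstances() * 0.7);
        Instances train = new Instances(data, 0, trainSize);
        Instances test = new Instances(data, trainSize, data.numInstances() - trainSize);

        System.out.println("Train: " + train.numInstances() + "  Test: " + test.numInstances());
    }
}

If the class attribute is nominal and the data will be cross-validated, the examples below additionally call stratify(folds) after randomize so that each fold keeps roughly the same class distribution.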

Usage

From source file:gr.auth.ee.lcs.ArffTrainTestLoader.java

License:Open Source License

/**
 * Load instances into the global train store and create test set.
 * @param filename
 *            the .arff filename to be used
 * @param testFile
 *            the test file to be loaded
 * @throws IOException
 *             if the input file is not found
 */
public final void loadInstancesWithTest(final String filename, final String testFile) throws IOException {

    // Open .arff
    final Instances set = InstancesUtility.openInstance(filename);

    if (set.classIndex() < 0)
        set.setClassIndex(set.numAttributes() - 1);
    set.randomize(new Random());
    trainSet = set;

    myLcs.instances = InstancesUtility.convertIntancesToDouble(trainSet);
    myLcs.labelCardinality = InstancesUtility.getLabelCardinality(trainSet);
    testSet = InstancesUtility.openInstance(testFile);

    myLcs.trainSet = trainSet;
    myLcs.testSet = testSet;

    myLcs.testInstances = InstancesUtility.convertIntancesToDouble(testSet);

    System.out.println("Label cardinality: " + myLcs.labelCardinality);

}

From source file:gyc.OverBoostM1.java

License:Open Source License

/**
 * Randomly draws nMaj majority-class and nMin minority-class examples from
 * the given dataset and returns them as a shuffled copy.
 * @param copia the source dataset
 * @param majC index of the majority class value
 * @param minC index of the minority class value
 * @param nMaj number of majority-class examples to draw
 * @param nMin number of minority-class examples to draw
 * @param simplingRandom random number generator used for sampling and shuffling
 * @return the sampled, randomized dataset
 */
protected Instances randomSampling(Instances copia, int majC, int minC, int nMaj, int nMin,
        Random simplingRandom) {
    int[] majExamples = new int[copia.numInstances()];
    int[] minExamples = new int[copia.numInstances()];
    int majCount = 0, minCount = 0;
    // First, we copy the examples from the minority class and save the indexes of the majority
    // the new data-set contains samples_min + samples_min * N / 100
    int size = nMaj + nMin;
    //selected = new int[size]; // we store the selected examples indexes

    String majClassName = copia.attribute(copia.classIndex()).value(majC);

    Instances myDataset = new Instances(copia, 0);
    int nData = 0;
    for (int i = 0; i < copia.numInstances(); i++) {
        if (copia.instance(i).stringValue(copia.classIndex()).equalsIgnoreCase(majClassName)) {
            // save index
            majExamples[majCount] = i;
            majCount++;
        } else {
            minExamples[minCount] = i;
            minCount++;
        }
    }
    if (minCount <= 0)
        return copia;
    /* random undersampling of the majority */
    //boolean[] taken = new boolean[copia.numInstances()];
    int r;
    if (nMaj == majCount) {
        //System.out.println("#equal");
        for (int i = 0; i < nMaj; i++) {
            myDataset.add(copia.instance(majExamples[i]));
        }
    } else {
        for (int i = 0; i < nMaj; i++) {
            r = simplingRandom.nextInt(majCount);
            //selected[nData] = majExamples[r];
            myDataset.add(copia.instance(majExamples[r]));
            //taken[majExamples[r]] = true;
        }
    }
    for (int i = 0; i < nMin; i++) {
        r = simplingRandom.nextInt(minCount);
        //System.out.print("_"+r);

        //selected[nData] = minExamples[r];
        myDataset.add(copia.instance(minExamples[r]));
        //taken[minExamples[r]] = true;
    }

    //System.out.println();
    //System.out.println("minC="+minCount+"; majC="+majCount);

    myDataset.randomize(simplingRandom);
    return myDataset;
}

From source file:gyc.SMOTEBagging.java

License:Open Source License

/**
 * Keeps 100% of the majority class, draws minority-class examples at a% of
 * the majority-class size, and then applies SMOTE (or uniform resampling
 * when only a single minority example was drawn).
 * @param copia the source dataset
 * @param majC index of the majority class value
 * @param minC index of the minority class value
 * @param a resampling rate, as a percentage of the majority-class size
 * @param simplingRandom random number generator used for sampling and shuffling
 * @return the sampled, randomized dataset
 */
protected Instances randomSampling(Instances copia, int majC, int minC, int a, Random simplingRandom) {
    int[] majExamples = new int[copia.numInstances()];
    int[] minExamples = new int[copia.numInstances()];
    int majCount = 0, minCount = 0;
    // First, we copy the examples from the minority class and save the indexes of the majority
    // resample min at rate (Nmaj/Nmin)*a%
    int size = copia.attributeStats(copia.classIndex()).nominalCounts[majC] * a / 100;
    // class name
    String majClassName = copia.attribute(copia.classIndex()).value(majC);

    for (int i = 0; i < copia.numInstances(); i++) {
        if (copia.instance(i).stringValue(copia.classIndex()).equalsIgnoreCase(majClassName)) {
            // save index
            majExamples[majCount] = i;
            majCount++;
        } else {
            minExamples[minCount] = i;
            minCount++;
        }
    }

    /* random undersampling of the majority */
    Instances myDataset = new Instances(copia, 0);
    int r;
    // copy 100% of the majority-class examples
    for (int i = 0; i < majCount; i++) {
        myDataset.add(copia.instance(majExamples[i]));
    }
    if (minCount == 0)
        return myDataset;
    // draw 'size' minority-class examples with replacement (Nmaj * a / 100)
    for (int i = 0; i < size; i++) {
        r = simplingRandom.nextInt(minCount);
        myDataset.add(copia.instance(minExamples[r]));
    }
    myDataset.randomize(simplingRandom);

    if (size == 1) {
        try {
            // too few minority examples for SMOTE (it needs neighbours); fall back to uniform resampling
            Resample filter = new Resample();
            filter.setInputFormat(myDataset);
            filter.setBiasToUniformClass(1.0);
            filter.setRandomSeed(simplingRandom.nextInt());
            myDataset = Filter.useFilter(myDataset, filter);
        } catch (Exception e) {
            // TODO Auto-generated catch block
            e.printStackTrace();
        }
    }
    if (size > 1) {
        try {
            SMOTE filter = new SMOTE();
            filter.setInputFormat(myDataset); // filter capabilities are checked here
            // oversample the minority class up to the majority-class count
            double value = 100.0 * majCount / size - 100;
            filter.setPercentage(value);
            //if (nMin<5) filter.setNearestNeighbors(nMin);
            filter.setRandomSeed(simplingRandom.nextInt());
            // apply the SMOTE filter
            myDataset = Filter.useFilter(myDataset, filter);
            //t.stop();
        } catch (Exception e) {
            // TODO Auto-generated catch block
            e.printStackTrace();
        }
    }

    return myDataset;
}

From source file:gyc.UnderOverBoostM1.java

License:Open Source License

/**
 * Builds a balanced sample of the requested size by drawing size/2 examples
 * from the majority class and size/2 from the minority class (with
 * replacement), then shuffles the result.
 * @param copia the source dataset
 * @param majC index of the majority class value
 * @param minC index of the minority class value
 * @param a resampling rate, as a percentage of the majority-class size
 * @param simplingRandom random number generator used for sampling and shuffling
 * @return the sampled, randomized dataset
 */
protected Instances randomSampling(Instances copia, int majC, int minC, int a, Random simplingRandom) {
    int[] majExamples = new int[copia.numInstances()];
    int[] minExamples = new int[copia.numInstances()];
    int majCount = 0, minCount = 0;
    // First, we copy the examples from the minority class and save the indexes of the majority
    // the new data-set contains samples_min + samples_min * N / 100
    int size = copia.attributeStats(copia.classIndex()).nominalCounts[majC] * a / 100 * 2;
    // class name
    String majClassName = copia.attribute(copia.classIndex()).value(majC);

    for (int i = 0; i < copia.numInstances(); i++) {
        if (copia.instance(i).stringValue(copia.classIndex()).equalsIgnoreCase(majClassName)) {
            // save index
            majExamples[majCount] = i;
            majCount++;
        } else {
            minExamples[minCount] = i;
            minCount++;
        }
    }

    /* random undersampling of the majority */
    Instances myDataset = new Instances(copia, 0);
    int r;
    for (int i = 0; i < size / 2; i++) {
        r = simplingRandom.nextInt(majCount);
        myDataset.add(copia.instance(majExamples[r]));

        if (minCount > 0) {
            r = simplingRandom.nextInt(minCount);
            myDataset.add(copia.instance(minExamples[r]));
        }
    }

    myDataset.randomize(simplingRandom);
    return myDataset;
}

From source file:hurtowniedanych.FXMLController.java

public void trainAndTestKNN() throws FileNotFoundException, IOException, Exception {

    InstanceQuery instanceQuery = new InstanceQuery();
    instanceQuery.setUsername("postgres");
    instanceQuery.setPassword("szupek");
    instanceQuery.setCustomPropsFile(new File("./src/data/DatabaseUtils.props")); // point to the properties file with the PostgreSQL settings

    String query = "select ks.wydawnictwo, ks.gatunek, kl.miasto\n" + "from zakupy z,ksiazki ks,klienci kl\n"
            + "where ks.id_ksiazka=z.id_ksiazka and kl.id_klient=z.id_klient";

    instanceQuery.setQuery(query);
    Instances data = instanceQuery.retrieveInstances();
    data.setClassIndex(data.numAttributes() - 1);

    data.randomize(new Random());
    double percent = 70.0;
    int trainSize = (int) Math.round(data.numInstances() * percent / 100);
    int testSize = data.numInstances() - trainSize;
    Instances trainData = new Instances(data, 0, trainSize);
    Instances testData = new Instances(data, trainSize, testSize);

    int lSasiadow = Integer.parseInt(textFieldKnn.getText());
    System.out.println(lSasiadow);

    IBk ibk = new IBk(lSasiadow);

    // Set up the distance functions
    EuclideanDistance euclidean = new EuclideanDistance(); // Euclidean
    ManhattanDistance manhatan = new ManhattanDistance(); // Manhattan (city-block)

    LinearNNSearch linearNN = new LinearNNSearch();

    if (comboboxOdleglosc.getSelectionModel().getSelectedItem().equals("Manhatan")) {
        linearNN.setDistanceFunction(manhatan);
    } else {
        linearNN.setDistanceFunction(euclidean);
    }

    ibk.setNearestNeighbourSearchAlgorithm(linearNN); // set the neighbour search strategy

    // Build the classifier
    ibk.buildClassifier(trainData);

    Evaluation eval = new Evaluation(trainData);
    eval.evaluateModel(ibk, testData);
    spr.setVisible(true);
    labelKnn.setVisible(true);
    labelOdleglosc.setVisible(true);
    labelKnn.setText(textFieldKnn.getText());
    labelOdleglosc.setText(comboboxOdleglosc.getSelectionModel().getSelectedItem().toString());
    spr.setText(eval.toSummaryString("Wynik:", true));
}

From source file:id3classifier.Main.java

public static void main(String[] args) throws Exception {

    ConverterUtils.DataSource source = new ConverterUtils.DataSource(file);
    Instances dataSet = source.getDataSet();

    // discretize the dataset
    Discretize filter = new Discretize();
    filter.setInputFormat(dataSet);
    dataSet = Filter.useFilter(dataSet, filter);

    // standardize the dataset
    Standardize standardizedData = new Standardize();
    standardizedData.setInputFormat(dataSet);
    dataSet = Filter.useFilter(dataSet, standardizedData);

    // randomize the dataset
    dataSet.setClassIndex(dataSet.numAttributes() - 1);
    dataSet.randomize(new Debug.Random());

    // get the sizes of the training and testing sets and split
    int trainingSize = (int) Math.round(dataSet.numInstances() * .7);
    int testSize = dataSet.numInstances() - trainingSize;
    Instances training = new Instances(dataSet, 0, trainingSize);
    Instances test = new Instances(dataSet, trainingSize, testSize);

    // set up the ID3 classifier on the training data
    ID3Classifiers classifier = new ID3Classifiers();
    classifier.buildClassifier(training);

    // set up the evaluation and test using the classifier and test set
    Evaluation eval = new Evaluation(dataSet);
    eval.evaluateModel(classifier, test);

    // output results and exit; important to exit here to stop JavaFX
    System.out.println(eval.toSummaryString("\nResults\n======\n", false));
    System.exit(0);
}

From source file:id3j48.WekaAccess.java

public static Evaluation percentageSplit(Instances data, Classifier classifier, int percentage)
        throws Exception {
    Instances tempdata = new Instances(data);
    tempdata.randomize(new Random(1));

    int trainSize = Math.round(tempdata.numInstances() * percentage / 100);
    int testSize = tempdata.numInstances() - trainSize;
    Instances train = new Instances(tempdata, 0, trainSize);
    Instances test = new Instances(tempdata, trainSize, testSize);

    classifier.buildClassifier(train);
    Evaluation eval = testModel(classifier, train, test);
    return eval;
}

From source file:irisdata.IrisData.java

/**
 * @param args the command line arguments
 * @throws java.lang.Exception
 */
public static void main(String[] args) throws Exception {

    String file = "/Users/paul/Desktop/BYU-Idaho/Spring2015/CS450/iris.csv";

    DataSource source = new DataSource(file);
    Instances data = source.getDataSet();

    if (data.classIndex() == -1) {
        data.setClassIndex(data.numAttributes() - 1);
    }

    data.randomize(new Random(1));

    // set training set to 70%
    RemovePercentage remove = new RemovePercentage();
    remove.setPercentage(30);
    remove.setInputFormat(data);
    Instances trainingSet = Filter.useFilter(data, remove);

    // set the rest for the testing set
    remove.setInvertSelection(true);
    Instances testSet = Filter.useFilter(data, remove);

    // train classifier - kind of
    HardCodedClassifier classifier = new HardCodedClassifier();
    classifier.buildClassifier(trainingSet); // this does nothing right now

    // Evaluate classifier
    Evaluation eval = new Evaluation(trainingSet);
    eval.evaluateModel(classifier, testSet);
    //eval.crossValidateModel(classifier, data, 10, new Random(1));

    // Print some statistics
    System.out.println("Results: " + eval.toSummaryString());

}

From source file:it.unisa.gitdm.evaluation.WekaEvaluator.java

private static void evaluateModel(String baseFolderPath, String projectName, Classifier pClassifier,
        Instances pInstances, String pModelName, String pClassifierName) throws Exception {

    // other options
    int folds = 10;

    // randomize data
    Random rand = new Random(42);
    Instances randData = new Instances(pInstances);
    randData.randomize(rand);
    if (randData.classAttribute().isNominal()) {
        randData.stratify(folds);
    }

    // perform cross-validation and add predictions
    Instances predictedData = null;
    Evaluation eval = new Evaluation(randData);

    int positiveValueIndexOfClassFeature = 0;
    for (int n = 0; n < folds; n++) {
        Instances train = randData.trainCV(folds, n);
        Instances test = randData.testCV(folds, n);
        // the above code is used by the StratifiedRemoveFolds filter, the
        // code below by the Explorer/Experimenter:
        // Instances train = randData.trainCV(folds, n, rand);

        int classFeatureIndex = 0;
        for (int i = 0; i < train.numAttributes(); i++) {
            if (train.attribute(i).name().equals("isBuggy")) {
                classFeatureIndex = i;
                break;
            }
        }

        Attribute classFeature = train.attribute(classFeatureIndex);
        for (int i = 0; i < classFeature.numValues(); i++) {
            if (classFeature.value(i).equals("TRUE")) {
                positiveValueIndexOfClassFeature = i;
            }
        }

        train.setClassIndex(classFeatureIndex);
        test.setClassIndex(classFeatureIndex);

        // build and evaluate classifier
        pClassifier.buildClassifier(train);
        eval.evaluateModel(pClassifier, test);

        // add predictions
        //           AddClassification filter = new AddClassification();
        //           filter.setClassifier(pClassifier);
        //           filter.setOutputClassification(true);
        //           filter.setOutputDistribution(true);
        //           filter.setOutputErrorFlag(true);
        //           filter.setInputFormat(train);
        //           Filter.useFilter(train, filter); 
        //           Instances pred = Filter.useFilter(test, filter); 
        //           if (predictedData == null)
        //             predictedData = new Instances(pred, 0);
        //           
        //           for (int j = 0; j < pred.numInstances(); j++)
        //             predictedData.add(pred.instance(j));
    }
    double accuracy = (eval.numTruePositives(positiveValueIndexOfClassFeature)
            + eval.numTrueNegatives(positiveValueIndexOfClassFeature))
            / (eval.numTruePositives(positiveValueIndexOfClassFeature)
                    + eval.numFalsePositives(positiveValueIndexOfClassFeature)
                    + eval.numFalseNegatives(positiveValueIndexOfClassFeature)
                    + eval.numTrueNegatives(positiveValueIndexOfClassFeature));

    double fmeasure = 2 * ((eval.precision(positiveValueIndexOfClassFeature)
            * eval.recall(positiveValueIndexOfClassFeature))
            / (eval.precision(positiveValueIndexOfClassFeature)
                    + eval.recall(positiveValueIndexOfClassFeature)));
    File wekaOutput = new File(baseFolderPath + projectName + "/predictors.csv");
    PrintWriter pw1 = new PrintWriter(wekaOutput);

    pw1.write(accuracy + ";" + eval.precision(positiveValueIndexOfClassFeature) + ";"
            + eval.recall(positiveValueIndexOfClassFeature) + ";" + fmeasure + ";"
            + eval.areaUnderROC(positiveValueIndexOfClassFeature));

    System.out.println(projectName + ";" + pClassifierName + ";" + pModelName + ";"
            + eval.numTruePositives(positiveValueIndexOfClassFeature) + ";"
            + eval.numFalsePositives(positiveValueIndexOfClassFeature) + ";"
            + eval.numFalseNegatives(positiveValueIndexOfClassFeature) + ";"
            + eval.numTrueNegatives(positiveValueIndexOfClassFeature) + ";" + accuracy + ";"
            + eval.precision(positiveValueIndexOfClassFeature) + ";"
            + eval.recall(positiveValueIndexOfClassFeature) + ";" + fmeasure + ";"
            + eval.areaUnderROC(positiveValueIndexOfClassFeature) + "\n");
}

From source file:jjj.asap.sas.ensemble.impl.CrossValidatedEnsemble.java

License:Open Source License

@Override
public StrongLearner build(int essaySet, String ensembleName, List<WeakLearner> learners) {

    // can't handle empty case
    if (learners.isEmpty()) {
        return this.ensemble.build(essaySet, ensembleName, learners);
    }

    // create a dummy dataset.
    DatasetBuilder builder = new DatasetBuilder();
    builder.addVariable("id");
    builder.addNominalVariable("class", Contest.getRubrics(essaySet));
    Instances dummy = builder.getDataset("dummy");

    // add data
    Map<Double, Double> groundTruth = Contest.getGoldStandard(essaySet);
    for (double id : learners.get(0).getPreds().keySet()) {
        dummy.add(new DenseInstance(1.0, new double[] { id, groundTruth.get(id) }));
    }

    // stratify
    dummy.sort(0);
    dummy.randomize(new Random(1));
    dummy.setClassIndex(1);
    dummy.stratify(nFolds);

    // now evaluate each fold
    Map<Double, Double> preds = new HashMap<Double, Double>();
    for (int k = 0; k < nFolds; k++) {
        Instances train = dummy.trainCV(nFolds, k);
        Instances test = dummy.testCV(nFolds, k);

        List<WeakLearner> cvLeaners = new ArrayList<WeakLearner>();
        for (WeakLearner learner : learners) {
            WeakLearner copy = learner.copyOf();
            for (int i = 0; i < test.numInstances(); i++) {
                copy.getPreds().remove(test.instance(i).value(0));
                copy.getProbs().remove(test.instance(i).value(0));
            }
            cvLeaners.add(copy);
        }

        // train on fold
        StrongLearner cv = this.ensemble.build(essaySet, ensembleName, cvLeaners);

        List<WeakLearner> testLeaners = new ArrayList<WeakLearner>();
        for (WeakLearner learner : cv.getLearners()) {
            WeakLearner copy = learner.copyOf();
            copy.getPreds().clear();
            copy.getProbs().clear();
            WeakLearner source = find(copy.getName(), learners);
            for (int i = 0; i < test.numInstances(); i++) {
                double id = test.instance(i).value(0);
                copy.getPreds().put(id, source.getPreds().get(id));
                copy.getProbs().put(id, source.getProbs().get(id));
            }
            testLeaners.add(copy);
        }

        preds.putAll(this.ensemble.classify(essaySet, ensembleName, testLeaners, cv.getContext()));
    }

    // now prepare final result

    StrongLearner strong = this.ensemble.build(essaySet, ensembleName, learners);

    double trainingError = strong.getKappa();
    double cvError = Calc.kappa(essaySet, preds, groundTruth);
    //   Job.log(essaySet+"-"+ensembleName, "XVAL: training error = " + trainingError + " cv error = " + cvError);      

    strong.setKappa(cvError);
    return strong;
}