Example usage for weka.classifiers Evaluation areaUnderROC

List of usage examples for weka.classifiers Evaluation areaUnderROC

Introduction

On this page you can find example usages of weka.classifiers.Evaluation.areaUnderROC(int).

Prototype

public double areaUnderROC(int classIndex) 

Document

Returns the area under the ROC curve for those predictions that have been collected in the evaluateClassifier(Classifier, Instances) method.
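
Before the collected examples, here is a minimal, self-contained sketch of a typical call sequence (it is not taken from the examples below): an Evaluation object cross-validates a classifier, then areaUnderROC is read for one class. The dataset path and the class index 1 are placeholder assumptions; adjust them to your data.

import java.util.Random;

import weka.classifiers.Evaluation;
import weka.classifiers.trees.J48;
import weka.core.Instances;
import weka.core.converters.ConverterUtils.DataSource;

public class AreaUnderROCExample {
    public static void main(String[] args) throws Exception {
        // hypothetical dataset; replace with your own ARFF/CSV file
        Instances data = DataSource.read("data.arff");
        data.setClassIndex(data.numAttributes() - 1); // last attribute is the class

        // 10-fold cross-validation collects the predictions the AUC is computed from
        Evaluation eval = new Evaluation(data);
        eval.crossValidateModel(new J48(), data, 10, new Random(1));

        // AUC for the class with index 1 (e.g. the "positive" label)
        System.out.println("AUC (class 1): " + eval.areaUnderROC(1));
    }
}

As the examples below show, the same call also works after evaluateModel(classifier, testSet) on a separate test set, and a weighted average over all classes is available via weightedAreaUnderROC().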

Usage

From source file:adams.flow.core.EvaluationHelper.java

License:Open Source License

/**
 * Returns a statistical value from the evaluation object.
 *
 * @param eval   the evaluation object to get the value from
 * @param statistic   the type of value to return
 * @param classIndex   the class label index, for statistics like AUC
 * @return      the determined value, Double.NaN if not found
 * @throws Exception   if evaluation fails
 */
public static double getValue(Evaluation eval, EvaluationStatistic statistic, int classIndex) throws Exception {
    switch (statistic) {
    case NUMBER_CORRECT:
        return eval.correct();
    case NUMBER_INCORRECT:
        return eval.incorrect();
    case NUMBER_UNCLASSIFIED:
        return eval.unclassified();
    case PERCENT_CORRECT:
        return eval.pctCorrect();
    case PERCENT_INCORRECT:
        return eval.pctIncorrect();
    case PERCENT_UNCLASSIFIED:
        return eval.pctUnclassified();
    case KAPPA_STATISTIC:
        return eval.kappa();
    case MEAN_ABSOLUTE_ERROR:
        return eval.meanAbsoluteError();
    case ROOT_MEAN_SQUARED_ERROR:
        return eval.rootMeanSquaredError();
    case RELATIVE_ABSOLUTE_ERROR:
        return eval.relativeAbsoluteError();
    case ROOT_RELATIVE_SQUARED_ERROR:
        return eval.rootRelativeSquaredError();
    case CORRELATION_COEFFICIENT:
        return eval.correlationCoefficient();
    case SF_PRIOR_ENTROPY:
        return eval.SFPriorEntropy();
    case SF_SCHEME_ENTROPY:
        return eval.SFSchemeEntropy();
    case SF_ENTROPY_GAIN:
        return eval.SFEntropyGain();
    case SF_MEAN_PRIOR_ENTROPY:
        return eval.SFMeanPriorEntropy();
    case SF_MEAN_SCHEME_ENTROPY:
        return eval.SFMeanSchemeEntropy();
    case SF_MEAN_ENTROPY_GAIN:
        return eval.SFMeanEntropyGain();
    case KB_INFORMATION:
        return eval.KBInformation();
    case KB_MEAN_INFORMATION:
        return eval.KBMeanInformation();
    case KB_RELATIVE_INFORMATION:
        return eval.KBRelativeInformation();
    case TRUE_POSITIVE_RATE:
        return eval.truePositiveRate(classIndex);
    case NUM_TRUE_POSITIVES:
        return eval.numTruePositives(classIndex);
    case FALSE_POSITIVE_RATE:
        return eval.falsePositiveRate(classIndex);
    case NUM_FALSE_POSITIVES:
        return eval.numFalsePositives(classIndex);
    case TRUE_NEGATIVE_RATE:
        return eval.trueNegativeRate(classIndex);
    case NUM_TRUE_NEGATIVES:
        return eval.numTrueNegatives(classIndex);
    case FALSE_NEGATIVE_RATE:
        return eval.falseNegativeRate(classIndex);
    case NUM_FALSE_NEGATIVES:
        return eval.numFalseNegatives(classIndex);
    case IR_PRECISION:
        return eval.precision(classIndex);
    case IR_RECALL:
        return eval.recall(classIndex);
    case F_MEASURE:
        return eval.fMeasure(classIndex);
    case MATTHEWS_CORRELATION_COEFFICIENT:
        return eval.matthewsCorrelationCoefficient(classIndex);
    case AREA_UNDER_ROC:
        return eval.areaUnderROC(classIndex);
    case AREA_UNDER_PRC:
        return eval.areaUnderPRC(classIndex);
    case WEIGHTED_TRUE_POSITIVE_RATE:
        return eval.weightedTruePositiveRate();
    case WEIGHTED_FALSE_POSITIVE_RATE:
        return eval.weightedFalsePositiveRate();
    case WEIGHTED_TRUE_NEGATIVE_RATE:
        return eval.weightedTrueNegativeRate();
    case WEIGHTED_FALSE_NEGATIVE_RATE:
        return eval.weightedFalseNegativeRate();
    case WEIGHTED_IR_PRECISION:
        return eval.weightedPrecision();
    case WEIGHTED_IR_RECALL:
        return eval.weightedRecall();
    case WEIGHTED_F_MEASURE:
        return eval.weightedFMeasure();
    case WEIGHTED_MATTHEWS_CORRELATION_COEFFICIENT:
        return eval.weightedMatthewsCorrelation();
    case WEIGHTED_AREA_UNDER_ROC:
        return eval.weightedAreaUnderROC();
    case WEIGHTED_AREA_UNDER_PRC:
        return eval.weightedAreaUnderPRC();
    case UNWEIGHTED_MACRO_F_MEASURE:
        return eval.unweightedMacroFmeasure();
    case UNWEIGHTED_MICRO_F_MEASURE:
        return eval.unweightedMicroFmeasure();
    case BIAS:
        return eval.getPluginMetric(Bias.class.getName()).getStatistic(Bias.NAME);
    case RSQUARED:
        return eval.getPluginMetric(RSquared.class.getName()).getStatistic(RSquared.NAME);
    case SDR:
        return eval.getPluginMetric(SDR.class.getName()).getStatistic(SDR.NAME);
    case RPD:
        return eval.getPluginMetric(RPD.class.getName()).getStatistic(RPD.NAME);
    default:
        throw new IllegalArgumentException("Unhandled statistic field: " + statistic);
    }
}

From source file:au.edu.usyd.it.yangpy.sampling.BPSO.java

License:Open Source License

/**
 * Evaluates a classifier on the sampled training data and the
 * internal test data.
 *
 * @param c   the classifier
 * @param train   the sampled training set
 * @param test   the internal test set
 * @return   the mean of AUC, F-measure and G-mean
 */
public double classify(Classifier c, Instances train, Instances test) {
    double AUC = 0;
    double FM = 0;
    double GM = 0;

    try {
        c.buildClassifier(train);

        // evaluate classifier
        Evaluation eval = new Evaluation(train);
        eval.evaluateModel(c, test);

        AUC = eval.areaUnderROC(1);   // area under the ROC curve for class 1
        FM = eval.fMeasure(1);        // F-measure for class 1
        // G-mean: geometric mean of the true positive rates of both classes
        GM = Math.sqrt(eval.truePositiveRate(0) * eval.truePositiveRate(1));

    } catch (IOException ioe) {
        ioe.printStackTrace();
    } catch (Exception e) {
        e.printStackTrace();
    }

    double mean = (AUC + FM + GM) / 3;

    if (verbose) {
        System.out.print("AUC: " + dec.format(AUC) + " ");
        System.out.print("FM: " + dec.format(FM) + " ");
        System.out.println("GM: " + dec.format(GM));
        System.out.println("      \\       |       /  ");
        System.out.println("        Mean: " + dec.format(mean));
    }

    return mean;
}

From source file:au.edu.usyd.it.yangpy.snp.Ensemble.java

License:Open Source License

public double classify(Classifier c, int cId) throws Exception {

    // train the classifier on the training data
    c.buildClassifier(train);

    // get the predicted value and class distribution for each test instance
    for (int i = 0; i < test.numInstances(); i++) {
        predictDistribution[cId][i] = c.distributionForInstance(test.instance(i));
        predictValue[cId][i] = c.classifyInstance(test.instance(i));
    }

    // compute the AUC (class index 1) for this classifier
    Evaluation eval = new Evaluation(train);
    eval.evaluateModel(c, test);
    return eval.areaUnderROC(1) * 100;
}

From source file:it.unisa.gitdm.evaluation.WekaEvaluator.java

private static void evaluateModel(String baseFolderPath, String projectName, Classifier pClassifier,
        Instances pInstances, String pModelName, String pClassifierName) throws Exception {

    // number of cross-validation folds
    int folds = 10;

    // randomize data
    Random rand = new Random(42);
    Instances randData = new Instances(pInstances);
    randData.randomize(rand);
    if (randData.classAttribute().isNominal()) {
        randData.stratify(folds);
    }

    // perform cross-validation and add predictions
    Instances predictedData = null;
    Evaluation eval = new Evaluation(randData);

    int positiveValueIndexOfClassFeature = 0;
    for (int n = 0; n < folds; n++) {
        Instances train = randData.trainCV(folds, n);
        Instances test = randData.testCV(folds, n);
        // the above code is used by the StratifiedRemoveFolds filter, the
        // code below by the Explorer/Experimenter:
        // Instances train = randData.trainCV(folds, n, rand);

        int classFeatureIndex = 0;
        for (int i = 0; i < train.numAttributes(); i++) {
            if (train.attribute(i).name().equals("isBuggy")) {
                classFeatureIndex = i;
                break;
            }
        }

        Attribute classFeature = train.attribute(classFeatureIndex);
        for (int i = 0; i < classFeature.numValues(); i++) {
            if (classFeature.value(i).equals("TRUE")) {
                positiveValueIndexOfClassFeature = i;
            }
        }

        train.setClassIndex(classFeatureIndex);
        test.setClassIndex(classFeatureIndex);

        // build and evaluate classifier
        pClassifier.buildClassifier(train);
        eval.evaluateModel(pClassifier, test);

        // add predictions
        //           AddClassification filter = new AddClassification();
        //           filter.setClassifier(pClassifier);
        //           filter.setOutputClassification(true);
        //           filter.setOutputDistribution(true);
        //           filter.setOutputErrorFlag(true);
        //           filter.setInputFormat(train);
        //           Filter.useFilter(train, filter); 
        //           Instances pred = Filter.useFilter(test, filter); 
        //           if (predictedData == null)
        //             predictedData = new Instances(pred, 0);
        //           
        //           for (int j = 0; j < pred.numInstances(); j++)
        //             predictedData.add(pred.instance(j));
    }
    double accuracy = (eval.numTruePositives(positiveValueIndexOfClassFeature)
            + eval.numTrueNegatives(positiveValueIndexOfClassFeature))
            / (eval.numTruePositives(positiveValueIndexOfClassFeature)
                    + eval.numFalsePositives(positiveValueIndexOfClassFeature)
                    + eval.numFalseNegatives(positiveValueIndexOfClassFeature)
                    + eval.numTrueNegatives(positiveValueIndexOfClassFeature));

    double fmeasure = 2 * ((eval.precision(positiveValueIndexOfClassFeature)
            * eval.recall(positiveValueIndexOfClassFeature))
            / (eval.precision(positiveValueIndexOfClassFeature)
                    + eval.recall(positiveValueIndexOfClassFeature)));
    File wekaOutput = new File(baseFolderPath + projectName + "/predictors.csv");
    PrintWriter pw1 = new PrintWriter(wekaOutput);

    pw1.write(accuracy + ";" + eval.precision(positiveValueIndexOfClassFeature) + ";"
            + eval.recall(positiveValueIndexOfClassFeature) + ";" + fmeasure + ";"
            + eval.areaUnderROC(positiveValueIndexOfClassFeature));
    pw1.close();

    System.out.println(projectName + ";" + pClassifierName + ";" + pModelName + ";"
            + eval.numTruePositives(positiveValueIndexOfClassFeature) + ";"
            + eval.numFalsePositives(positiveValueIndexOfClassFeature) + ";"
            + eval.numFalseNegatives(positiveValueIndexOfClassFeature) + ";"
            + eval.numTrueNegatives(positiveValueIndexOfClassFeature) + ";" + accuracy + ";"
            + eval.precision(positiveValueIndexOfClassFeature) + ";"
            + eval.recall(positiveValueIndexOfClassFeature) + ";" + fmeasure + ";"
            + eval.areaUnderROC(positiveValueIndexOfClassFeature) + "\n");
}

From source file:mao.datamining.ModelProcess.java

private void testWithExtraDS(Classifier classifier, Instances finalTrainDataSet, Instances finalTestDataSet,
        FileOutputStream testCaseSummaryOut, TestResult result) {
    //Use final training dataset and final test dataset
    double confusionMatrix[][] = null;

    long start, end, trainTime = 0, testTime = 0;
    if (finalTestDataSet != null) {
        try {
            // measure training time
            start = System.currentTimeMillis();
            classifier.buildClassifier(finalTrainDataSet);
            end = System.currentTimeMillis();
            trainTime += end - start;

            // measure test time
            start = System.currentTimeMillis();
            Evaluation testEvalOnly = new Evaluation(finalTrainDataSet);
            testEvalOnly.evaluateModel(classifier, finalTestDataSet);
            end = System.currentTimeMillis();
            testTime += end - start;

            testCaseSummaryOut.write("=====================================================\n".getBytes());
            testCaseSummaryOut.write((testEvalOnly.toSummaryString("=== Test Summary ===", true)).getBytes());
            testCaseSummaryOut.write("\n".getBytes());
            testCaseSummaryOut
                    .write((testEvalOnly.toClassDetailsString("=== Test Class Detail ===\n")).getBytes());
            testCaseSummaryOut.write("\n".getBytes());
            testCaseSummaryOut
                    .write((testEvalOnly.toMatrixString("=== Confusion matrix for Test ===\n")).getBytes());
            testCaseSummaryOut.flush();

            confusionMatrix = testEvalOnly.confusionMatrix();
            result.setConfusionMatrix4Test(confusionMatrix);

            result.setAUT(testEvalOnly.areaUnderROC(1));
            result.setPrecision(testEvalOnly.precision(1));
            result.setRecall(testEvalOnly.recall(1));
        } catch (Exception e) {
            ModelProcess.logging(null, e);
        }
        result.setTrainingTime(trainTime);
        result.setTestTime(testTime);
    } // end of evaluating on the test data set

}

From source file:meddle.TrainModelByDomainOS.java

License:Open Source License

/**
 * Evaluates the trained classifier/model via 10-fold cross-validation,
 * recording accuracy, false positive/negative rates, AUC, F-measure
 * and the confusion matrix.
 *
 * @param classifier
 *            - the trained classifier
 * @param domainOS
 *            - the domain name (with OS)
 */
public static MetaEvaluationMeasures doEvaluation(Classifier classifier, String domainOS, Instances tras,
        MetaEvaluationMeasures mem) {
    try {
        Evaluation evaluation = new Evaluation(tras);
        evaluation.crossValidateModel(classifier, tras, 10, new Random(1));
        mem.numInstance = evaluation.numInstances();
        double M = evaluation.numTruePositives(1) + evaluation.numFalseNegatives(1);
        mem.numPositive = (int) M;
        mem.AUC = evaluation.areaUnderROC(1);
        mem.numCorrectlyClassified = (int) evaluation.correct();
        mem.accuracy = 1.0 * mem.numCorrectlyClassified / mem.numInstance;
        mem.falseNegativeRate = evaluation.falseNegativeRate(1);
        mem.falsePositiveRate = evaluation.falsePositiveRate(1);
        mem.fMeasure = evaluation.fMeasure(1);
        double[][] cmMatrix = evaluation.confusionMatrix();
        mem.confusionMatrix = cmMatrix;
        mem.TP = evaluation.numTruePositives(1);
        mem.TN = evaluation.numTrueNegatives(1);
        mem.FP = evaluation.numFalsePositives(1);
        mem.FN = evaluation.numFalseNegatives(1);
    } catch (Exception e) {
        e.printStackTrace();
    }

    return mem;
}

From source file:org.openml.webapplication.io.Output.java

License:Open Source License

public static Map<Metric, MetricScore> evaluatorToMap(Evaluation evaluator, int classes, TaskType task)
        throws Exception {
    Map<Metric, MetricScore> m = new HashMap<Metric, MetricScore>();

    if (task == TaskType.REGRESSION) {

        // here all measures for regression tasks
        m.put(new Metric("mean_absolute_error", "openml.evaluation.mean_absolute_error(1.0)"),
                new MetricScore(evaluator.meanAbsoluteError(), (int) evaluator.numInstances()));
        m.put(new Metric("mean_prior_absolute_error", "openml.evaluation.mean_prior_absolute_error(1.0)"),
                new MetricScore(evaluator.meanPriorAbsoluteError(), (int) evaluator.numInstances()));
        m.put(new Metric("root_mean_squared_error", "openml.evaluation.root_mean_squared_error(1.0)"),
                new MetricScore(evaluator.rootMeanSquaredError(), (int) evaluator.numInstances()));
        m.put(new Metric("root_mean_prior_squared_error",
                "openml.evaluation.root_mean_prior_squared_error(1.0)"),
                new MetricScore(evaluator.rootMeanPriorSquaredError(), (int) evaluator.numInstances()));
        m.put(new Metric("relative_absolute_error", "openml.evaluation.relative_absolute_error(1.0)"),
                new MetricScore(evaluator.relativeAbsoluteError() / 100, (int) evaluator.numInstances()));
        m.put(new Metric("root_relative_squared_error", "openml.evaluation.root_relative_squared_error(1.0)"),
                new MetricScore(evaluator.rootRelativeSquaredError() / 100, (int) evaluator.numInstances()));

    } else if (task == TaskType.CLASSIFICATION || task == TaskType.LEARNINGCURVE
            || task == TaskType.TESTTHENTRAIN) {

        m.put(new Metric("average_cost", "openml.evaluation.average_cost(1.0)"),
                new MetricScore(evaluator.avgCost(), (int) evaluator.numInstances()));
        m.put(new Metric("total_cost", "openml.evaluation.total_cost(1.0)"),
                new MetricScore(evaluator.totalCost(), (int) evaluator.numInstances()));

        m.put(new Metric("mean_absolute_error", "openml.evaluation.mean_absolute_error(1.0)"),
                new MetricScore(evaluator.meanAbsoluteError(), (int) evaluator.numInstances()));
        m.put(new Metric("mean_prior_absolute_error", "openml.evaluation.mean_prior_absolute_error(1.0)"),
                new MetricScore(evaluator.meanPriorAbsoluteError(), (int) evaluator.numInstances()));
        m.put(new Metric("root_mean_squared_error", "openml.evaluation.root_mean_squared_error(1.0)"),
                new MetricScore(evaluator.rootMeanSquaredError(), (int) evaluator.numInstances()));
        m.put(new Metric("root_mean_prior_squared_error",
                "openml.evaluation.root_mean_prior_squared_error(1.0)"),
                new MetricScore(evaluator.rootMeanPriorSquaredError(), (int) evaluator.numInstances()));
        m.put(new Metric("relative_absolute_error", "openml.evaluation.relative_absolute_error(1.0)"),
                new MetricScore(evaluator.relativeAbsoluteError() / 100, (int) evaluator.numInstances()));
        m.put(new Metric("root_relative_squared_error", "openml.evaluation.root_relative_squared_error(1.0)"),
                new MetricScore(evaluator.rootRelativeSquaredError() / 100, (int) evaluator.numInstances()));

        m.put(new Metric("prior_entropy", "openml.evaluation.prior_entropy(1.0)"),
                new MetricScore(evaluator.priorEntropy(), (int) evaluator.numInstances()));
        m.put(new Metric("kb_relative_information_score",
                "openml.evaluation.kb_relative_information_score(1.0)"),
                new MetricScore(evaluator.KBRelativeInformation() / 100, (int) evaluator.numInstances()));

        Double[] precision = new Double[classes];
        Double[] recall = new Double[classes];
        Double[] auroc = new Double[classes];
        Double[] fMeasure = new Double[classes];
        Double[] instancesPerClass = new Double[classes];
        double[][] confusionMatrix = evaluator.confusionMatrix();
        for (int i = 0; i < classes; ++i) {
            precision[i] = evaluator.precision(i);
            recall[i] = evaluator.recall(i);
            auroc[i] = evaluator.areaUnderROC(i);
            fMeasure[i] = evaluator.fMeasure(i);
            instancesPerClass[i] = 0.0;
            for (int j = 0; j < classes; ++j) {
                instancesPerClass[i] += confusionMatrix[i][j];
            }
        }

        m.put(new Metric("predictive_accuracy", "openml.evaluation.predictive_accuracy(1.0)"),
                new MetricScore(evaluator.pctCorrect() / 100, (int) evaluator.numInstances()));
        m.put(new Metric("kappa", "openml.evaluation.kappa(1.0)"),
                new MetricScore(evaluator.kappa(), (int) evaluator.numInstances()));

        m.put(new Metric("number_of_instances", "openml.evaluation.number_of_instances(1.0)"),
                new MetricScore(evaluator.numInstances(), instancesPerClass, (int) evaluator.numInstances()));

        m.put(new Metric("precision", "openml.evaluation.precision(1.0)"),
                new MetricScore(evaluator.weightedPrecision(), precision, (int) evaluator.numInstances()));
        m.put(new Metric("recall", "openml.evaluation.recall(1.0)"),
                new MetricScore(evaluator.weightedRecall(), recall, (int) evaluator.numInstances()));
        m.put(new Metric("f_measure", "openml.evaluation.f_measure(1.0)"),
                new MetricScore(evaluator.weightedFMeasure(), fMeasure, (int) evaluator.numInstances()));
        if (!Utils.isMissingValue(evaluator.weightedAreaUnderROC())) {
            m.put(new Metric("area_under_roc_curve", "openml.evaluation.area_under_roc_curve(1.0)"),
                    new MetricScore(evaluator.weightedAreaUnderROC(), auroc, (int) evaluator.numInstances()));
        }
        m.put(new Metric("confusion_matrix", "openml.evaluation.confusion_matrix(1.0)"),
                new MetricScore(confusionMatrix));
    }
    return m;
}

From source file:tcc.FeatureExtraction.java

public void knn() throws IOException {
    // convert the CSV file to ARFF
    CSVLoader loader = new CSVLoader();
    loader.setSource(new File("/root/TCC/Resultados/Parte 4 - Novos Casos/TamuraHaralickMomentos.csv"));
    Instances inst = loader.getDataSet();

    ArffSaver saver = new ArffSaver();
    saver.setInstances(inst);
    saver.setFile(new File("/root/TCC/Resultados/Parte 4 - Novos Casos/TamuraHaralickMomentos.arff"));
    saver.setDestination(new File("/root/TCC/Resultados/Parte 4 - Novos Casos/TamuraHaralickMomentos.arff"));
    saver.writeBatch();

    BufferedReader reader = new BufferedReader(
            new FileReader("/root/TCC/Resultados/Parte 4 - Novos Casos/TamuraHaralickMomentos.arff"));
    Instances data = new Instances(reader);
    reader.close();
    data.setClassIndex(data.numAttributes() - 1);

    // normalize the attributes
    try {
        Normalize norm = new Normalize();
        norm.setInputFormat(data);
        data = Filter.useFilter(data, norm);

    } catch (Exception ex) {
        Logger.getLogger(FeatureExtraction.class.getName()).log(Level.SEVERE, null, ex);
    }

    File csv = new File("/root/TCC/Resultados/knn.csv");
    FileWriter fw = new FileWriter(csv);
    BufferedWriter bw = new BufferedWriter(fw);

    for (int i = 1; i < 51; i++) {
        // instantiate the classifier with k = i
        IBk knn = new IBk();
        knn.setKNN(i);

        try {

            knn.buildClassifier(data);
            Evaluation eval = new Evaluation(data);
            //System.out.println(eval.toSummaryString("\nResults\n======\n", false));
            eval.crossValidateModel(knn, data, 10, new Random(1), new Object[] {});
            double auc = eval.areaUnderROC(1);
            System.out.println(auc);
            bw.write(Double.toString(auc));
            bw.newLine();

        } catch (Exception ex) {
            Logger.getLogger(FeatureExtraction.class.getName()).log(Level.SEVERE, null, ex);
        }
    }
    bw.close();

}

From source file:tcc.FeatureExtraction.java

public void rbf() throws IOException {
    // convert the CSV file to ARFF
    CSVLoader loader = new CSVLoader();
    loader.setSource(new File("/root/TCC/Resultados/Parte 4 - Novos Casos/TamuraHaralickMomentos.csv"));
    Instances inst = loader.getDataSet();

    ArffSaver saver = new ArffSaver();
    saver.setInstances(inst);
    saver.setFile(new File("/root/TCC/Resultados/Parte 4 - Novos Casos/TamuraHaralickMomentos.arff"));
    saver.setDestination(new File("/root/TCC/Resultados/Parte 4 - Novos Casos/TamuraHaralickMomentos.arff"));
    saver.writeBatch();

    BufferedReader reader = new BufferedReader(
            new FileReader("/root/TCC/Resultados/Parte 4 - Novos Casos/TamuraHaralickMomentos.arff"));
    Instances data = new Instances(reader);
    reader.close();
    data.setClassIndex(data.numAttributes() - 1);

    // normalize the attributes
    try {
        Normalize norm = new Normalize();
        norm.setInputFormat(data);
        data = Filter.useFilter(data, norm);

    } catch (Exception ex) {
        Logger.getLogger(FeatureExtraction.class.getName()).log(Level.SEVERE, null, ex);
    }

    File csv = new File("/root/TCC/Resultados/rbf.csv");
    FileWriter fw = new FileWriter(csv);
    BufferedWriter bw = new BufferedWriter(fw);

    for (int i = 1; i < 51; i++) {
        // instantiate the classifier with i clusters
        RBFNetwork rbf = new RBFNetwork();
        rbf.setNumClusters(i);

        try {

            rbf.buildClassifier(data);
            Evaluation eval = new Evaluation(data);
            //System.out.println(eval.toSummaryString("\nResults\n======\n", false));
            eval.crossValidateModel(rbf, data, 10, new Random(1), new Object[] {});
            double auc = eval.areaUnderROC(1);
            System.out.println(auc);
            bw.write(Double.toString(auc));
            bw.newLine();

        } catch (Exception ex) {
            Logger.getLogger(FeatureExtraction.class.getName()).log(Level.SEVERE, null, ex);
        }
    }
    bw.close();

}