List of usage examples for the weka.classifiers.Evaluation constructor
public Evaluation(Instances data) throws Exception
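A minimal sketch of the pattern the examples below share: load a dataset, set its class attribute, construct an Evaluation from it (the constructor takes the dataset that defines the header and class priors), then evaluate a classifier, for example via cross-validation. The file name and the J48 classifier here are arbitrary placeholders, not taken from any example below.

import java.util.Random;
import weka.classifiers.Evaluation;
import weka.classifiers.trees.J48;
import weka.core.Instances;
import weka.core.converters.ConverterUtils.DataSource;

public class EvaluationSketch {
    public static void main(String[] args) throws Exception {
        // Load a dataset (placeholder path) and mark the last attribute as the class.
        Instances data = DataSource.read("iris.arff");
        data.setClassIndex(data.numAttributes() - 1);

        // The Evaluation constructor uses the dataset to set up the header and class priors.
        Evaluation eval = new Evaluation(data);

        // 10-fold cross-validation of an (untrained) classifier on the same data.
        eval.crossValidateModel(new J48(), data, 10, new Random(1));
        System.out.println(eval.toSummaryString("\nResults\n======\n", false));
    }
}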
From source file:Pair.java
License:Open Source License
private double getTestError() throws Exception {
    Evaluation evaluation = new Evaluation(testData);
    evaluation.evaluateModel(this, testData);
    return evaluation.errorRate();
}
From source file:CopiaSeg3.java
public static Evaluation simpleClassify(Classifier model, Instances trainingSet, Instances testingSet)
        throws Exception {
    Evaluation validation = new Evaluation(trainingSet);

    model.buildClassifier(trainingSet);
    validation.evaluateModel(model, testingSet);

    // Print the Weka Explorer-style summary:
    String strSummary = validation.toSummaryString();
    System.out.println(strSummary);

    return validation;
}
From source file:MLP.java
MLP() {
    try {
        FileReader trainreader = new FileReader("C:\\new.arff");
        FileReader testreader = new FileReader("C:\\new.arff");
        Instances train = new Instances(trainreader);
        Instances test = new Instances(testreader);
        train.setClassIndex(train.numAttributes() - 1);
        test.setClassIndex(test.numAttributes() - 1);

        MultilayerPerceptron mlp = new MultilayerPerceptron();
        mlp.setOptions(Utils.splitOptions("-L 0.3 -M 0.2 -N 500 -V 0 -S 0 -E 20 -H 4"));
        mlp.buildClassifier(train);

        Evaluation eval = new Evaluation(train);
        eval.evaluateModel(mlp, test);
        System.out.println(eval.toSummaryString("\nResults\n======\n", false));

        trainreader.close();
        testreader.close();
    } catch (Exception ex) {
        ex.printStackTrace();
    }
}
From source file:MainFrame.java
private void jButton1MouseClicked(java.awt.event.MouseEvent evt) {//GEN-FIRST:event_jButton1MouseClicked
    double[][] food_sources = new double[0][0];
    Classifier classifier;
    Evaluation eval;
    int N;
    PreparingSteps pr = new PreparingSteps();
    int iterationnumber = Integer.parseInt(iterationnumber_cb.getSelectedItem().toString());
    double modificationRate = Double.parseDouble(modificationrate_cb.getSelectedItem().toString());
    int foldnumber = Integer.parseInt(crossvalfold_cb.getSelectedItem().toString());
    //statuslabel.setText("Calculating...");
    try {
        N = pr.getReadFileData(path).numAttributes();
        Instances data = pr.getReadFileData(path);
        food_sources = pr.createFoodSources(data.numAttributes(), food_sources); // food sources are created

        Debug.Random rand = new Debug.Random(1);
        classifier = new IBk(); // classifier is created
        eval = new Evaluation(data);

        for (int i = 0; i < N - 1; i++) {
            food_source = new double[N];
            for (int j = 0; j < N; j++) {
                food_source[j] = food_sources[i][j];
            }
            Instances data1 = pr.getReadFileData(path);
            food_sources[i][N - 1] = pr.getSourceFitnessValue(foldnumber, N, rand, data1, food_source, eval,
                    classifier);
        }

        BeesProcesses bees = new BeesProcesses();
        double[] neighbor;
        int e = 0;
        while (e < iterationnumber) {
            System.out.println("iter:" + e);
            for (int i = 0; i < N - 1; i++) {
                neighbor = new double[N];
                food_source = new double[N];
                for (int j = 0; j < N; j++)
                    food_source[j] = food_sources[i][j];
                Instances data2 = pr.getReadFileData(path);
                neighbor = bees.employedBeeProcess(food_source, modificationRate, i); // neighbor is created
                neighbor[N - 1] = pr.getSourceFitnessValue(foldnumber, N, rand, data2, neighbor, eval,
                        classifier); // fitness value of the neighbor is computed
                double a = food_source[N - 1];
                double b = neighbor[N - 1];
                if (b > a) {
                    for (int j = 0; j < N; j++) {
                        food_sources[i][j] = neighbor[j];
                    }
                }
            }
            e++;
        } // end of while

        double[][] onlooker_foodsources = new double[N - 1][N];
        onlooker_foodsources = bees.onlookerBeeProcess(N, 0.5);
        for (int i = 0; i < N - 1; i++) {
            double[] onlooker_food_source = new double[N];
            for (int j = 0; j < N; j++) {
                onlooker_food_source[j] = onlooker_foodsources[i][j];
            }
            Instances data3 = pr.getReadFileData(path);
            onlooker_foodsources[i][N - 1] = pr.getSourceFitnessValue(foldnumber, N, rand, data3,
                    onlooker_food_source, eval, classifier);
        }

        int m = 0;
        while (m < 20) {
            double[][] onlooker_foodsources2 = new double[N - 1][N];
            onlooker_foodsources2 = bees.onlookerBeeProcess(N, 0.5);
            for (int i = 0; i < N - 1; i++) {
                double[] onlooker_food_source = new double[N];
                for (int j = 0; j < N; j++) {
                    onlooker_food_source[j] = onlooker_foodsources2[i][j];
                }
                Instances data4 = pr.getReadFileData(path);
                onlooker_food_source[N - 1] = pr.getSourceFitnessValue(foldnumber, N, rand, data4,
                        onlooker_food_source, eval, classifier);
                for (int j = 0; j < N - 1; j++) {
                    if (onlooker_foodsources[j][N - 1] < onlooker_foodsources2[j][N - 1]) {
                        for (int k = 0; k < N; k++) {
                            onlooker_foodsources[j][k] = onlooker_foodsources2[j][k];
                        }
                    }
                }
            }
            m++;
        }

        // feature selection
        double[] selected_features = new double[N];
        double max_fit = 0.0;
        for (int i = 0; i < N - 1; i++) {
            if (food_sources[i][N - 1] > max_fit) {
                max_fit = food_sources[i][N - 1];
                for (int j = 0; j < N; j++) {
                    selected_features[j] = food_sources[i][j];
                }
            }
        }
        for (int i = 0; i < N - 1; i++) {
            if (onlooker_foodsources[i][N - 1] > max_fit) {
                max_fit = onlooker_foodsources[i][N - 1];
                for (int j = 0; j < N; j++) {
                    selected_features[j] = onlooker_foodsources[i][j];
                }
            }
        }

        System.out.println(" ");
        String sf_wfmeasure = "";
        for (int i = 0; i < N; i++) {
            System.out.print(selected_features[i] + " ");
            if (i == N - 1) {
                sf_wfmeasure = Double.toString(selected_features[i]);
            } else {
                if (selected_features[i] == 1.0)
                    sf_indexes = sf_indexes + Integer.toString(i) + ",";
            }
        }
        selectedfeaturesindexes_tf.setText(sf_indexes);
        //weightedfmeasure_tf.setText(sf_wfmeasure);
        //statuslabel.setText("Finished.");
    } catch (Exception ex) {
        Logger.getLogger(MainFrame.class.getName()).log(Level.SEVERE, null, ex);
    }
}
From source file:GrowTree.java
public void crossvalidaion() throws Exception {
    DataSource source = new DataSource(
            "F:\\backup\\BTH\\#6DV2542 Machine Learning\\WEKA experiments\\UCI\\iris.arff");
    Instances newData = source.getDataSet();
    // the class attribute must be set before constructing the Evaluation
    if (newData.classIndex() == -1)
        newData.setClassIndex(newData.numAttributes() - 1);
    Evaluation eval = new Evaluation(newData);
    eval.crossValidateModel(tree, newData, 10, new Random(1));
    System.out.println(eval.toSummaryString("\nResults\n======\n", false));
}
From source file:WrapperSubset.java
License:Open Source License
/**
 * Evaluates a subset of attributes
 *
 * @param subset a bitset representing the attribute subset to be evaluated
 * @return the error rate
 * @throws Exception if the subset could not be evaluated
 */
@Override
public double evaluateSubset(BitSet subset) throws Exception {
    // if (subset.isEmpty())
    // return 0.0;

    double evalMetric = 0;
    double[] repError = new double[5];
    int numAttributes = 0;
    int i, j;
    Random Rnd = new Random(m_seed);
    Remove delTransform = new Remove();
    delTransform.setInvertSelection(true);

    // copy the instances
    Instances trainCopy = new Instances(m_trainInstances);

    // count attributes set in the BitSet
    for (i = 0; i < m_numAttribs; i++) {
        if (subset.get(i)) {
            numAttributes++;
        }
    }

    // set up an array of attribute indexes for the filter (+1 for the class)
    int[] featArray = new int[numAttributes + 1];
    for (i = 0, j = 0; i < m_numAttribs; i++) {
        if (subset.get(i)) {
            featArray[j++] = i;
        }
    }
    featArray[j] = m_classIndex;
    delTransform.setAttributeIndicesArray(featArray);
    delTransform.setInputFormat(trainCopy);
    trainCopy = Filter.useFilter(trainCopy, delTransform);

    // max of 5 repetitions of cross validation
    for (i = 0; i < 5; i++) {
        m_Evaluation = new Evaluation(trainCopy);
        m_Evaluation.crossValidateModel(m_BaseClassifier, trainCopy, m_folds, Rnd);

        switch (m_evaluationMeasure) {
        case EVAL_DEFAULT:
            repError[i] = m_Evaluation.errorRate();
            // if (m_trainInstances.classAttribute().isNominal()) {
            // repError[i] = 1.0 - repError[i];
            // }
            break;
        case EVAL_ACCURACY:
            repError[i] = m_Evaluation.errorRate();
            // if (m_trainInstances.classAttribute().isNominal()) {
            // repError[i] = 1.0 - repError[i];
            // }
            break;
        case EVAL_RMSE:
            repError[i] = m_Evaluation.rootMeanSquaredError();
            break;
        case EVAL_MAE:
            repError[i] = m_Evaluation.meanAbsoluteError();
            break;
        case EVAL_FMEASURE:
            if (m_IRClassVal < 0) {
                repError[i] = m_Evaluation.weightedFMeasure();
            } else {
                repError[i] = m_Evaluation.fMeasure(m_IRClassVal);
            }
            break;
        case EVAL_AUC:
            if (m_IRClassVal < 0) {
                repError[i] = m_Evaluation.weightedAreaUnderROC();
            } else {
                repError[i] = m_Evaluation.areaUnderROC(m_IRClassVal);
            }
            break;
        case EVAL_AUPRC:
            if (m_IRClassVal < 0) {
                repError[i] = m_Evaluation.weightedAreaUnderPRC();
            } else {
                repError[i] = m_Evaluation.areaUnderPRC(m_IRClassVal);
            }
            break;
        case EVAL_NEW:
            repError[i] = (1.0 - m_Evaluation.errorRate()) + m_IRfactor * m_Evaluation.weightedFMeasure();
            break;
        }

        // check on the standard deviation
        if (!repeat(repError, i + 1)) {
            i++;
            break;
        }
    }

    for (j = 0; j < i; j++) {
        evalMetric += repError[j];
    }
    evalMetric /= i;
    m_Evaluation = null;

    switch (m_evaluationMeasure) {
    case EVAL_DEFAULT:
    case EVAL_ACCURACY:
    case EVAL_RMSE:
    case EVAL_MAE:
        if (m_trainInstances.classAttribute().isNominal()
                && (m_evaluationMeasure == EVAL_DEFAULT || m_evaluationMeasure == EVAL_ACCURACY)) {
            evalMetric = 1 - evalMetric;
        } else {
            evalMetric = -evalMetric; // maximize
        }
        break;
    }

    return evalMetric;
}
From source file:WekaClassify.java
License:Open Source License
public void EvaluationRun(Instances anInstance) throws Exception {
    m_Evaluation = new Evaluation(anInstance);

    // Evaluation modes are
    //   cross-validation (assumed 10 fold)
    //   training-data
    //   split (optional percentage value, default 66%)
    if (m_EvaluationMode != null) {
        switch (m_EvaluationMode) {
        case "cross-validation":
            int fold = 10; // default
            if (m_EvaluationOptions.length > 0) {
                fold = Integer.parseInt(m_EvaluationOptions[0]);
            }
            FoldRun(anInstance, fold);
            break;
        case "training-set":
            TrainingSetRun(anInstance);
            break;
        case "split":
            double split = 0.66;
            if (m_EvaluationOptions.length > 0) {
                split = Double.parseDouble(m_EvaluationOptions[0]);
            }
            SplitRun(anInstance, split);
            break;
        default:
            System.out.println("Incorrect Evaluation Mode: " + m_EvaluationMode);
            System.exit(-1);
            break;
        }
    } else {
        TrainingSetRun(anInstance);
    }
}
From source file:activeSegmentation.learning.WekaClassifier.java
License:Open Source License
/**
 * Evaluates the classifier using the test dataset and stores the evaluation.
 *
 * @param instances The instances to test
 * @return The predictions made on the test instances
 */
@Override
public double[] testModel(IDataSet instances) {
    try {
        // test the current classifier with the test set
        Evaluation evaluator = new Evaluation(new Instances(instances.getDataset(), 0));
        double[] predict = evaluator.evaluateModel(classifier, instances.getDataset());
        System.out.println(evaluator.toSummaryString());
        return predict;
    } catch (Exception e) {
        Logger.getLogger(WekaClassifier.class.getName()).log(Level.SEVERE, null, e);
    }
    return null;
}
From source file:adams.flow.transformer.WekaBootstrapping.java
License:Open Source License
/**
 * Executes the flow item.
 *
 * @return null if everything is fine, otherwise error message
 */
@Override
protected String doExecute() {
    String result;
    SpreadSheet sheet;
    Row row;
    Evaluation evalAll;
    Evaluation eval;
    WekaEvaluationContainer cont;
    TIntList indices;
    Random random;
    int i;
    int iteration;
    int size;
    List<Prediction> preds;
    Instances header;
    Instances data;
    ArrayList<Attribute> atts;
    Instance inst;
    boolean numeric;
    int classIndex;
    Double[] errors;
    Double[] errorsRev;
    Percentile<Double> perc;
    Percentile<Double> percRev;
    TIntList subset;

    result = null;

    if (m_InputToken.getPayload() instanceof Evaluation) {
        evalAll = (Evaluation) m_InputToken.getPayload();
    } else {
        cont = (WekaEvaluationContainer) m_InputToken.getPayload();
        evalAll = (Evaluation) cont.getValue(WekaEvaluationContainer.VALUE_EVALUATION);
    }

    if ((evalAll.predictions() == null) || (evalAll.predictions().size() == 0))
        result = "No predictions available!";

    if (result == null) {
        // init spreadsheet
        sheet = new DefaultSpreadSheet();
        row = sheet.getHeaderRow();
        row.addCell("S").setContentAsString("Subsample");
        for (EvaluationStatistic s : m_StatisticValues)
            row.addCell(s.toString()).setContentAsString(s.toString());
        for (i = 0; i < m_Percentiles.length; i++) {
            switch (m_ErrorCalculation) {
            case ACTUAL_MINUS_PREDICTED:
                row.addCell("perc-AmP-" + i).setContentAsString("Percentile-AmP-" + m_Percentiles[i]);
                break;
            case PREDICTED_MINUS_ACTUAL:
                row.addCell("perc-PmA-" + i).setContentAsString("Percentile-PmA-" + m_Percentiles[i]);
                break;
            case ABSOLUTE:
                row.addCell("perc-Abs-" + i).setContentAsString("Percentile-Abs-" + m_Percentiles[i]);
                break;
            case BOTH:
                row.addCell("perc-AmP-" + i).setContentAsString("Percentile-AmP-" + m_Percentiles[i]);
                row.addCell("perc-PmA-" + i).setContentAsString("Percentile-PmA-" + m_Percentiles[i]);
                break;
            default:
                throw new IllegalStateException("Unhandled error calculation: " + m_ErrorCalculation);
            }
        }

        // set up bootstrapping
        preds = evalAll.predictions();
        random = new Random(m_Seed);
        indices = new TIntArrayList();
        size = (int) Math.round(preds.size() * m_Percentage);
        header = evalAll.getHeader();
        numeric = header.classAttribute().isNumeric();
        m_ClassIndex.setData(header.classAttribute());
        if (numeric)
            classIndex = -1;
        else
            classIndex = m_ClassIndex.getIntIndex();
        for (i = 0; i < preds.size(); i++)
            indices.add(i);

        // create fake evaluations
        subset = new TIntArrayList();
        for (iteration = 0; iteration < m_NumSubSamples; iteration++) {
            if (isStopped()) {
                sheet = null;
                break;
            }

            // determine subset
            subset.clear();
            if (m_WithReplacement) {
                for (i = 0; i < size; i++)
                    subset.add(indices.get(random.nextInt(preds.size())));
            } else {
                indices.shuffle(random);
                for (i = 0; i < size; i++)
                    subset.add(indices.get(i));
            }

            // create dataset from predictions
            errors = new Double[size];
            errorsRev = new Double[size];
            atts = new ArrayList<>();
            atts.add(header.classAttribute().copy("Actual"));
            data = new Instances(header.relationName() + "-" + (iteration + 1), atts, size);
            data.setClassIndex(0);
            for (i = 0; i < subset.size(); i++) {
                inst = new DenseInstance(preds.get(subset.get(i)).weight(),
                        new double[] { preds.get(subset.get(i)).actual() });
                data.add(inst);
                switch (m_ErrorCalculation) {
                case ACTUAL_MINUS_PREDICTED:
                    errors[i] = preds.get(subset.get(i)).actual() - preds.get(subset.get(i)).predicted();
                    break;
                case PREDICTED_MINUS_ACTUAL:
                    errorsRev[i] = preds.get(subset.get(i)).predicted() - preds.get(subset.get(i)).actual();
                    break;
                case ABSOLUTE:
                    errors[i] = Math
                            .abs(preds.get(subset.get(i)).actual() - preds.get(subset.get(i)).predicted());
                    break;
                case BOTH:
                    errors[i] = preds.get(subset.get(i)).actual() - preds.get(subset.get(i)).predicted();
                    errorsRev[i] = preds.get(subset.get(i)).predicted() - preds.get(subset.get(i)).actual();
                    break;
                default:
                    throw new IllegalStateException("Unhandled error calculation: " + m_ErrorCalculation);
                }
            }

            // perform "fake" evaluation
            try {
                eval = new Evaluation(data);
                for (i = 0; i < subset.size(); i++) {
                    if (numeric)
                        eval.evaluateModelOnceAndRecordPrediction(
                                new double[] { preds.get(subset.get(i)).predicted() }, data.instance(i));
                    else
                        eval.evaluateModelOnceAndRecordPrediction(
                                ((NominalPrediction) preds.get(subset.get(i))).distribution().clone(),
                                data.instance(i));
                }
            } catch (Exception e) {
                result = handleException(
                        "Failed to create 'fake' Evaluation object (iteration: " + (iteration + 1) + ")!", e);
                break;
            }

            // add row
            row = sheet.addRow();
            row.addCell("S").setContent(iteration + 1);
            for (EvaluationStatistic s : m_StatisticValues) {
                try {
                    row.addCell(s.toString()).setContent(EvaluationHelper.getValue(eval, s, classIndex));
                } catch (Exception e) {
                    getLogger().log(Level.SEVERE,
                            "Failed to calculate statistic in iteration #" + (iteration + 1) + ": " + s, e);
                    row.addCell(s.toString()).setMissing();
                }
            }
            for (i = 0; i < m_Percentiles.length; i++) {
                perc = new Percentile<>();
                perc.addAll(errors);
                percRev = new Percentile<>();
                percRev.addAll(errorsRev);
                switch (m_ErrorCalculation) {
                case ACTUAL_MINUS_PREDICTED:
                    row.addCell("perc-AmP-" + i).setContent(perc.getPercentile(m_Percentiles[i].doubleValue()));
                    break;
                case PREDICTED_MINUS_ACTUAL:
                    row.addCell("perc-PmA-" + i)
                            .setContent(percRev.getPercentile(m_Percentiles[i].doubleValue()));
                    break;
                case ABSOLUTE:
                    row.addCell("perc-Abs-" + i).setContent(perc.getPercentile(m_Percentiles[i].doubleValue()));
                    break;
                case BOTH:
                    row.addCell("perc-AmP-" + i).setContent(perc.getPercentile(m_Percentiles[i].doubleValue()));
                    row.addCell("perc-PmA-" + i)
                            .setContent(percRev.getPercentile(m_Percentiles[i].doubleValue()));
                    break;
                default:
                    throw new IllegalStateException("Unhandled error calculation: " + m_ErrorCalculation);
                }
            }
        }

        if ((result == null) && (sheet != null))
            m_OutputToken = new Token(sheet);
    }

    return result;
}
From source file:adams.flow.transformer.WekaStreamEvaluator.java
License:Open Source License
/**
 * Executes the flow item.
 *
 * @return null if everything is fine, otherwise error message
 */
@Override
protected String doExecute() {
    String result;
    Instance inst;
    Instances data;

    result = null;

    inst = (Instance) m_InputToken.getPayload();
    data = inst.dataset();
    if (m_Evaluation == null) {
        try {
            m_Evaluation = new Evaluation(data);
            m_Current = 0;
            m_Header = data;
            initOutputBuffer();
            m_Output.setHeader(m_Header);
        } catch (Exception e) {
            result = handleException("Failed to set up evaluation!", e);
        }
    }

    // evaluate/train
    if (result == null) {
        try {
            if (m_Classifier == null) {
                m_Classifier = getClassifierInstance();
                m_Classifier.buildClassifier(data);
            }
            if (m_Current > 0) {
                if (m_DiscardPredictions)
                    m_Evaluation.evaluateModelOnce(m_Classifier, inst);
                else
                    m_Evaluation.evaluateModelOnceAndRecordPrediction(m_Classifier, inst);
            }
            ((UpdateableClassifier) m_Classifier).updateClassifier(inst);
        } catch (Exception e) {
            result = handleException("Failed to evaluate/update the classifier!", e);
        }
    }

    // output?
    m_Current++;
    if (m_Current % m_Interval == 0) {
        if (m_Output instanceof Null) {
            m_OutputToken = new Token(new WekaEvaluationContainer(m_Evaluation));
        } else {
            if (m_AlwaysUseContainer)
                m_OutputToken = new Token(
                        new WekaEvaluationContainer(m_Evaluation, null, m_Output.getBuffer().toString()));
            else
                m_OutputToken = new Token(m_Output.getBuffer().toString());
        }
    }

    return result;
}