Usage examples for weka.classifiers.Evaluation.evaluateModel
public static String evaluateModel(Classifier classifier, String[] options) throws Exception
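For orientation, a minimal sketch (not taken from any of the files below) showing both the static, options-based form of evaluateModel declared above and the instance form used in most of the examples; the ARFF file names and the J48 classifier are placeholder assumptions:

import weka.classifiers.Classifier;
import weka.classifiers.Evaluation;
import weka.classifiers.trees.J48;
import weka.core.Instances;
import weka.core.converters.ConverterUtils.DataSource;

public class EvaluateModelSketch {
    public static void main(String[] args) throws Exception {
        // 1) Static form: Weka parses the options (training file, test file, ...)
        //    and returns the full evaluation report as a String.
        String report = Evaluation.evaluateModel(new J48(),
                new String[] { "-t", "train.arff", "-T", "test.arff" }); // placeholder paths
        System.out.println(report);

        // 2) Instance form: evaluate an already trained classifier on a test set.
        Instances train = DataSource.read("train.arff"); // placeholder path
        Instances test = DataSource.read("test.arff");   // placeholder path
        train.setClassIndex(train.numAttributes() - 1);
        test.setClassIndex(test.numAttributes() - 1);
        Classifier cls = new J48();
        cls.buildClassifier(train);
        Evaluation eval = new Evaluation(train);
        eval.evaluateModel(cls, test);
        System.out.println(eval.toSummaryString());
    }
}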
From source file: focusedCrawler.target.PEBL.java
License: Open Source License
private String buildClassifier(String suffix) throws Exception {
    String trainingData = rootDir + File.separator + "trainData_" + suffix;
    // System.out.println("TRAIN:" + trainingData);
    String trainWekafile = wekaFileDir + "weka_" + suffix;
    String testFileDir = rootDir + File.separator + "testData_" + suffix;
    String outputModel = rootDir + File.separator + "model" + File.separator + "model_" + suffix;
    CreateTCWekaInput createWekaFile = new CreateTCWekaInput(new File(trainingData), new File(testFileDir), stoplist);
    attributes = createWekaFile.centroid2Weka(trainWekafile);
    double max = Double.MIN_VALUE;
    double cValue = 0;
    int count = 0;
    for (double c = 0.0625; count < 1; c = c * 0.5) {
        SMO classifier = new SMO();
        String[] argum = new String[] { "-t", trainWekafile, "-C", "" + c, "-v", "-d", outputModel + c };
        String output = Evaluation.evaluateModel(classifier, argum);
        int index = output.indexOf("Correctly Classified Instances");
        if (index >= 0) {
            int end = output.indexOf("%", index);
            String line = (output.substring(index, end)).trim();
            line = line.substring(line.lastIndexOf(" "));
            double accuracy = Double.parseDouble(line.trim());
            System.out.println("C=" + c + " acc=" + accuracy);
            if (accuracy > max) {
                max = accuracy;
                cValue = c;
            }
        }
        count++;
        if (c == 1) {
            testClassifier(trainWekafile + "_test", outputModel + c);
        }
    }
    return outputModel + cValue;
}
From source file: function.PercentageSplit.java
public static void percentageSplit(Instances data, Classifier cls) throws Exception {
    int trainSize = (int) Math.round(data.numInstances() * 0.8);
    int testSize = data.numInstances() - trainSize;
    Instances train = new Instances(data, 0, trainSize);
    Instances test = new Instances(data, trainSize, testSize);
    Evaluation eval = new Evaluation(train);
    eval.evaluateModel(cls, test);
    System.out.println(eval.toSummaryString());
}
From source file: function.PercentageSplit.java
public static double percentageSplitRate(Instances data, Classifier cls) throws Exception {
    int trainSize = (int) Math.round(data.numInstances() * 0.8);
    int testSize = data.numInstances() - trainSize;
    Instances train = new Instances(data, 0, trainSize);
    Instances test = new Instances(data, trainSize, testSize);
    Evaluation eval = new Evaluation(train);
    eval.evaluateModel(cls, test);
    return eval.pctCorrect();
}
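A possible driver for the two helpers above, not part of the original file: the ARFF path, the J48 classifier, and the import of function.PercentageSplit are assumptions. Because both helpers evaluate an already-trained classifier, it is trained here on the same leading 80% split that the helpers use internally.

import function.PercentageSplit;
import weka.classifiers.Classifier;
import weka.classifiers.trees.J48;
import weka.core.Instances;
import weka.core.converters.ConverterUtils.DataSource;

public class PercentageSplitDemo {
    public static void main(String[] args) throws Exception {
        Instances data = DataSource.read("dataset.arff"); // placeholder path
        data.setClassIndex(data.numAttributes() - 1);

        // Train on the first 80% of the instances, matching the split inside the helpers.
        int trainSize = (int) Math.round(data.numInstances() * 0.8);
        Classifier cls = new J48();
        cls.buildClassifier(new Instances(data, 0, trainSize));

        PercentageSplit.percentageSplit(data, cls);                          // prints the summary
        System.out.println(PercentageSplit.percentageSplitRate(data, cls));  // percent correct
    }
}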
From source file: general.Util.java
/**
 * Show learning statistics obtained with the full-training method.
 * @param data training data
 */
public static void FullSchema(Instances data) {
    try {
        Evaluation eval = new Evaluation(data);
        eval.evaluateModel(classifier, data);
        System.out.println(eval.toSummaryString("\nResults Full-Training\n\n", false));
    } catch (Exception ex) {
        Logger.getLogger(Util.class.getName()).log(Level.SEVERE, null, ex);
    }
}
From source file: general.Util.java
/**
 * Show learning statistics obtained on a separate test set.
 * @param testPath path to the test file
 * @param typeTestFile type of the test file ("arff" or "csv")
 */
public static void TestSchema(String testPath, String typeTestFile) {
    Instances testsets = null;
    // Load test instances based on file type and path
    if (typeTestFile.equals("arff")) {
        FileReader file = null;
        try {
            file = new FileReader(testPath);
            try (BufferedReader reader = new BufferedReader(file)) {
                testsets = new Instances(reader);
            }
            // setting class attribute
            testsets.setClassIndex(data.numAttributes() - 1);
        } catch (IOException ex) {
            Logger.getLogger(Util.class.getName()).log(Level.SEVERE, null, ex);
        } finally {
            try {
                if (file != null) {
                    file.close();
                }
            } catch (IOException ex) {
                Logger.getLogger(Util.class.getName()).log(Level.SEVERE, null, ex);
            }
        }
    } else if (typeTestFile.equals("csv")) {
        try {
            CSVLoader csv = new CSVLoader();
            csv.setFile(new File(testPath));
            // load into the test set (not the training data) and set its class attribute
            testsets = csv.getDataSet();
            testsets.setClassIndex(testsets.numAttributes() - 1);
        } catch (IOException ex) {
            Logger.getLogger(Util.class.getName()).log(Level.SEVERE, null, ex);
        }
    }

    // Evaluate the model on the test instances and print the results
    try {
        Evaluation eval = new Evaluation(Util.getData());
        eval.evaluateModel(Util.getClassifier(), testsets);
        System.out.println(eval.toSummaryString("\nResults\n\n", false));
    } catch (Exception e) {
        e.printStackTrace();
    }
}
From source file: general.Util.java
/**
 * Show learning statistics obtained with a percentage split.
 * @param data training data
 * @param trainPercent percentage of the data used for training
 * @param Classifier name of the classifier model to build
 */
public static void PercentageSplit(Instances data, double trainPercent, String Classifier) {
    try {
        int trainSize = (int) Math.round(data.numInstances() * trainPercent / 100);
        int testSize = data.numInstances() - trainSize;
        data.randomize(new Random(1));
        Instances train = new Instances(data, 0, trainSize);
        Instances test = new Instances(data, trainSize, testSize);
        train.setClassIndex(train.numAttributes() - 1);
        test.setClassIndex(test.numAttributes() - 1);

        switch (Classifier.toLowerCase()) {
        case "naivebayes":
            classifier = new NaiveBayes();
            break;
        case "j48-prune":
            classifier = new MyJ48(true, 0.25f);
            break;
        case "j48-unprune":
            classifier = new MyJ48(false, 0f);
            break;
        case "id3":
            classifier = new MyID3();
            break;
        default:
            break;
        }
        classifier.buildClassifier(train);

        for (int i = 0; i < test.numInstances(); i++) {
            try {
                double pred = classifier.classifyInstance(test.instance(i));
                System.out.print("ID: " + test.instance(i));
                System.out.print(", actual: " + test.classAttribute().value((int) test.instance(i).classValue()));
                System.out.println(", predicted: " + test.classAttribute().value((int) pred));
            } catch (Exception ex) {
                Logger.getLogger(Util.class.getName()).log(Level.SEVERE, null, ex);
            }
        }

        // Evaluate the model on the test instances and print the results
        try {
            Evaluation eval = new Evaluation(train);
            eval.evaluateModel(classifier, test);
            System.out.println(eval.toSummaryString("\nResults\n\n", false));
        } catch (Exception e) {
            e.printStackTrace();
        }
    } catch (Exception ex) {
        Logger.getLogger(Util.class.getName()).log(Level.SEVERE, null, ex);
    }
}
From source file: gr.demokritos.iit.cpgislanddetection.CpGIslandDetection.java
License: Apache License
/**
 * @param args the command line arguments
 */
public static void main(String[] args) throws IOException, ParseException, Exception {
    // (A large block of commented-out experiments precedes the live code in the original
    //  source: reading sequence files, a CpGIslandIdentification demo, a SequenceListFileReader
    //  and VectorAnalyzer demo, and default input file names used when no arguments are given.)

    //-----------------VECTOR ANALYSIS STARTS HERE--------------------------------------
    // read sequences from txt files
    SequenceListFileReader reader = new SequenceListFileReader();
    ArrayList<BaseSequence> lSeqs1 = new ArrayList<>();
    ArrayList<BaseSequence> lSeqs2 = new ArrayList<>();
    lSeqs1 = reader.getSequencesFromFile("C:\\Users\\Xenia\\Desktop\\files\\posSamples.txt");
    lSeqs2 = reader.getSequencesFromFile("C:\\Users\\Xenia\\Desktop\\files\\negSamples.txt");

    // create vectors for every sequence
    List<Vector<Integer>> listVectorForPositiveSamples = new ArrayList<>();
    List<Vector<Integer>> listVectorForNegativeSamples = new ArrayList<>();
    VectorAnalyzer v = new VectorAnalyzer();
    listVectorForPositiveSamples = v.analyze(lSeqs1);
    listVectorForNegativeSamples = v.analyze(lSeqs2);

    // create ARFF files for positive and negative samples
    FileCreatorARFF fc = new FileCreatorARFF();
    Instances positiveInstances = fc.createARFF(listVectorForPositiveSamples, "yes");
    Instances negativeInstances = fc.createARFF(listVectorForNegativeSamples, "no");
    //System.out.println(positiveInstances);

    // build and train classifier: set the class attribute
    positiveInstances.setClassIndex(positiveInstances.numAttributes() - 1);
    negativeInstances.setClassIndex(negativeInstances.numAttributes() - 1);

    // train NaiveBayes
    NaiveBayesUpdateable nb = new NaiveBayesUpdateable();
    nb.buildClassifier(positiveInstances);
    nb.buildClassifier(negativeInstances);
    Instance current;
    for (int i = 0; i < positiveInstances.numInstances(); i++) {
        current = positiveInstances.instance(i);
        nb.updateClassifier(current);
    }

    // Test the model
    Evaluation eTest = new Evaluation(positiveInstances);
    Instances isTestingSet = fc.createARFF(listVectorForNegativeSamples, "?");
    isTestingSet.setClassIndex(isTestingSet.numAttributes() - 1);
    eTest.evaluateModel(nb, isTestingSet);
    //------------------VECTOR ANALYSIS ENDS HERE---------------------------------------

    //----------------------------HMM CLASSIFIER STARTS HERE----------------------------------
    // The remainder of the original main() is commented out in the source file: it initializes
    // an HMM-based ISequenceClassifier, optionally obfuscates the negative training file,
    // trains the classifier on each input file, and classifies the test file.
    //-------------------------------HMM CLASSIFIER ENDS HERE-----------------------------------------

    //----------------------------HMM EVALUATION STARTS-----------------------------------------------
    // Also commented out: an evaluation loop that compares each HMM classification against a
    // CpGIslandIdentification check, tallies per-class successes and failures, computes the
    // total accuracy and the misclassification rate for each class, and finally classifies
    // n-gram graph representations of the sequences with NGramGraphClassifier.
}
From source file: gr.uoc.nlp.opinion.analysis.suggestion.AnalyzeSuggestions.java
/**
 * Evaluate a classifier on a test set and print the summary, per-class details,
 * and confusion matrix.
 * @param classifier the trained classifier
 * @param testset the test instances
 */
public void valuateSet(Classifier classifier, Instances testset) {
    Evaluation eval;
    try {
        eval = new Evaluation(this.trainset);
        eval.evaluateModel(classifier, testset);
        System.out.println(eval.toSummaryString());
        System.out.println(eval.toClassDetailsString());
        System.out.println(eval.toMatrixString());
    } catch (Exception ex) {
        Logger.getLogger(AnalyzeSuggestions.class.getName()).log(Level.SEVERE, null, ex);
    }
}
From source file: GroupProject.DMChartUI.java
/**
 * Action for the generate button.
 * Reads the user input from the table and the selected options, then classifies
 * that input with linear regression, a naive Bayes classifier, or a J48 tree.
 */
private void generateButtonActionPerformed(java.awt.event.ActionEvent evt) {//GEN-FIRST:event_generateButtonActionPerformed
    // TODO add your handling code here:
    //File file = new File("studentTemp.csv");
    CSVtoArff converter = new CSVtoArff();
    Instances students = null;
    Instances students2 = null;
    try {
        converter.convert("studentTemp.csv", "studentTemp.arff");
    } catch (IOException ex) { Logger.getLogger(DMChartUI.class.getName()).log(Level.SEVERE, null, ex); }
    try {
        students = new Instances(new BufferedReader(new FileReader("studentTemp.arff")));
        students2 = new Instances(new BufferedReader(new FileReader("studentTemp.arff")));
    } catch (IOException ex) { Logger.getLogger(DMChartUI.class.getName()).log(Level.SEVERE, null, ex); }

    // get column to predict values for
    //int target = students.numAttributes() - 1;
    int target = dataSelector.getSelectedIndex() + 1;
    System.out.printf("this is the target: %d\n", target);
    // set target
    students.setClassIndex(target);
    students2.setClassIndex(target);

    // case on which radio button is selected
    // Linear Regression
    if (LRB.isSelected()) {
        LinearRegression model = null;
        if (Lmodel != null) {
            model = Lmodel;
        } else {
            buildLinearModel();
            model = Lmodel;
        }
        System.out.println("im doing linear regression");
        equationDisplayArea.setText(model.toString());
        System.out.println("im going to get the instance");
        Instance prediction2 = getInstance(true);

        Remove remove = new Remove();
        int[] toremove = { 0, 2, 3, 4, 6, 7, 8, 9, 10, 11, 12, 13, 16, 17 };
        remove.setAttributeIndicesArray(toremove);
        try {
            remove.setInputFormat(students);
        } catch (Exception ex) { Logger.getLogger(DMChartUI.class.getName()).log(Level.SEVERE, null, ex); }
        Instances instNew = null;
        try {
            instNew = Filter.useFilter(students, remove);
        } catch (Exception ex) { Logger.getLogger(DMChartUI.class.getName()).log(Level.SEVERE, null, ex); }
        prediction2.setDataset(instNew);
        System.err.print("i got the instance");

        double result = 0;
        try {
            result = model.classifyInstance(prediction2);
        } catch (Exception ex) { Logger.getLogger(DMChartUI.class.getName()).log(Level.SEVERE, null, ex); }
        System.out.printf("the result : %f \n ", result);
        predictValue.setText(Double.toString(result));
        System.out.println("I'm done with Linear Regression");
    }
    // Naive Bayes
    else if (NBB.isSelected()) {
        Classifier cModel = null;
        if (NBmodel != null) {
            cModel = NBmodel;
        } else {
            buildNBClassifier();
            cModel = NBmodel;
        }
        System.out.println("im doing NB");

        // build test
        Evaluation eTest = null;
        try {
            eTest = new Evaluation(students);
        } catch (Exception ex) { Logger.getLogger(DMChartUI.class.getName()).log(Level.SEVERE, null, ex); }
        System.out.println("Using NB");
        try {
            eTest.evaluateModel(cModel, students);
        } catch (Exception ex) { Logger.getLogger(DMChartUI.class.getName()).log(Level.SEVERE, null, ex); }
        // display the test results to console
        String strSummary = eTest.toSummaryString();
        System.out.println(strSummary);

        // build instance to predict
        System.out.println("im going to get the instance");
        Instance prediction2 = getInstance(false);
        prediction2.setDataset(students);
        System.err.print("i got the instance");

        // replace with loop stating the class names
        // fit text based on name of categories
        double pred = 0;
        try {
            pred = cModel.classifyInstance(prediction2);
            prediction2.setClassValue(pred);
        } catch (Exception ex) { Logger.getLogger(DMChartUI.class.getName()).log(Level.SEVERE, null, ex); }
        // get the predicted value and set predictValue to it
        predictValue.setText(prediction2.classAttribute().value((int) pred));
        System.out.println("I'm done with Naive Bayes");

        double[] fDistribution2 = null;
        try {
            fDistribution2 = cModel.distributionForInstance(prediction2);
        } catch (Exception ex) { Logger.getLogger(DMChartUI.class.getName()).log(Level.SEVERE, null, ex); }
        double max = 0;
        int maxindex = 0;
        max = fDistribution2[0];
        for (int i = 0; i < fDistribution2.length; i++) {
            if (fDistribution2[i] > max) {
                maxindex = i;
                max = fDistribution2[i];
            }
            System.out.println("the value at " + i + " : " + fDistribution2[i]);
            System.out.println("the label at " + i + prediction2.classAttribute().value(i));
        }
        prediction2.setClassValue(maxindex);
        predictValue.setText(prediction2.classAttribute().value(maxindex));
    }
    // J48 Tree
    else if (JB.isSelected()) {
        System.out.println("im doing j48 ");
        Classifier jModel = null;
        if (Jmodel != null) {
            jModel = Jmodel;
        } else {
            buildJClassifier();
            jModel = Jmodel;
        }
        // test model
        Evaluation eTest2 = null;
        try {
            eTest2 = new Evaluation(students);
        } catch (Exception ex) { Logger.getLogger(DMChartUI.class.getName()).log(Level.SEVERE, null, ex); }
        System.out.println("Using J48 test");
        try {
            eTest2.evaluateModel(jModel, students);
        } catch (Exception ex) { Logger.getLogger(DMChartUI.class.getName()).log(Level.SEVERE, null, ex); }
        String strSummary2 = eTest2.toSummaryString();
        System.out.println(strSummary2);

        System.out.println("im going to get the instance");
        Instance prediction2 = getInstance(false);
        prediction2.setDataset(students);
        System.err.print("i got the instance\n");
        double pred = 0;
        try {
            pred = jModel.classifyInstance(prediction2);
            prediction2.setClassValue(pred);
            System.out.println("i did a prediction");
        } catch (Exception ex) { Logger.getLogger(DMChartUI.class.getName()).log(Level.SEVERE, null, ex); }
        // get the predicted value and set predictValue to it
        System.out.println("this was pred:" + pred);
        predictValue.setText(prediction2.classAttribute().value((int) pred));
        System.out.println("I'm done with J48");

        // replace with loop stating the class names
        // fit text based on name of categories
        double[] fDistribution2 = null;
        try {
            fDistribution2 = jModel.distributionForInstance(prediction2);
        } catch (Exception ex) { Logger.getLogger(DMChartUI.class.getName()).log(Level.SEVERE, null, ex); }
        double max = 0;
        int maxindex = 0;
        max = fDistribution2[0];
        for (int i = 0; i < fDistribution2.length; i++) {
            if (fDistribution2[i] > max) {
                maxindex = i;
                max = fDistribution2[i];
            }
            System.out.println("the value at " + i + " : " + fDistribution2[i]);
            System.out.println("the label at " + i + " " + prediction2.classAttribute().value(i));
        }
        prediction2.setClassValue(maxindex);
        predictValue.setText(prediction2.classAttribute().value(maxindex));
    }
}
From source file: gyc.OverBoostM1.java
License: Open Source License
/**
 * Boosting method. Boosts using resampling.
 *
 * @param data the training data to be used for generating the boosted classifier
 * @throws Exception if the classifier could not be built successfully
 */
protected void buildClassifierUsingResampling(Instances data) throws Exception {

    Instances trainData, sample, training;
    double epsilon, reweight, sumProbs;
    Evaluation evaluation;
    int numInstances = data.numInstances();
    Random randomInstance = new Random(m_Seed);
    int resamplingIterations = 0;

    // Initialize data
    m_Betas = new double[m_Classifiers.length];
    m_NumIterationsPerformed = 0;

    // Create a copy of the data so that when the weights are diddled
    // with it doesn't mess up the weights for anyone else
    training = new Instances(data, 0, numInstances);
    sumProbs = training.sumOfWeights();
    for (int i = 0; i < training.numInstances(); i++) {
        training.instance(i).setWeight(training.instance(i).weight() / sumProbs);
    }

    // Do bootstrap iterations
    for (m_NumIterationsPerformed = 0; m_NumIterationsPerformed < m_Classifiers.length; m_NumIterationsPerformed++) {
        if (m_Debug) {
            System.err.println("Training classifier " + (m_NumIterationsPerformed + 1));
        }

        // Select instances to train the classifier on
        if (m_WeightThreshold < 100) {
            trainData = selectWeightQuantile(training, (double) m_WeightThreshold / 100);
        } else {
            trainData = new Instances(training);
        }

        // Resample
        resamplingIterations = 0;
        double[] weights = new double[trainData.numInstances()];
        for (int i = 0; i < weights.length; i++) {
            weights[i] = trainData.instance(i).weight();
        }
        do {
            sample = trainData.resampleWithWeights(randomInstance, weights);

            int classNum[] = sample.attributeStats(sample.classIndex()).nominalCounts;
            int minC, nMin = classNum[0];
            int majC, nMaj = classNum[1];
            if (nMin < nMaj) {
                minC = 0;
                majC = 1;
            } else {
                minC = 1;
                majC = 0;
                nMin = classNum[1];
                nMaj = classNum[0];
            }
            //System.out.println("minC=" + nMin + "; majC=" + nMaj);

            /*
             * balance the data which boosting generates for training the base classifier
             */
            //System.out.println("before:" + classNum[0] + "-" + classNum[1]);
            Instances sampleData = randomSampling(sample, majC, minC, nMaj, nMaj, randomInstance);
            //classNum = sampleData.attributeStats(sampleData.classIndex()).nominalCounts;
            //System.out.println("after:" + classNum[0] + "-" + classNum[1]);

            // Build and evaluate classifier
            m_Classifiers[m_NumIterationsPerformed].buildClassifier(sampleData);
            evaluation = new Evaluation(data);
            evaluation.evaluateModel(m_Classifiers[m_NumIterationsPerformed], training);
            epsilon = evaluation.errorRate();
            resamplingIterations++;
        } while (Utils.eq(epsilon, 0) && (resamplingIterations < MAX_NUM_RESAMPLING_ITERATIONS));

        // Stop if error too big or 0
        if (Utils.grOrEq(epsilon, 0.5) || Utils.eq(epsilon, 0)) {
            if (m_NumIterationsPerformed == 0) {
                m_NumIterationsPerformed = 1; // If we're the first we have to use it
            }
            break;
        }

        // Determine the weight to assign to this model
        m_Betas[m_NumIterationsPerformed] = Math.log((1 - epsilon) / epsilon);
        reweight = (1 - epsilon) / epsilon;
        if (m_Debug) {
            System.err.println("\terror rate = " + epsilon + " beta = " + m_Betas[m_NumIterationsPerformed]);
        }

        // Update instance weights
        setWeights(training, reweight);
    }
}