List of usage examples for the opennlp.tools.tokenize.TokenSampleStream constructor
public TokenSampleStream(ObjectStream<String> sentences)
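TokenSampleStream converts a stream of training-format lines into TokenSample objects: each line is one sentence whose tokens are separated by whitespace, with the tag <SPLIT> marking token boundaries that carry no whitespace in the original text. Before the full examples, here is a minimal sketch of reading samples (the sentences are invented for illustration; the API is the OpenNLP 1.5.x-era one used by the examples below):

import java.util.Arrays;
import opennlp.tools.tokenize.TokenSample;
import opennlp.tools.tokenize.TokenSampleStream;
import opennlp.tools.util.ObjectStream;
import opennlp.tools.util.ObjectStreamUtils;

public class TokenSampleStreamExample {
    public static void main(String[] args) throws Exception {
        // Two lines of token training data; <SPLIT> separates adjacent
        // tokens that are not separated by whitespace.
        ObjectStream<String> lines = ObjectStreamUtils.createObjectStream(
                "It<SPLIT>'s a sample sentence<SPLIT>.",
                "Here is another<SPLIT>, shorter one<SPLIT>.");
        ObjectStream<TokenSample> samples = new TokenSampleStream(lines);
        try {
            TokenSample sample;
            while ((sample = samples.read()) != null) {
                System.out.println(sample.getText());                        // plain sentence text
                System.out.println(Arrays.toString(sample.getTokenSpans())); // gold token offsets
            }
        } finally {
            samples.close();
        }
    }
}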
From source file: com.civprod.writerstoolbox.OpenNLP.training.TokenizerTrainer.java
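This example trains a TokenizerME with leave-one-out cross-validation: for each file split it concatenates the training files, wraps them in a TokenSampleStream, trains a model, and scores its F-measure, precision, and recall against a standard tokenizer on the held-out file; it then trains a final model on all files and serializes it to en-fiction-token.bin.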
private void cmdTrainActionPerformed(java.awt.event.ActionEvent evt) {//GEN-FIRST:event_cmdTrainActionPerformed
    final TokenizerTrainer tempThis = this;
    new Thread(() -> {
        textTestResults.setText("");
        Charset charset = Charset.forName("UTF-8");
        //create TokenizerFactory part of the training context
        String alphaNumericRegex = txtAlphaNumericPattern.getText();
        alphaNumericRegex = alphaNumericRegex.trim();
        if (alphaNumericRegex.isEmpty()) {
            alphaNumericRegex = "^[A-Za-z0-9]+$";
        }
        Pattern alphaNumericPattern = Pattern.compile(alphaNumericRegex);
        TokenizerFactory myTokenizerFactory = new TokenizerFactory("EN", mAbbreviationDictionary,
                this.cbUseAlphaNumericOptimization.isSelected(), alphaNumericPattern);
        Tokenizer stdTokenizer = null;
        try {
            stdTokenizer = OpenNLPUtils.createTokenizer();
        } catch (IOException ex) {
            Logger.getLogger(TokenizerTrainer.class.getName()).log(Level.SEVERE, null, ex);
        }
        List<FileSplit> FileSplits = FileSplit.generateFileSplitsLOO(mFileCollectionListModel);
        File trainingFile = new File("en-token.train");
        File testFile = new File("en-token.test");
        SummaryStatistics curFStats = new SummaryStatistics();
        SummaryStatistics curRecallStats = new SummaryStatistics();
        SummaryStatistics curPrecisionStats = new SummaryStatistics();
        SummaryStatistics stdFStats = new SummaryStatistics();
        SummaryStatistics stdRecallStats = new SummaryStatistics();
        SummaryStatistics stdPrecisionStats = new SummaryStatistics();
        java.io.BufferedOutputStream trainingFileWriter = null;
        for (FileSplit curFileSplit : FileSplits) {
            //create training file
            try {
                trainingFileWriter = new java.io.BufferedOutputStream(
                        new java.io.FileOutputStream(trainingFile));
                for (File curTrainingFile : curFileSplit.getTrainingFiles()) {
                    java.io.BufferedInputStream curTrainingFileReader = null;
                    try {
                        curTrainingFileReader = new java.io.BufferedInputStream(
                                new java.io.FileInputStream(curTrainingFile));
                        while (curTrainingFileReader.available() > 0) {
                            trainingFileWriter.write(curTrainingFileReader.read());
                        }
                    } catch (IOException ex) {
                        Logger.getLogger(SentenceDetectorTrainer.class.getName()).log(Level.SEVERE, null, ex);
                    } finally {
                        if (curTrainingFileReader != null) {
                            curTrainingFileReader.close();
                        }
                    }
                }
                trainingFileWriter.write('\n');
            } catch (IOException ex) {
                Logger.getLogger(SentenceDetectorTrainer.class.getName()).log(Level.SEVERE, null, ex);
            } finally {
                if (trainingFileWriter != null) {
                    try {
                        trainingFileWriter.close();
                    } catch (IOException ex) {
                        Logger.getLogger(SentenceDetectorTrainer.class.getName()).log(Level.SEVERE, null, ex);
                    }
                }
            }
            //create test file
            java.io.BufferedOutputStream testFileWriter = null;
            try {
                testFileWriter = new java.io.BufferedOutputStream(new java.io.FileOutputStream(testFile));
                for (File curTrainingFile : curFileSplit.getTestFiles()) {
                    String testingFileName = curTrainingFile.getCanonicalPath();
                    textTestResults.setText(textTestResults.getText() + "testing with " + testingFileName + "\n");
                    java.io.BufferedInputStream curTrainingFileReader = null;
                    try {
                        curTrainingFileReader = new java.io.BufferedInputStream(
                                new java.io.FileInputStream(curTrainingFile));
                        while (curTrainingFileReader.available() > 0) {
                            int read = curTrainingFileReader.read();
                            testFileWriter.write(read);
                        }
                    } catch (IOException ex) {
                        Logger.getLogger(SentenceDetectorTrainer.class.getName()).log(Level.SEVERE, null, ex);
                    } finally {
                        if (curTrainingFileReader != null) {
                            curTrainingFileReader.close();
                        }
                    }
                }
                testFileWriter.write('\n');
            } catch (IOException ex) {
                Logger.getLogger(SentenceDetectorTrainer.class.getName()).log(Level.SEVERE, null, ex);
            } finally {
                if (testFileWriter != null) {
                    try {
                        testFileWriter.close();
                    } catch (IOException ex) {
                        Logger.getLogger(SentenceDetectorTrainer.class.getName()).log(Level.SEVERE, null, ex);
                    }
                }
            }
            //create and train model
            ObjectStream<String> trainingLineStream = null;
            TokenizerModel train = null;
            try {
                trainingLineStream = new PlainTextByLineStream(new FileInputStream(trainingFile), charset);
                ObjectStream<TokenSample> sampleStream = null;
                try {
                    sampleStream = new TokenSampleStream(trainingLineStream);
                    train = TokenizerME.train(sampleStream, myTokenizerFactory, TrainingParameters.defaultParams());
                } catch (IOException ex) {
                    Logger.getLogger(SentenceDetectorTrainer.class.getName()).log(Level.SEVERE, null, ex);
                } finally {
                    if (sampleStream != null) {
                        try {
                            sampleStream.close();
                        } catch (IOException ex) {
                            Logger.getLogger(SentenceDetectorTrainer.class.getName()).log(Level.SEVERE, null, ex);
                        }
                    }
                }
            } catch (FileNotFoundException ex) {
                Logger.getLogger(SentenceDetectorTrainer.class.getName()).log(Level.SEVERE, null, ex);
            } finally {
                if (trainingLineStream != null) {
                    try {
                        trainingLineStream.close();
                    } catch (IOException ex) {
                        Logger.getLogger(SentenceDetectorTrainer.class.getName()).log(Level.SEVERE, null, ex);
                    }
                }
            }
            //evaluate the new model and the standard tokenizer on the held-out file
            if (train != null) {
                ObjectStream<String> testingLineStream = null;
                try {
                    testingLineStream = new PlainTextByLineStream(new FileInputStream(testFile), charset);
                    ObjectStream<TokenSample> sampleStream = null;
                    try {
                        sampleStream = new TokenSampleStream(testingLineStream);
                        TokenizerME testDetector = new TokenizerME(train);
                        TokenizerEvaluator evaluator = new TokenizerEvaluator(testDetector);
                        evaluator.evaluate(sampleStream);
                        FMeasure testFMeasure = evaluator.getFMeasure();
                        curFStats.addValue(testFMeasure.getFMeasure());
                        curRecallStats.addValue(testFMeasure.getRecallScore());
                        curPrecisionStats.addValue(testFMeasure.getPrecisionScore());
                        textTestResults.setText(textTestResults.getText() + testFMeasure.getFMeasure() + " "
                                + testFMeasure.getPrecisionScore() + " " + testFMeasure.getRecallScore() + "\n");
                        if (stdTokenizer != null) {
                            testingLineStream = new PlainTextByLineStream(new FileInputStream(testFile), charset);
                            sampleStream = new TokenSampleStream(testingLineStream);
                            TokenizerEvaluator stdEvaluator = new TokenizerEvaluator(stdTokenizer);
                            stdEvaluator.evaluate(sampleStream);
                            FMeasure stdFMeasure = stdEvaluator.getFMeasure();
                            stdFStats.addValue(stdFMeasure.getFMeasure());
                            stdRecallStats.addValue(stdFMeasure.getRecallScore());
                            stdPrecisionStats.addValue(stdFMeasure.getPrecisionScore());
                            textTestResults.setText(textTestResults.getText() + " " + stdFMeasure.getFMeasure() + " "
                                    + stdFMeasure.getPrecisionScore() + " " + stdFMeasure.getRecallScore() + "\n");
                        }
                        textTestResults.setText(textTestResults.getText() + "\n");
                    } catch (IOException ex) {
                        Logger.getLogger(SentenceDetectorTrainer.class.getName()).log(Level.SEVERE, null, ex);
                    } finally {
                        if (sampleStream != null) {
                            try {
                                sampleStream.close();
                            } catch (IOException ex) {
                                Logger.getLogger(SentenceDetectorTrainer.class.getName()).log(Level.SEVERE, null, ex);
                            }
                        }
                    }
                } catch (FileNotFoundException ex) {
                    Logger.getLogger(SentenceDetectorTrainer.class.getName()).log(Level.SEVERE, null, ex);
                } finally {
                    if (testingLineStream != null) {
                        try {
                            testingLineStream.close();
                        } catch (IOException ex) {
                            Logger.getLogger(SentenceDetectorTrainer.class.getName()).log(Level.SEVERE, null, ex);
                        }
                    }
                }
            }
        }
        textTestResults.setText(textTestResults.getText() + "\n");
        textTestResults.setText(textTestResults.getText() + "test model\n");
        textTestResults.setText(textTestResults.getText() + "f score mean " + curFStats.getMean() + " stdDev "
                + curFStats.getStandardDeviation() + "\n");
        textTestResults.setText(textTestResults.getText() + "recall mean " + curRecallStats.getMean() + " stdDev "
                + curRecallStats.getStandardDeviation() + "\n");
        textTestResults.setText(textTestResults.getText() + "precision score mean " + curPrecisionStats.getMean()
                + " stdDev " + curPrecisionStats.getStandardDeviation() + "\n");
        textTestResults.setText(textTestResults.getText() + "std model\n");
        textTestResults.setText(textTestResults.getText() + "f score mean " + stdFStats.getMean() + " stdDev "
                + stdFStats.getStandardDeviation() + "\n");
        textTestResults.setText(textTestResults.getText() + "recall mean " + stdRecallStats.getMean() + " stdDev "
                + stdRecallStats.getStandardDeviation() + "\n");
        textTestResults.setText(textTestResults.getText() + "precision score mean " + stdPrecisionStats.getMean()
                + " stdDev " + stdPrecisionStats.getStandardDeviation() + "\n");
        //create combined training file
        trainingFileWriter = null;
        try {
            trainingFileWriter = new java.io.BufferedOutputStream(new java.io.FileOutputStream(trainingFile));
            for (File curTrainingFile : mFileCollectionListModel) {
                java.io.BufferedInputStream curTrainingFileReader = null;
                try {
                    curTrainingFileReader = new java.io.BufferedInputStream(
                            new java.io.FileInputStream(curTrainingFile));
                    while (curTrainingFileReader.available() > 0) {
                        trainingFileWriter.write(curTrainingFileReader.read());
                    }
                } catch (IOException ex) {
                    Logger.getLogger(SentenceDetectorTrainer.class.getName()).log(Level.SEVERE, null, ex);
                } finally {
                    if (curTrainingFileReader != null) {
                        curTrainingFileReader.close();
                    }
                }
            }
            trainingFileWriter.write('\n');
        } catch (IOException ex) {
            Logger.getLogger(SentenceDetectorTrainer.class.getName()).log(Level.SEVERE, null, ex);
        } finally {
            if (trainingFileWriter != null) {
                try {
                    trainingFileWriter.close();
                } catch (IOException ex) {
                    Logger.getLogger(SentenceDetectorTrainer.class.getName()).log(Level.SEVERE, null, ex);
                }
            }
        }
        //create and train the final model on all files
        ObjectStream<String> lineStream = null;
        this.createdObject = null;
        try {
            lineStream = new PlainTextByLineStream(new FileInputStream(trainingFile), charset);
            ObjectStream<TokenSample> sampleStream = null;
            try {
                sampleStream = new TokenSampleStream(lineStream);
                this.createdObject = TokenizerME.train(sampleStream, myTokenizerFactory,
                        TrainingParameters.defaultParams());
            } catch (IOException ex) {
                Logger.getLogger(SentenceDetectorTrainer.class.getName()).log(Level.SEVERE, null, ex);
            } finally {
                if (sampleStream != null) {
                    try {
                        sampleStream.close();
                    } catch (IOException ex) {
                        Logger.getLogger(SentenceDetectorTrainer.class.getName()).log(Level.SEVERE, null, ex);
                    }
                }
            }
        } catch (FileNotFoundException ex) {
            Logger.getLogger(SentenceDetectorTrainer.class.getName()).log(Level.SEVERE, null, ex);
        } finally {
            if (lineStream != null) {
                try {
                    lineStream.close();
                } catch (IOException ex) {
                    Logger.getLogger(SentenceDetectorTrainer.class.getName()).log(Level.SEVERE, null, ex);
                }
            }
        }
        //serialize the trained model
        if (createdObject != null) {
            OutputStream modelOut = null;
            File modelFile = new File("en-fiction-token.bin");
            try {
                modelOut = new BufferedOutputStream(new FileOutputStream(modelFile));
                createdObject.serialize(modelOut);
            } catch (IOException ex) {
                Logger.getLogger(SentenceDetectorTrainer.class.getName()).log(Level.SEVERE, null, ex);
            } finally {
                if (modelOut != null) {
                    try {
                        modelOut.close();
                    } catch (IOException ex) {
                        Logger.getLogger(SentenceDetectorTrainer.class.getName()).log(Level.SEVERE, null, ex);
                    }
                }
            }
        }
        textTestResults.setText(textTestResults.getText() + "done");
    }).start();
}
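The essential train-then-evaluate pattern buried in the method above, distilled into a short sketch. This is a simplified fragment, not the author's code: the file names come from the example, the factory here passes no abbreviation dictionary or custom pattern (nulls), and error handling is omitted (the calls throw IOException).

Charset utf8 = Charset.forName("UTF-8");
// Train: annotated lines -> TokenSample stream -> maxent tokenizer model.
ObjectStream<String> trainLines = new PlainTextByLineStream(new FileInputStream("en-token.train"), utf8);
ObjectStream<TokenSample> trainSamples = new TokenSampleStream(trainLines);
TokenizerFactory factory = new TokenizerFactory("EN", null, true, null); // no abbreviation dict or custom pattern
TokenizerModel model = TokenizerME.train(trainSamples, factory, TrainingParameters.defaultParams());
trainSamples.close();

// Evaluate: the evaluator re-tokenizes each sample's text and scores it
// against the sample's gold-standard token spans.
ObjectStream<String> testLines = new PlainTextByLineStream(new FileInputStream("en-token.test"), utf8);
ObjectStream<TokenSample> testSamples = new TokenSampleStream(testLines);
TokenizerEvaluator evaluator = new TokenizerEvaluator(new TokenizerME(model));
evaluator.evaluate(testSamples);
testSamples.close();
FMeasure f = evaluator.getFMeasure();
System.out.println(f.getFMeasure() + " " + f.getPrecisionScore() + " " + f.getRecallScore());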
From source file: com.civprod.writerstoolbox.OpenNLP.training.WordSplitingTokenizerTrainer.java
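This variant follows the same cross-validation structure but swaps in a custom WordSplittingTokenizerFactory backed by a spelling dictionary, detects each input file's charset instead of copying raw bytes, and evaluates against a third baseline: the non-splitting en-fiction-token.bin model produced by the previous example.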
private void cmdTrainActionPerformed(java.awt.event.ActionEvent evt) {//GEN-FIRST:event_cmdTrainActionPerformed
    final WordSplitingTokenizerTrainer tempThis = this;
    final Charset utf8 = Charset.forName("UTF-8");
    new Thread(() -> {
        textTestResults.setText("");
        //create TokenizerFactory part of the training context
        WordSplittingTokenizerFactory myTokenizerFactory = new WordSplittingTokenizerFactory("EN",
                mAbbreviationDictionary, false, null, mSpellingDictionary,
                (TimeComplexity) comboTimeComplexity.getSelectedItem());
        Tokenizer stdTokenizer = null;
        try {
            stdTokenizer = OpenNLPUtils.createTokenizer();
        } catch (IOException ex) {
            Logger.getLogger(WordSplitingTokenizerTrainer.class.getName()).log(Level.SEVERE, null, ex);
        }
        Tokenizer myNonSplitingTokenizer = null;
        try {
            myNonSplitingTokenizer = OpenNLPUtils.createTokenizer(OpenNLPUtils.readTokenizerModel(
                    OpenNLPUtils.buildModelFileStream(".\\data\\OpenNLP\\en-fiction-token.bin")));
        } catch (IOException ex) {
            Logger.getLogger(WordSplitingTokenizerTrainer.class.getName()).log(Level.SEVERE, null, ex);
        }
        List<FileSplit> FileSplits = FileSplit.generateFileSplitsLOO(mFileCollectionListModel);
        File trainingFile = new File("en-token.train");
        File testFile = new File("en-token.test");
        SummaryStatistics curFStats = new SummaryStatistics();
        SummaryStatistics curRecallStats = new SummaryStatistics();
        SummaryStatistics curPrecisionStats = new SummaryStatistics();
        SummaryStatistics stdFStats = new SummaryStatistics();
        SummaryStatistics stdRecallStats = new SummaryStatistics();
        SummaryStatistics stdPrecisionStats = new SummaryStatistics();
        SummaryStatistics myNonSplitFStats = new SummaryStatistics();
        SummaryStatistics myNonSplitRecallStats = new SummaryStatistics();
        SummaryStatistics myNonSplitPrecisionStats = new SummaryStatistics();
        java.io.BufferedWriter trainingFileWriter = null;
        for (FileSplit curFileSplit : FileSplits) {
            //create training file
            try {
                trainingFileWriter = new java.io.BufferedWriter(
                        new java.io.OutputStreamWriter(new java.io.FileOutputStream(trainingFile), utf8));
                for (File curTrainingFile : curFileSplit.getTrainingFiles()) {
                    java.io.BufferedReader curTrainingFileReader = null;
                    try {
                        Charset fileCharset = FileUtils.determineCharset(curTrainingFile);
                        if (fileCharset == null) {
                            fileCharset = utf8;
                        }
                        curTrainingFileReader = new java.io.BufferedReader(new java.io.InputStreamReader(
                                new java.io.FileInputStream(curTrainingFile), fileCharset));
                        while (curTrainingFileReader.ready()) {
                            String curLine = curTrainingFileReader.readLine();
                            trainingFileWriter.append(curLine).append("\n");
                        }
                    } catch (IOException ex) {
                        Logger.getLogger(SentenceDetectorTrainer.class.getName()).log(Level.SEVERE, null, ex);
                    } finally {
                        if (curTrainingFileReader != null) {
                            curTrainingFileReader.close();
                        }
                    }
                }
                trainingFileWriter.write('\n');
            } catch (IOException ex) {
                Logger.getLogger(SentenceDetectorTrainer.class.getName()).log(Level.SEVERE, null, ex);
            } finally {
                if (trainingFileWriter != null) {
                    try {
                        trainingFileWriter.close();
                    } catch (IOException ex) {
                        Logger.getLogger(SentenceDetectorTrainer.class.getName()).log(Level.SEVERE, null, ex);
                    }
                }
            }
            //create test file
            java.io.BufferedWriter testFileWriter = null;
            try {
                testFileWriter = new java.io.BufferedWriter(
                        new java.io.OutputStreamWriter(new java.io.FileOutputStream(testFile), utf8));
                for (File curTrainingFile : curFileSplit.getTestFiles()) {
                    String testingFileName = curTrainingFile.getCanonicalPath();
                    textTestResults.setText(textTestResults.getText() + "testing with " + testingFileName + "\n");
                    java.io.BufferedReader curTrainingFileReader = null;
                    try {
                        Charset fileCharset = FileUtils.determineCharset(curTrainingFile);
                        if (fileCharset == null) {
                            fileCharset = utf8;
                        }
                        curTrainingFileReader = new java.io.BufferedReader(new java.io.InputStreamReader(
                                new java.io.FileInputStream(curTrainingFile), fileCharset));
                        while (curTrainingFileReader.ready()) {
                            String curLine = curTrainingFileReader.readLine();
                            testFileWriter.append(curLine).append("\n");
                        }
                    } catch (IOException ex) {
                        Logger.getLogger(SentenceDetectorTrainer.class.getName()).log(Level.SEVERE, null, ex);
                    } finally {
                        if (curTrainingFileReader != null) {
                            curTrainingFileReader.close();
                        }
                    }
                }
                testFileWriter.write('\n');
            } catch (IOException ex) {
                Logger.getLogger(SentenceDetectorTrainer.class.getName()).log(Level.SEVERE, null, ex);
            } finally {
                if (testFileWriter != null) {
                    try {
                        testFileWriter.close();
                    } catch (IOException ex) {
                        Logger.getLogger(SentenceDetectorTrainer.class.getName()).log(Level.SEVERE, null, ex);
                    }
                }
            }
            //create and train model
            ObjectStream<String> trainingLineStream = null;
            TokenizerModel train = null;
            try {
                trainingLineStream = new PlainTextByLineStream(new FileInputStream(trainingFile), utf8);
                ObjectStream<TokenSample> sampleStream = null;
                try {
                    sampleStream = new TokenSampleStream(trainingLineStream);
                    train = TokenizerME.train(sampleStream, myTokenizerFactory, TrainingParameters.defaultParams());
                } catch (IOException ex) {
                    Logger.getLogger(SentenceDetectorTrainer.class.getName()).log(Level.SEVERE, null, ex);
                } finally {
                    if (sampleStream != null) {
                        try {
                            sampleStream.close();
                        } catch (IOException ex) {
                            Logger.getLogger(SentenceDetectorTrainer.class.getName()).log(Level.SEVERE, null, ex);
                        }
                    }
                }
            } catch (FileNotFoundException ex) {
                Logger.getLogger(SentenceDetectorTrainer.class.getName()).log(Level.SEVERE, null, ex);
            } finally {
                if (trainingLineStream != null) {
                    try {
                        trainingLineStream.close();
                    } catch (IOException ex) {
                        Logger.getLogger(SentenceDetectorTrainer.class.getName()).log(Level.SEVERE, null, ex);
                    }
                }
            }
            //evaluate the new model against the standard and non-splitting tokenizers
            if (train != null) {
                ObjectStream<String> testingLineStream = null;
                try {
                    testingLineStream = new PlainTextByLineStream(new FileInputStream(testFile), utf8);
                    ObjectStream<TokenSample> sampleStream = null;
                    try {
                        sampleStream = new TokenSampleStream(testingLineStream);
                        TokenizerME testDetector = new TokenizerME(train);
                        TokenizerEvaluator evaluator = new TokenizerEvaluator(testDetector);
                        evaluator.evaluate(sampleStream);
                        FMeasure testFMeasure = evaluator.getFMeasure();
                        curFStats.addValue(testFMeasure.getFMeasure());
                        curRecallStats.addValue(testFMeasure.getRecallScore());
                        curPrecisionStats.addValue(testFMeasure.getPrecisionScore());
                        textTestResults.setText(textTestResults.getText() + testFMeasure.getFMeasure() + " "
                                + testFMeasure.getPrecisionScore() + " " + testFMeasure.getRecallScore() + "\n");
                        if (stdTokenizer != null) {
                            testingLineStream = new PlainTextByLineStream(new FileInputStream(testFile), utf8);
                            sampleStream = new TokenSampleStream(testingLineStream);
                            TokenizerEvaluator stdEvaluator = new TokenizerEvaluator(stdTokenizer);
                            stdEvaluator.evaluate(sampleStream);
                            FMeasure stdFMeasure = stdEvaluator.getFMeasure();
                            stdFStats.addValue(stdFMeasure.getFMeasure());
                            stdRecallStats.addValue(stdFMeasure.getRecallScore());
                            stdPrecisionStats.addValue(stdFMeasure.getPrecisionScore());
                            textTestResults.setText(textTestResults.getText() + " " + stdFMeasure.getFMeasure() + " "
                                    + stdFMeasure.getPrecisionScore() + " " + stdFMeasure.getRecallScore() + "\n");
                        }
                        if (myNonSplitingTokenizer != null) {
                            testingLineStream = new PlainTextByLineStream(new FileInputStream(testFile), utf8);
                            sampleStream = new TokenSampleStream(testingLineStream);
                            TokenizerEvaluator myNonSplitingEvaluator = new TokenizerEvaluator(myNonSplitingTokenizer);
                            myNonSplitingEvaluator.evaluate(sampleStream);
                            FMeasure myNonSplitFMeasure = myNonSplitingEvaluator.getFMeasure();
                            myNonSplitFStats.addValue(myNonSplitFMeasure.getFMeasure());
                            myNonSplitRecallStats.addValue(myNonSplitFMeasure.getRecallScore());
                            myNonSplitPrecisionStats.addValue(myNonSplitFMeasure.getPrecisionScore());
                            textTestResults.setText(textTestResults.getText() + " " + myNonSplitFMeasure.getFMeasure()
                                    + " " + myNonSplitFMeasure.getPrecisionScore() + " "
                                    + myNonSplitFMeasure.getRecallScore() + "\n");
                        }
                        textTestResults.setText(textTestResults.getText() + "\n");
                    } catch (IOException ex) {
                        Logger.getLogger(SentenceDetectorTrainer.class.getName()).log(Level.SEVERE, null, ex);
                    } finally {
                        if (sampleStream != null) {
                            try {
                                sampleStream.close();
                            } catch (IOException ex) {
                                Logger.getLogger(SentenceDetectorTrainer.class.getName()).log(Level.SEVERE, null, ex);
                            }
                        }
                    }
                } catch (FileNotFoundException ex) {
                    Logger.getLogger(SentenceDetectorTrainer.class.getName()).log(Level.SEVERE, null, ex);
                } finally {
                    if (testingLineStream != null) {
                        try {
                            testingLineStream.close();
                        } catch (IOException ex) {
                            Logger.getLogger(SentenceDetectorTrainer.class.getName()).log(Level.SEVERE, null, ex);
                        }
                    }
                }
            }
        }
        textTestResults.setText(textTestResults.getText() + "\n");
        textTestResults.setText(textTestResults.getText() + "test model\n");
        textTestResults.setText(textTestResults.getText() + "f score mean " + curFStats.getMean() + " stdDev "
                + curFStats.getStandardDeviation() + "\n");
        textTestResults.setText(textTestResults.getText() + "recall mean " + curRecallStats.getMean() + " stdDev "
                + curRecallStats.getStandardDeviation() + "\n");
        textTestResults.setText(textTestResults.getText() + "precision score mean " + curPrecisionStats.getMean()
                + " stdDev " + curPrecisionStats.getStandardDeviation() + "\n");
        textTestResults.setText(textTestResults.getText() + "std model\n");
        textTestResults.setText(textTestResults.getText() + "f score mean " + stdFStats.getMean() + " stdDev "
                + stdFStats.getStandardDeviation() + "\n");
        textTestResults.setText(textTestResults.getText() + "recall mean " + stdRecallStats.getMean() + " stdDev "
                + stdRecallStats.getStandardDeviation() + "\n");
        textTestResults.setText(textTestResults.getText() + "precision score mean " + stdPrecisionStats.getMean()
                + " stdDev " + stdPrecisionStats.getStandardDeviation() + "\n");
        textTestResults.setText(textTestResults.getText() + "my non spliting model\n");
        textTestResults.setText(textTestResults.getText() + "f score mean " + myNonSplitFStats.getMean() + " stdDev "
                + myNonSplitFStats.getStandardDeviation() + "\n");
        textTestResults.setText(textTestResults.getText() + "recall mean " + myNonSplitRecallStats.getMean()
                + " stdDev " + myNonSplitRecallStats.getStandardDeviation() + "\n");
        textTestResults.setText(textTestResults.getText() + "precision score mean " + myNonSplitPrecisionStats.getMean()
                + " stdDev " + myNonSplitPrecisionStats.getStandardDeviation() + "\n");
        //create combined training file
        trainingFileWriter = null;
        try {
            trainingFileWriter = new java.io.BufferedWriter(
                    new java.io.OutputStreamWriter(new java.io.FileOutputStream(trainingFile), utf8));
            for (File curTrainingFile : mFileCollectionListModel) {
                java.io.BufferedReader curTrainingFileReader = null;
                try {
                    Charset fileCharset = FileUtils.determineCharset(curTrainingFile);
                    if (fileCharset == null) {
                        fileCharset = utf8;
                    }
                    curTrainingFileReader = new java.io.BufferedReader(new java.io.InputStreamReader(
                            new java.io.FileInputStream(curTrainingFile), fileCharset));
                    while (curTrainingFileReader.ready()) {
                        String curLine = curTrainingFileReader.readLine();
                        trainingFileWriter.append(curLine).append("\n");
                    }
                } catch (IOException ex) {
                    Logger.getLogger(SentenceDetectorTrainer.class.getName()).log(Level.SEVERE, null, ex);
                } finally {
                    if (curTrainingFileReader != null) {
                        curTrainingFileReader.close();
                    }
                }
            }
            trainingFileWriter.write('\n');
        } catch (IOException ex) {
            Logger.getLogger(SentenceDetectorTrainer.class.getName()).log(Level.SEVERE, null, ex);
        } finally {
            if (trainingFileWriter != null) {
                try {
                    trainingFileWriter.close();
                } catch (IOException ex) {
                    Logger.getLogger(SentenceDetectorTrainer.class.getName()).log(Level.SEVERE, null, ex);
                }
            }
        }
        //create and train the final model on all files
        ObjectStream<String> lineStream = null;
        this.createdObject = null;
        try {
            lineStream = new PlainTextByLineStream(new FileInputStream(trainingFile), utf8);
            ObjectStream<TokenSample> sampleStream = null;
            try {
                sampleStream = new TokenSampleStream(lineStream);
                this.createdObject = TokenizerME.train(sampleStream, myTokenizerFactory,
                        TrainingParameters.defaultParams());
            } catch (IOException ex) {
                Logger.getLogger(SentenceDetectorTrainer.class.getName()).log(Level.SEVERE, null, ex);
            } finally {
                if (sampleStream != null) {
                    try {
                        sampleStream.close();
                    } catch (IOException ex) {
                        Logger.getLogger(SentenceDetectorTrainer.class.getName()).log(Level.SEVERE, null, ex);
                    }
                }
            }
        } catch (FileNotFoundException ex) {
            Logger.getLogger(SentenceDetectorTrainer.class.getName()).log(Level.SEVERE, null, ex);
        } finally {
            if (lineStream != null) {
                try {
                    lineStream.close();
                } catch (IOException ex) {
                    Logger.getLogger(SentenceDetectorTrainer.class.getName()).log(Level.SEVERE, null, ex);
                }
            }
        }
        //serialize the trained model
        if (createdObject != null) {
            OutputStream modelOut = null;
            File modelFile = new File("en-fiction-token.bin");
            try {
                modelOut = new BufferedOutputStream(new FileOutputStream(modelFile));
                createdObject.serialize(modelOut);
            } catch (IOException ex) {
                Logger.getLogger(SentenceDetectorTrainer.class.getName()).log(Level.SEVERE, null, ex);
            } finally {
                if (modelOut != null) {
                    try {
                        modelOut.close();
                    } catch (IOException ex) {
                        Logger.getLogger(SentenceDetectorTrainer.class.getName()).log(Level.SEVERE, null, ex);
                    }
                }
            }
        }
        textTestResults.setText(textTestResults.getText() + "done");
    }).start();
}
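Both examples end by serializing the trained model to en-fiction-token.bin. For completeness, a short sketch of loading such a model back and tokenizing new text with the standard OpenNLP API (the sentence is invented):

InputStream modelIn = new FileInputStream("en-fiction-token.bin");
try {
    TokenizerModel model = new TokenizerModel(modelIn);
    Tokenizer tokenizer = new TokenizerME(model);
    String[] tokens = tokenizer.tokenize("\"That's fiction,\" she said.");
    System.out.println(Arrays.toString(tokens));
} finally {
    modelIn.close();
}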